dwarves-dfsg-1.15/.gitignore:
/build
/config.h

dwarves-dfsg-1.15/.gitmodules:
[submodule "lib/bpf"]
	path = lib/bpf
	url = https://github.com/libbpf/libbpf

dwarves-dfsg-1.15/CMakeLists.txt:
project(pahole C)
cmake_minimum_required(VERSION 2.8.8)
cmake_policy(SET CMP0005 NEW)

INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}
		    ${CMAKE_CURRENT_SOURCE_DIR})

# Try to parse this later, Helio just showed me a KDE4 example to support
# x86-64 builds.
# the following are directories where stuff will be installed to
set(__LIB "" CACHE STRING "Define suffix of directory name (32/64)")

macro(_set_fancy _var _value _comment)
	if (NOT DEFINED ${_var})
		set(${_var} ${_value})
	else (NOT DEFINED ${_var})
		set(${_var} "${${_var}}" CACHE PATH "${_comment}")
	endif (NOT DEFINED ${_var})
endmacro(_set_fancy)

# where to look first for cmake modules,
# before ${CMAKE_ROOT}/Modules/ is checked
set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules")

if (NOT CMAKE_BUILD_TYPE)
	set(CMAKE_BUILD_TYPE Debug CACHE STRING
	    "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel."
	    FORCE)
endif (NOT CMAKE_BUILD_TYPE)

add_definitions(-D_GNU_SOURCE -DDWARVES_VERSION="v1.15")
find_package(DWARF REQUIRED)
find_package(ZLIB REQUIRED)

# make sure git submodule(s) are checked out
find_package(Git QUIET)
if(GIT_FOUND AND EXISTS "${PROJECT_SOURCE_DIR}/.git")
	# Update submodules as needed
	option(GIT_SUBMODULE "Check submodules during build" ON)
	if(GIT_SUBMODULE)
		message(STATUS "Submodule update")
		execute_process(COMMAND ${GIT_EXECUTABLE} submodule update --init --recursive
				WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
				RESULT_VARIABLE GIT_SUBMOD_RESULT)
		if(NOT GIT_SUBMOD_RESULT EQUAL "0")
			message(FATAL_ERROR "git submodule update --init failed with ${GIT_SUBMOD_RESULT}, please checkout submodules")
		else()
			message(STATUS "Submodule update - done")
		endif()
	endif()
endif()

if(NOT EXISTS "${PROJECT_SOURCE_DIR}/lib/bpf/src/btf.h")
	message(FATAL_ERROR "The submodules were not downloaded! GIT_SUBMODULE was turned off or failed.
			Please update submodules and try again.")
endif()

_set_fancy(LIB_INSTALL_DIR "${EXEC_INSTALL_PREFIX}${CMAKE_INSTALL_PREFIX}/${__LIB}" "libdir")

# libbpf uses reallocarray, which is not available in all versions of glibc.
# libbpf's include/tools/libc_compat.h provides an implementation, but it
# needs COMPAT_NEED_REALLOCARRAY to be set
INCLUDE(CheckCSourceCompiles)
CHECK_C_SOURCE_COMPILES(
"
#define _GNU_SOURCE
#include <stdlib.h>
int main(void)
{
	return !!reallocarray(NULL, 1, 1);
}
" HAVE_REALLOCARRAY_SUPPORT)
if (NOT HAVE_REALLOCARRAY_SUPPORT)
	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DCOMPAT_NEED_REALLOCARRAY")
endif()

file(GLOB libbpf_sources "lib/bpf/src/*.c")
add_library(bpf OBJECT ${libbpf_sources})
set_property(TARGET bpf PROPERTY POSITION_INDEPENDENT_CODE 1)
target_include_directories(bpf PRIVATE
			   ${CMAKE_CURRENT_SOURCE_DIR}/lib/bpf/include
			   ${CMAKE_CURRENT_SOURCE_DIR}/lib/bpf/include/uapi)

set(dwarves_LIB_SRCS dwarves.c dwarves_fprintf.c gobuffer strings
		     ctf_encoder.c ctf_loader.c libctf.c btf_encoder.c
		     btf_loader.c libbtf.c dwarf_loader.c dutil.c
		     elf_symtab.c rbtree.c)
add_library(dwarves SHARED ${dwarves_LIB_SRCS} $<TARGET_OBJECTS:bpf>)
set_target_properties(dwarves PROPERTIES VERSION 1.0.0 SOVERSION 1)
set_target_properties(dwarves PROPERTIES INTERFACE_LINK_LIBRARIES "")
target_link_libraries(dwarves ${DWARF_LIBRARIES} ${ZLIB_LIBRARIES})

set(dwarves_emit_LIB_SRCS dwarves_emit.c)
add_library(dwarves_emit SHARED ${dwarves_emit_LIB_SRCS})
set_target_properties(dwarves_emit PROPERTIES VERSION 1.0.0 SOVERSION 1)
target_link_libraries(dwarves_emit dwarves)

set(dwarves_reorganize_LIB_SRCS dwarves_reorganize.c)
add_library(dwarves_reorganize SHARED ${dwarves_reorganize_LIB_SRCS})
set_target_properties(dwarves_reorganize PROPERTIES VERSION 1.0.0 SOVERSION 1)
target_link_libraries(dwarves_reorganize dwarves)

set(codiff_SRCS codiff.c)
add_executable(codiff ${codiff_SRCS})
target_link_libraries(codiff dwarves)

set(ctracer_SRCS ctracer.c)
add_executable(ctracer ${ctracer_SRCS})
target_link_libraries(ctracer dwarves dwarves_emit dwarves_reorganize ${ELF_LIBRARY})

set(dtagnames_SRCS dtagnames.c)
add_executable(dtagnames ${dtagnames_SRCS})
target_link_libraries(dtagnames dwarves)

set(pahole_SRCS pahole.c)
add_executable(pahole ${pahole_SRCS})
target_link_libraries(pahole dwarves dwarves_reorganize)

set(pdwtags_SRCS pdwtags.c)
add_executable(pdwtags ${pdwtags_SRCS})
target_link_libraries(pdwtags dwarves)

set(pglobal_SRCS pglobal.c)
add_executable(pglobal ${pglobal_SRCS})
target_link_libraries(pglobal dwarves)

set(pfunct_SRCS pfunct.c)
add_executable(pfunct ${pfunct_SRCS})
target_link_libraries(pfunct dwarves dwarves_emit ${ELF_LIBRARY})

set(prefcnt_SRCS prefcnt.c)
add_executable(prefcnt ${prefcnt_SRCS})
target_link_libraries(prefcnt dwarves)

set(scncopy_SRCS scncopy.c elfcreator.c)
add_executable(scncopy ${scncopy_SRCS})
target_link_libraries(scncopy dwarves ${ELF_LIBRARY})

set(syscse_SRCS syscse.c)
add_executable(syscse ${syscse_SRCS})
target_link_libraries(syscse dwarves)

install(TARGETS codiff ctracer dtagnames pahole pdwtags pfunct
		pglobal prefcnt scncopy syscse RUNTIME DESTINATION
		${CMAKE_INSTALL_PREFIX}/bin)
install(TARGETS dwarves LIBRARY DESTINATION ${LIB_INSTALL_DIR})
install(TARGETS dwarves dwarves_emit dwarves_reorganize LIBRARY DESTINATION ${LIB_INSTALL_DIR})
install(FILES dwarves.h dwarves_emit.h dwarves_reorganize.h
	      dutil.h gobuffer.h list.h rbtree.h strings.h
	      btf_encoder.h config.h ctf_encoder.h ctf.h
	      elfcreator.h elf_symtab.h hash.h libbtf.h libctf.h
	      DESTINATION ${CMAKE_INSTALL_PREFIX}/include/dwarves/)
install(FILES
man-pages/pahole.1 DESTINATION ${CMAKE_INSTALL_PREFIX}/share/man/man1/) install(PROGRAMS ostra/ostra-cg DESTINATION ${CMAKE_INSTALL_PREFIX}/bin) install(PROGRAMS btfdiff fullcircle DESTINATION ${CMAKE_INSTALL_PREFIX}/bin) install(FILES ostra/python/ostra.py DESTINATION ${CMAKE_INSTALL_PREFIX}/share/dwarves/runtime/python) install(FILES lib/Makefile lib/ctracer_relay.c lib/ctracer_relay.h lib/linux.blacklist.cu DESTINATION ${CMAKE_INSTALL_PREFIX}/share/dwarves/runtime) dwarves-dfsg-1.15/COPYING000066400000000000000000000431031350511416500150710ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. 
This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. 
Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. 
Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. 
For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. 
You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. dwarves-dfsg-1.15/MANIFEST000066400000000000000000000014331350511416500151670ustar00rootroot00000000000000config.h.cmake btfdiff btf_encoder.c btf_encoder.h btf_loader.c ctf_encoder.c ctf_encoder.h ctf_loader.c dwarf_loader.c dwarves.c dwarves.h dwarves_emit.c dwarves_emit.h dwarves_fprintf.c dwarves_reorganize.c dwarves_reorganize.h cmake/modules/FindDWARF.cmake CMakeLists.txt codiff.c ctracer.c dtagnames.c elfcreator.c elfcreator.h elf_symtab.c elf_symtab.h fullcircle gobuffer.c gobuffer.h hash.h libbtf.c libbtf.h list.h MANIFEST man-pages/pahole.1 pahole.c pdwtags.c pfunct.c pglobal.c prefcnt.c rbtree.c rbtree.h scncopy.c syscse.c strings.c strings.h dutil.c dutil.h COPYING NEWS README README.btf README.ctracer rpm/SPECS/dwarves.spec lib/Makefile lib/ctracer_relay.c lib/ctracer_relay.h lib/linux.blacklist.cu ostra/ostra-cg ostra/python/ostra.py ctf.h libctf.c libctf.h regtest lib/bpf/ dwarves-dfsg-1.15/NEWS000066400000000000000000000512621350511416500145420ustar00rootroot00000000000000v1.15 Thu Jun 27 2019 3ed9a67967cf fprintf: Avoid null dereference with NULL configs 568dae4bd498 printf: Fixup printing "const" early with "const void" 68f261d8dfff fprintf: Fix recursively printing named structs in --expand_types 139a3b337381 ostra: Initial python3 conversion 01276a7e8966 spec: Sync spec with fedora's 9f1f0628b9ad rpm: Add missing devel headers 989dc3f1ba0d cmake: Install missing devel headers v1.13 Tue Apr 16 2019 See changes-v1.13 for a more verbose description of the changes. 
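Many of the v1.13 entries below revolve around inferring __attribute__((__packed__)) from DWARF member offsets and natural alignment ("core: Infer if a struct is packed by the offsets/natural alignments" and friends). A minimal sketch of the kind of layout involved, using a hypothetical struct that is not from the tree:

	/* Hypothetical example: a 4-byte int at offset 1 is unnatural
	 * (its natural alignment is 4), so when pahole sees this layout
	 * in the DWARF info it infers and prints the packed attribute. */
	struct example {
		char a;   /* offset 0, size 1 */
		int  b;   /* offset 1, size 4 */
	} __attribute__((__packed__));
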
0fb727166a0e pfunct: Strip inlines in the code generated for --compile 7b967744db7b emit: Emit the types for inline structs defined in pointer members 30526e2848db fprintf: Print relative offsets for inner pointer structs cfa377c238f8 emit: Do not emit a forward declararion to a nameless struct cf459ca16fb2 fprintf: Pretty print struct members that are pointers to nameless structs 09ed2e78befe pfunct: Emit definitions for pointers inside pointer to function args e9fc2f647026 fullcircle: Check that we found the CFLAGS 05921c47f557 emit: Handle typedefs that are a pointer to typedefs 87af9839bf63 fullcircle: Try building from pfunct --compile and check types c7fd9cc1fe97 pfunct: Fixup more return types 56c50b8dbe9b emit: Make find_fwd_decl a bit more robust 2bcb01fc2f9d fprintf: Handle single zero sized array member structs 0987266cd9e2 fprintf: Deal with zero sized arrays in the midle of a union 1101337a7450 fprintf: Deal with zero sized arrays in the middle of a struct 13aa13eb0ca4 fprintf: Don't reuse 'type' in multiple scopes in the same function c8fc6f5a7a46 core: Use unnatural alignment of struct embedded in another to infer __packed__ 6f0f9a881589 fprintf: Fixup multi-dimensional zero sized arrays const handling 9a4d71930467 fprintf: Allow suppressing the inferred __attribute__((__packed__)) ec935ee422b0 fprintf: Allow suppressing the output of force paddings at the end of structs 8471736f3c6c core: Cope with zero sized types when looking for natural alignment 986a3b58a869 fprintf: Only add bitfield forced paddings when alignment info available 49c27bdd6663 core: Allow the loaders to advertise features they have dc6b9437a3a0 emit: Handle structs with DW_AT_alignment=1 meaning __packed__ f78633cfb949 core: Infer __packed__ for union struct members 75c52de9c6df core: Move packed_attribute_inferred from 'class' to 'type' class 1bb4527220f4 fprintf: Fixup const pointers dc3d44196103 core: Improve the natural alignment calculation ac32e5e908ba codiff: Fix comparision of multi-cu against single-cu files f2641ce169d6 core: Take arrays into account when inferring if a struct is packed 85c99369631a fprintf: Do not add explicit padding when struct has __aligned__ attr b5e8fab596d3 emit: Cover void ** as a function parameter 28a3bc7addad fprintf: Support packed enums f77a442f09e3 fprintf: Do not print the __aligned__ attribute if asked ea583dac52f0 fprintf: Print zero sized flat arrays as [], not [0] f909f13dd724 fprintf: Fixup handling of unnamed bitfields 3247a777dcfc core: Infer if a struct is packed by the offsets/natural alignments 13e5b9fc00ee fprintf: Add unnamed bitfield padding at the end to rebuild original type ccd67bdb205b fprintf: Print "const" for class members more early, in type__fprintf() b42d77b0bbda fprintf: Print __attribute__((__aligned__(N))) for structs/classes 1c9c1d6bbd45 dwarf_loader: Store DW_AT_alignment if available in DW_TAG_{structure,union,class}_type 41c55858daf4 codiff: Add --quiet option a104eb1ea11d fprintf: Notice explicit bitfield alignment modifications 75f32a24c7c7 codiff: Improve the comparision of anonymous struct members 6b1e43f2c1ac codiff: When comparing against a file with just one CU don't bother finding by name 15a754f224f7 core: Add nr_entries member to 'struct cus' 99750f244cb8 pfunct: Generate a valid return type for the --compile bodies 881aabd6fc22 reorganize: Introduce class__for_each_member_from_safe() 1b2e3389f304 reorganize: Introduce class__for_each_member_reverse() 10fef2916dce reorganize: Introduce class__for_each_member_continue() 
e7a56ee8cc69 reorganize: Introduce class__for_each_member_from() 9a79bb6ced23 tag: Introduce tag__is_pointer_to() 45ad54594442 tag: Introduce tag__is_pointer() 89ce57a02e3a pdwtags: Find holes in structs ce6f393bc9ea fprintf: Fixup the printing of const parameters 7aec7dd6c29c pfunct: Do not reconstruct external functions 163b873f81c8 pfunct: Do not reconstruct inline expansions of functions ea83b780eca0 pfunct: Handle unnamed struct typedefs e7ebc05d12e1 emit: Unwind the definitions for typedefs in type__emit_definitions() 093135b0bfd5 pfunct: Do not emit a type multiple times 3ce2c5216612 pfunct: Ask for generating compilable output that generates DWARF for types e7a786540d83 pfunct: Make --expand_types/-b without -f expand types for all functions 9b2eadf97b44 pfunct: Follow const, restrict, volatile in --expand_types f3f86f2f89b0 pfunct: Reconstruct function return types for --expand_types a7d9c58cb81a fprintf: Add missing closing parens to the align attribute d83d9f578fa0 dwarf_loader: Handle DW_TAG_label in inline expansions 73e545b144b4 dwarf_loader: Handle unsupported_tag in die__process_inline_expansion fe590758cb3f class__find_holes: Zero out bit_hole/hole on member 863c2af6e9d7 reorganize: Disable the bitfield coalescing/moving steps b95961db697a fprintf: Show statistics about holes due to forced alignments ec772f21f681 fprintf: Show the number of forced alignments in a class 52d1c75ea437 btfdiff: Use --suppress_aligned_attribute with -F dwarf 6cd6a6bd8787 dwarves_fprintf: Allow suppressing the __attribute__((__aligned__(N)) f31ea292e3cb dwarf_loader: Store the DW_AT_alignment if available c002873c4479 dwarves_fprintf: Move invariant printing of ; to outside if block 8ce85a1ad7f0 reorganize: Use class__find_holes() to recalculate holes 5d1c4029bd45 dwarves: Fix classification of byte/bit hole for aligned bitfield 78c110a7ea24 dwarves: Revert semantics of member bit/byte hole b56fed297e5f dwarves_fprintf: Count bitfield member sizes separately c0fdc5e685e9 dwarf_loader: Use DWARF recommended uniform bit offset scheme 5104d1bef384 loaders: Record CU's endianness in dwarf/btf/ctf loaders 975757bc8867 dwarves: Use bit sizes and bit/byte hole info in __class__fprintf 1838d3d7623e dwarves: Revamp bit/byte holes detection logic 03d9b6ebcac7 dwarf_loader: Fix bitfield fixup logic for DWARF 4abc59553918 btf_loader: Adjust negative bitfield offsets early on 41cf0e3cba0c dwarf_loader: Don't recode enums and use real enum size in calculations 55c96aaed8ce loaders: Strip away volatile/const/restrict when fixing bitfields 7005757fd573 libbpf: Sync in latest libbpf sources 69970fc77ec5 pahole: Filter out unions when looking for packable structs fa963e1a8698 dwarves_fprintf: Print the bit_offset for inline enum bitfield class members bb8350acf52f dwarves: Switch type_id_t from uint16_t to uint32_t 5375d06faf26 dwarves: Introduce type_id_t for use with the type IDs f601f6725890 libctf: The type_ids returned are uint32_t fixup where it was uint16_t c9b2ef034f89 dwarf: Add cu__add_tag_with_id() to stop using id == -1 to allocate id 762e7b58f447 dwarves: Change ptr_table__add() signature to allow for uint32_t returns 079e6890b788 dwarf_loader: Mark tag__recode_dwarf_bitfield() static 3526ebebd3ab pahole: Use 32-bit integers for type ID iterations within CU 3bd8da5202e4 libbpf: update reference to bring in btf_dedup fixes a9afcc65fc8f btf_encoder: Don't special case packed enums 8f4f280163b7 btf_loader: Simplify fixup code by relying on BTF data more be5173b4df0f dwarves: Fixup sizeof(long double) in 
bits in base_type_name_to_size table 5081ed507095 dwarves: Add _Float128 base_type d52c9f9b9455 dwarf_loader: Fixup bitfield entry with same number of bits as its base_type 6586e423d4fa btf_loader: Fix bitfield fixup code 7daa4300d230 pahole: Complete list of base type names 6bcf0bd70305 btfdiff: Support specifying custom pahole location 88028b5d0c32 btfdiff: Use --show_private_classes with DWARF e6c59bd11d3d libbpf: Build as PIC and statically link into libdwarves cf4f3e282d64 cmake: Bump miminum required version to use OBJECT feature 5148be53dc65 btfdiff: Rename tmp files to contain the format used dd3a7d3ab3e8 btf_encoder: run BTF deduplication before writing out to ELF 54106025cd14 libbtf: Fixup temp filename to .btf, not .btfe e6dfd10bcbf3 libbpf: Build as shared lib c234b6ca6e55 libbpf: Pull latest libbpf fe4e1f799c55 btf_elf: Rename btf_elf__free() to btf_elf__delete() 6780c4334d55 btf: Rename 'struct btf' to 'struct btf_elf' ca86e9416b8b pahole: use btf.h directly from libbpf 21507cd3e97b pahole: add libbpf as submodule under lib/bpf c25ada500ddc pahole: Add build dir, config.h to .gitignore a58c746c4c7e Fixup copyright notices for BTF files authored by Facebook engineers e714d2eaa150 Adopt SPDX-License-Identifier c86960dce55d btf_loader: We can set class_member->type_offset earlier 278b64c3eee0 btfdiff: Use diff's -p option to show the struct/union 1182664d6aa6 dwarves_fprintf: Handle negative bit_offsets in packed structs with bitfields b0cf845e02c6 dwarves: Change type of bitfield_offset from uint8_t to int8_t 06e364bc62e7 btfdiff: Add utility to compare pahole output produced from DWARF and BTF b79db4cab41c dwarves: add __int128 types in base_type_name_to_size de3459cc0ebe btf_loader: BTF encodes the size of enums as bytes not bits 693347f8def7 btf_encoder: Fix void handling in FUNC_PROTO. 
2d0b70664f3e dwarves_fprintf: Separate basic type stats into separate type__fprintf() method 18f5910f96e0 dwarves: Add type to tag helper f2092f56586a btf: recognize BTF_KIND_FUNC in btf_loader 11766614096c btf: Fix kind_flag usage in btf_loader 68b93e6858ae dutil: Add missing string.h header include 851ef335e328 dutil: Drop 'noreturn' attribute for ____ilog2_NaN() ab0cb33e54e8 btf_loader: Fixup class_member->bit_offset for !big_endian files b24718fe27d3 dwarves: Fix documentation for class_memer->bitfield_size 3ffe5ba93b63 pahole: Do not apply 'struct class' filters to 'struct type' da18bb340bee dwarves: Check if the tag is a 'struct class' in class__find_holes() 2a82d593be81 btf: Add kind_flag support for btf_loader 472256d3c57b btf_loader: Introduce a loader for the BTF format 93d6d0016523 dwarves: No need to print the "signed ", the name has it already 0a9bac9a3e8e dwarves: Relookup when searching for signed base types a2cdc6c2a0a3 dutil: Adopt strstart() from the linux perf tools sources 3aa3fd506e6c btf: add func_proto support 8630ce404287 btf: fix struct/union/fwd types with kind_flag 65bd17abc72c btf: Allow multiple cu's in dwarf->btf conversion d843945ba514 pahole: Search for unions as well with '-C' da632a36862c dwarves: Introduce {cu,cus}__find_struct_or_union_by_name() methods 31664d60ad41 pahole: Show tagged enums as well when no class is specified b18354f64cc2 btf: Generate correct struct bitfield member types 70ef8c7f07ff dwarves_fprintf: Set conf.cachelinep in union__fprintf() too bfdea37668c6 dwarves_fprintf: Print the scope of variables 465110ec99d3 dwarves: Add the DWARF location to struct variable c65f2cf4361e dwarves: Rename variable->location to ->scope 0d2511fd1d8e btf: Fix bitfield encoding 92417082aad3 MANIFEST: Add missing COPYING file eb6bd05766f5 dwarf_loader: Process DW_AT_count in DW_TAG_subrange_type v1.12 Thu Aug 16 2018 1ca2e351dfa1 README.btf: Add section on validating the .BTF section via the kernel 9eda5e8163ce README.btf: No need to use 'llvm.opts = -mattr=dwarfris' with elfutils >= 0.173 7818af53f64a dwarves: Add a README.btf file with steps to test the BTF encoder f727c22191d0 dwarf_loader: Initial support for DW_TAG_partial_unit e975ff247aa8 dwarves_fprintf: Print cacheline boundaries in multiple union members 68645f7facc2 btf: Add BTF support 81466af0d4f8 pahole: Show the file where a struct was used 2dd87be78bb2 dwarves_fprintf: Show offsets at union members 66cf3983e1ac README.DEBUG: Add an extra step to make the instructions cut'n'exec 2a092d61453c dwarves: Fix cus__load_files() success return value 02a456f5f54c pahole: Search and use running kernel vmlinux when no file is passed 5f057919a0c0 man-pages: Add entry for --hex v1.11 Wed Jun 2017 5a57eb074170 man-pages: Update URL to the dwarves paper b52386d041fa dwarves_fprintf: Find holes when expanding types 103e89bb257d dwarves_fprintf: Find holes on structs embedded in other structs ab97c07a7ebe dwarves_fprintf: Fixup cacheline boundary printing on expanded structs 046ad67af383 dwarves_fprintf: Shorten class__fprintf() sig 44130bf70e1c dwarves: Update e-mail address 327757975b94 dwarf_loader: Add URL for template tags description f4d5e21fd1b2 dwarf_loader: Tidy up template tags usage e12bf9999944 dwarf_loader: Do not hash unsupported tags 3afcfbec9e08 dwarf_loader: Add DW_TAG_GNU_formal_parameter_pack stub in process_function 55d9b20dbaf6 dwarf_loader: Ignore DW_TAG_dwarf_procedure when processing functions 45618c7ec122 dwarf_loader: Initial support for DW_TAG_unspecified_type 658a238b9890 
dwarf_loader: Stop emitting warnings about DW_TAG_call_site 0fbb39291d59 dwarf_loader: Add support for DW_TAG_restrict_type 9df42c68265d dwarves: Initial support for rvalue_reference_type 8af5ccd86d21 dwarves: Use cus__fprintf_load_files_err() in the remaining tools 10515a7c4db7 dwarves: Introduce cus__fprintf_load_files_err() 0e6463635082 pahole: Show more informative message when errno is properly set on error 2566cc2c8715 pdwtags: Show proper error messages for files it can't handle 9f3f67e78679 dwarves: Fix cus__load_files() error return ae3a2720c3d3 dutil: Add ____ilog2_NaN declaration to silence compiler warning 0b81b5ad4743 Update version in CMakeLists.txt 79536f4f9587 cmake: Use INTERFACE_LINK_LIBRARIES 1decb1bc4a41 dwarf_loader: Check cu__find_type_by_ref result 956343d05a41 Add instructions on how to build with debug info e71353c3fa0a dwarf_loader: Ignore DW_TAG_dwarf_procedure 189695907242 dwarves_fprintf: Add the missing GNU_ suffix to DWARF_TAG_ created by the GNU project d973b1d5daf0 dwarf_fprintf: Handle DW_TAG_GNU_call_site{_parameter} c23eab4b1253 dwarf_loader: Print unknown tags as an hex number 943a0de0679a dwarves_fprintf: DW_TAG_mutable_type doesn't exist. a8e562a15767 dwarf_loader: Use obstack_zalloc when allocating tag fd3838ae9aa3 dwarves: Stop using 'self' 5ecf1aab9e10 dwarf_loader: Support DW_FORM_data{4,8} for reading class member offsets c4ccdd5ae63b dwarves_reorganize: Fix member type fixup e31fda3063e3 dwarves_reorganize: Fixup calculation of bytes needed for bitfield 1e461ec7e0e8 dwarves_fprintf: Fix printf types on 64bit linux 222f0067a9c3 dwarves_fprintf: Don't ignore virtual data members e512e3f9b36b dwarves: Update git url 8c6378fd8834 dwarves: Support static class data members a54515fa6ee4 dwarves: Stop using 'self' 6035b0d91f19 rpm: Add missing BuildRequires: zlib-devel be7b691756ff dwarf_loader: Don't stop processing after finding unsupported tag v1.10 Wed May 30 2012 . Initial DWARF4 support, by Tom Tromey . Add stubs for some new GNU Tags . Fix build on older systems . Fix a crash when pahole is called with -R -S, from Tal Kelrich v1.9: Ignore DW_TAG_template_{type,value}_parameter, fixing a bug reported at: https://bugzilla.redhat.com/show_bug.cgi?id=654471 More work is needed to properly support these tags. ----------------------------------------- After a long time without a new release because I was trying to get the CTF support completed, and due to the very strong gravity force in the Linux kernel perf tools, here it is 1.8, with lots of performance improvements, bug fixes and changes to better use these tools in scripts. For full details please take a look at the git changesets, repo available at: http://git.kernel.org/cgit/devel/pahole/pahole.git - Arnaldo pahole: . Allow list of structs to be passed to pahole. E.g.: 'pahole -C str_node,strings' Suggested by Zack Weinberg , for scripting. . Introduce --hex to print offsets and sizes in hexadecimal codiff: . Improve detection of removal and addition of members in structs . Detect changes in padding and the number of holes/bit_holes pfunct: . --no_parm_names Because CTF doesn't encodes the names of the parameters and I want to test the upcoming CTF function section code in ctftwdiff. . pfunct --addr Using an rbtree to find in which function the given addr is. libdwarves: . Greatly reduce the data structures footprint and lookup by recoding the IDs as short integers, that was done to facilitate support for CTF but benefited the core libraries greatly. . 
Handle GCC support for vector instructions So now it recognizes, as printed by pdwtags: 908 typedef int __m64 __attribute__ ((__vector_size__ (8))); size: 8 909 int array __attribute__ ((__vector_size__ (8))); size: 8 910 int array __attribute__ ((__vector_size__ (4))); size: 4 911 short int array __attribute__ ((__vector_size__ (2))); size: 2 912 char array __attribute__ ((__vector_size__ (1))); size: 1 . Destructors were added so that no leaks are left if this library is to be used in other tools that don't end the program when done using this lib. . Allow the tools to pass a callback that is used after loading each object file (CU/Compile Unit), so that we can more quickly find tags and stop the processing sooner, or at least delete the CU if it doesn't have anything needed by the tool. This _greatly_ speeded up most of the tools. . Tools now can pass a debug format "path", specifying the order it wants to try, so that if a file have both DWARF and CTF, specifying 'ctf,dwarf' will use the CTF info. . Now the formatting routines are in a separate file, dwarves_fprintf.c. This was done for organizational purposes but also to pave the way for multiple formatting backends, so that we can print, for instance, in CSV the structs, for easier scripting like done by several folks out there. . Handle volatile typedef bitfields, like: struct _GClosure { volatile guint ref_count:15; /* 0:17 4 */ volatile guint meta_marshal:1; /* 0:16 4 */ volatile guint n_guards:1; /* 0:15 4 */ . Load java 'interfaces' as a struct/class. . Fix buffer expansion bug, detected thanks to boost that provided things like: virtual int undefine(class grammar_helper > *); /* linkage=_ZN5boost6spirit4impl14grammar_helperINS0_7grammarINS_6detail5graph11dot_skipperENS0_14parser_contextINS0_5nil_tEEEEES6_NS0_7scannerINS0_10multi_passISt16istream_i */ . Allow optional addr information loading, speeding up some apps that don't use such addresses (or in modes where addrs aren't used) such as pahole. . Use a obstacks, speeding up apps as measured with the perf tools. . Support zero sized arrays in the middle of a struct. . Fix padding calculation in the reorganize routines. . Fix bitfield demotion in the reorganize routines. . Support "using" pointing to data members (C++). . Properly support pointers to const, reported by Jan Engelhardt : . Support more compact DW_AT_data_member_location form, pointed out by Mark Wielaard and reported by Mike Snitzer Experimental CTF support: libdwarves was reorganized so that it can support multiple debugging formats, with the first one being supported being the Compact C Type Format that comes from the OpenSolaris world. David S. Miller contributed an initial CTF decoder and from there I wrote an encoder. To test this a regression testing harness (regtest in the sources) that will take files with DWARF info and from there encode its contents in CTF in another ELF section in the same file (.SUN_ctf). Then it will decode both the DWARF and CTF sections and compare the results for pahole running with some new flags that cope with some subtleties in the way CTF encodes things. 
--flat_arrays We have just one dimension in CTF, with the total number of entries, in DWARF we can express this, but not in CTF: __u8 addr[0][6]; /* 4 0 */ So --flat_arrays will show it as: __u8 addr[0]; /* 4 0 */ --show_private_classes --fixup_silly_bitfields To cope with things like 'char foo:8' that since CTF has only the number of bits, can't be expressed as we don't know if it is a bitfield or just a char without the ':8' suffix. --first_obj_only Look only at the first object file in a file with multiple object files, like vmlinux. This is because the CTF support is not complete yet, needing the merging of types in multiple object files to be done. --classes_as_structs CTF has only a type for structs, not for classes like DWARF (DW_TAG_class_type is not present in CTF), so print them all as 'struct'. Running with the above limitations produce just a few mismatches, related to packed structs and enumerations and bitfields. dwarves-dfsg-1.15/README000066400000000000000000000006371350511416500147230ustar00rootroot00000000000000Build instructions: 1. install cmake 2. mkdir build 3. cd build 4. cmake -D__LIB=lib .. 5. make install Default is to be installed on /usr/local, see rpm spec file for installing on other places. Known to work scenarios: Mandriva Cooker: cmake 2.4.5-1mdv2007.1 libelfutils1-devel 0.123-1mdv2007.1 Debian Unstable: cmake 2.4.5-1 libdw-dev 0.123-2 Fedora Core 6: cmake 2.4.5-2.fc6 elfutils-devel 0.126-1.fc6 dwarves-dfsg-1.15/README.DEBUG000066400000000000000000000001301350511416500155340ustar00rootroot00000000000000rm -rf build mkdir build cd build cmake -DCMAKE_BUILD_TYPE=Debug .. cd .. make -C build dwarves-dfsg-1.15/README.btf000066400000000000000000000747211350511416500155020ustar00rootroot00000000000000We'll test the BTF encoder using perf's eBPF integration, but really we can plain use clang directly, setting up all its options. Using perf's integration will save some time here, to see all it does, use 'perf trace -vv' plus the options used below, then all the steps will be shown. Build perf from the latest kernel sources, use it with clang/llvm like: [root@seventh ~]# clang --version clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566eefef9c3777bd780ec4cbb9efa764633b76c) Target: x86_64-unknown-linux-gnu Thread model: posix InstalledDir: /usr/local/bin [root@seventh ~]# llc --version | head -17 LLVM (http://llvm.org/): LLVM version 8.0.0svn DEBUG build with assertions. Default target: x86_64-unknown-linux-gnu Host CPU: skylake Registered Targets: aarch64 - AArch64 (little endian) aarch64_be - AArch64 (big endian) amdgcn - AMD GCN GPUs arm - ARM arm64 - ARM64 (little endian) armeb - ARM (big endian) bpf - BPF (host endian) bpfeb - BPF (big endian) bpfel - BPF (little endian) hexagon - Hexagon [root@seventh ~]# Then enable saving the object file build as part of perf's handling of foo.c type events, i.e. eBPF programs that will be compiled with clang and then loaded with sys_bpf() to possibly insert events in perf's ring buffer via bpf_perf_event_output(), or interact with the system via bpf_trace_printk() or just work as filters, etc: # cat ~/.perfconfig [llvm] dump-obj = true Then run a simple example, found in the kernel sources: # perf trace -e tools/perf/examples/bpf/hello.c cat /etc/passwd > /dev/null LLVM: dumping tools/perf/examples/bpf/hello.o 0.000 __bpf_stdout__:Hello, world 0.028 __bpf_stdout__:Hello, world 0.291 __bpf_stdout__:Hello, world # Notice that "LLVM: dumping..." 
line, look at the ELF sections in that file: [root@seventh perf]# readelf -SW tools/perf/examples/bpf/hello.o There are 11 section headers, starting at offset 0x220: Section Headers: [Nr] Name Type Address Off Size ES Flg Lk Inf Al [ 0] NULL 0000000000000000 000000 000000 00 0 0 0 [ 1] .strtab STRTAB 0000000000000000 00018c 00008d 00 0 0 1 [ 2] .text PROGBITS 0000000000000000 000040 000000 00 AX 0 0 4 [ 3] syscalls:sys_enter_openat PROGBITS 0000000000000000 000040 000088 00 AX 0 0 8 [ 4] .relsyscalls:sys_enter_openat REL 0000000000000000 000178 000010 10 10 3 8 [ 5] maps PROGBITS 0000000000000000 0000c8 00001c 00 WA 0 0 4 [ 6] .rodata.str1.1 PROGBITS 0000000000000000 0000e4 00000e 01 AMS 0 0 1 [ 7] license PROGBITS 0000000000000000 0000f2 000004 00 WA 0 0 1 [ 8] version PROGBITS 0000000000000000 0000f8 000004 00 WA 0 0 4 [ 9] .llvm_addrsig LOOS+0xfff4c03 0000000000000000 000188 000004 00 E 10 0 1 [10] .symtab SYMTAB 0000000000000000 000100 000078 18 1 1 8 Key to Flags: W (write), A (alloc), X (execute), M (merge), S (strings), I (info), L (link order), O (extra OS processing required), G (group), T (TLS), C (compressed), x (unknown), o (OS specific), E (exclude), p (processor specific) [root@seventh perf]# No DWARF debugging info, so we need to further customize ~/.perfconfig LLVM section: [root@seventh perf]# cat ~/.perfconfig [llvm] dump-obj = true clang-opt = -g [root@seventh perf]# perf trace -e tools/perf/examples/bpf/hello.c cat /etc/passwd > /dev/null LLVM: dumping tools/perf/examples/bpf/hello.o 0.000 __bpf_stdout__:Hello, world 0.015 __bpf_stdout__:Hello, world 0.184 __bpf_stdout__:Hello, world [root@seventh perf]# [root@seventh perf]# readelf -SW tools/perf/examples/bpf/hello.o There are 26 section headers, starting at offset 0xe20: Section Headers: [Nr] Name Type Address Off Size ES Flg Lk Inf Al [ 0] NULL 0000000000000000 000000 000000 00 0 0 0 [ 1] .strtab STRTAB 0000000000000000 000cf4 000127 00 0 0 1 [ 2] .text PROGBITS 0000000000000000 000040 000000 00 AX 0 0 4 [ 3] syscalls:sys_enter_openat PROGBITS 0000000000000000 000040 000088 00 AX 0 0 8 [ 4] .relsyscalls:sys_enter_openat REL 0000000000000000 000a80 000010 10 25 3 8 [ 5] maps PROGBITS 0000000000000000 0000c8 00001c 00 WA 0 0 4 [ 6] .rodata.str1.1 PROGBITS 0000000000000000 0000e4 00000e 01 AMS 0 0 1 [ 7] license PROGBITS 0000000000000000 0000f2 000004 00 WA 0 0 1 [ 8] version PROGBITS 0000000000000000 0000f8 000004 00 WA 0 0 4 [ 9] .debug_str PROGBITS 0000000000000000 0000fc 0001d2 01 MS 0 0 1 [10] .debug_loc PROGBITS 0000000000000000 0002ce 000023 00 0 0 1 [11] .debug_abbrev PROGBITS 0000000000000000 0002f1 0000e3 00 0 0 1 [12] .debug_info PROGBITS 0000000000000000 0003d4 000182 00 0 0 1 [13] .rel.debug_info REL 0000000000000000 000a90 000210 10 25 12 8 [14] .debug_ranges PROGBITS 0000000000000000 000556 000030 00 0 0 1 [15] .debug_macinfo PROGBITS 0000000000000000 000586 000001 00 0 0 1 [16] .debug_pubnames PROGBITS 0000000000000000 000587 00006e 00 0 0 1 [17] .rel.debug_pubnames REL 0000000000000000 000ca0 000010 10 25 16 8 [18] .debug_pubtypes PROGBITS 0000000000000000 0005f5 000056 00 0 0 1 [19] .rel.debug_pubtypes REL 0000000000000000 000cb0 000010 10 25 18 8 [20] .debug_frame PROGBITS 0000000000000000 000650 000028 00 0 0 8 [21] .rel.debug_frame REL 0000000000000000 000cc0 000020 10 25 20 8 [22] .debug_line PROGBITS 0000000000000000 000678 0000a7 00 0 0 1 [23] .rel.debug_line REL 0000000000000000 000ce0 000010 10 25 22 8 [24] .llvm_addrsig LOOS+0xfff4c03 0000000000000000 000cf0 000004 00 E 25 0 1 [25] .symtab 
SYMTAB 0000000000000000 000720 000360 18 1 32 8 Key to Flags: W (write), A (alloc), X (execute), M (merge), S (strings), I (info), L (link order), O (extra OS processing required), G (group), T (TLS), C (compressed), x (unknown), o (OS specific), E (exclude), p (processor specific) [root@seventh perf]# Now lets use 'pahole --btf_encode' (or 'pahole -J') to add an ELF section to that object file with the conversion from the DWARF sections to a new one, for BTF: [root@seventh perf]# pahole --btf_encode tools/perf/examples/bpf/hello.o [root@seventh perf]# readelf -SW tools/perf/examples/bpf/hello.o There are 27 section headers, starting at offset 0x1080: Section Headers: [Nr] Name Type Address Off Size ES Flg Lk Inf Al [ 0] NULL 0000000000000000 000000 000000 00 0 0 0 [ 1] .text PROGBITS 0000000000000000 000040 000000 00 AX 0 0 4 [ 2] syscalls:sys_enter_openat PROGBITS 0000000000000000 000040 000088 00 AX 0 0 8 [ 3] maps PROGBITS 0000000000000000 0000c8 00001c 00 WA 0 0 4 [ 4] .rodata.str1.1 PROGBITS 0000000000000000 0000e4 00000e 01 AMS 0 0 1 [ 5] license PROGBITS 0000000000000000 0000f2 000004 00 WA 0 0 1 [ 6] version PROGBITS 0000000000000000 0000f8 000004 00 WA 0 0 4 [ 7] .debug_str PROGBITS 0000000000000000 0000fc 0001d2 01 MS 0 0 1 [ 8] .debug_loc PROGBITS 0000000000000000 0002ce 000023 00 0 0 1 [ 9] .debug_abbrev PROGBITS 0000000000000000 0002f1 0000e3 00 0 0 1 [10] .debug_info PROGBITS 0000000000000000 0003d4 000182 00 0 0 1 [11] .debug_ranges PROGBITS 0000000000000000 000556 000030 00 0 0 1 [12] .debug_macinfo PROGBITS 0000000000000000 000586 000001 00 0 0 1 [13] .debug_pubnames PROGBITS 0000000000000000 000587 00006e 00 0 0 1 [14] .debug_pubtypes PROGBITS 0000000000000000 0005f5 000056 00 0 0 1 [15] .debug_frame PROGBITS 0000000000000000 000650 000028 00 0 0 8 [16] .debug_line PROGBITS 0000000000000000 000678 0000a7 00 0 0 1 [17] .symtab SYMTAB 0000000000000000 000720 000360 18 25 32 8 [18] .relsyscalls:sys_enter_openat REL 0000000000000000 000a80 000010 10 17 2 8 [19] .rel.debug_info REL 0000000000000000 000a90 000210 10 17 10 8 [20] .rel.debug_pubnames REL 0000000000000000 000ca0 000010 10 17 13 8 [21] .rel.debug_pubtypes REL 0000000000000000 000cb0 000010 10 17 14 8 [22] .rel.debug_frame REL 0000000000000000 000cc0 000020 10 17 15 8 [23] .rel.debug_line REL 0000000000000000 000ce0 000010 10 17 16 8 [24] .llvm_addrsig LOOS+0xfff4c03 0000000000000000 000cf0 000004 00 E 0 0 1 [25] .strtab STRTAB 0000000000000000 000cf4 00019c 00 0 0 1 [26] .BTF PROGBITS 0000000000000000 000e90 0001ea 00 0 0 1 Key to Flags: W (write), A (alloc), X (execute), M (merge), S (strings), I (info), L (link order), O (extra OS processing required), G (group), T (TLS), C (compressed), x (unknown), o (OS specific), E (exclude), p (processor specific) readelf: tools/perf/examples/bpf/hello.o: Warning: possibly corrupt ELF header - it has a non-zero program header offset, but no program headers [root@seventh perf]# That new ".BTF" section should then be parseable by the kernel, that has a BTF decoder, something not available for pahole at this time, but that will come in a later version. 
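For reference, that new ".BTF" section has a fixed layout: it starts with the header defined in the kernel's include/uapi/linux/btf.h, followed by the type and string sections that the header describes. Reproduced here, slightly abridged, as a guide to what pahole just wrote:

	#include <linux/types.h>

	#define BTF_MAGIC	0xeB9F

	struct btf_header {
		__u16	magic;		/* 0xeB9F, also reveals endianness */
		__u8	version;
		__u8	flags;
		__u32	hdr_len;	/* size of this header */
		/* offsets below are relative to the end of the header */
		__u32	type_off;	/* start of the type section */
		__u32	type_len;	/* length of the type section */
		__u32	str_off;	/* start of the string section */
		__u32	str_len;	/* length of the string section */
	};
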
When pahole tries to read the DWARF info in that BPF ELF file, hello.o, we can
see a problem that will require us to add another option to the .perfconfig
llvm section:

# pahole tools/perf/examples/bpf/hello.o
struct clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566eefef9c3777bd780ec4cbb9efa764633b76c) {
	clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566ec377 clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566eefef9c3777bd780ec4cbb9efa764633b76c); /*     0     4 */
	clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566ec377 clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566eefef9c3777bd780ec4cbb9efa764633b76c); /*     4     4 */
	clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566ec377 clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566eefef9c3777bd780ec4cbb9efa764633b76c); /*     8     4 */
	clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566ec377 clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566eefef9c3777bd780ec4cbb9efa764633b76c); /*    12     4 */
	clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566ec377 clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566eefef9c3777bd780ec4cbb9efa764633b76c); /*    16     4 */
	clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566ec377 clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566eefef9c3777bd780ec4cbb9efa764633b76c); /*    20     4 */
	clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566ec377 clang version 8.0.0 (http://llvm.org/git/clang.git 8587270a739ee30c926a76d5657e65e85b560f6e) (http://llvm.org/git/llvm.git 0566eefef9c3777bd780ec4cbb9efa764633b76c); /*    24     4 */

	/* size: 28, cachelines: 1, members: 7 */
	/* last cacheline: 28 bytes */
};
#

We need to pass some options to llvm, via the llvm.opts variable in ~/.perfconfig:

[root@seventh perf]# cat ~/.perfconfig
[llvm]
	dump-obj = true
	clang-opt = -g
	opts = -mattr=dwarfris
[root@seventh perf]# perf trace -e tools/perf/examples/bpf/hello.c cat /etc/passwd > /dev/null
LLVM: dumping tools/perf/examples/bpf/hello.o
     0.000 __bpf_stdout__:Hello, world
     0.018 __bpf_stdout__:Hello, world
     0.209 __bpf_stdout__:Hello, world
[root@seventh perf]# pahole tools/perf/examples/bpf/hello.o
struct bpf_map {
	unsigned int               type;          /*     0     4 */
	unsigned int               key_size;      /*     4     4 */
	unsigned int               value_size;    /*     8     4 */
	unsigned int               max_entries;   /*    12     4 */
	unsigned int               map_flags;     /*    16     4 */
	unsigned int               inner_map_idx; /*    20     4 */
	unsigned int               numa_node;     /*    24     4 */

	/* size: 28, cachelines: 1, members: 7 */
	/* last cacheline: 28 bytes */
};
[root@seventh perf]#

This is not needed when using elfutils >= 0.173; pahole will just work as above.
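The struct bpf_map that pahole prints above is the map-definition convention used by perf's BPF examples (the real definition lives in the kernel sources, under tools/perf/include/bpf/). A rough C sketch of how such a program declares a map, offered as an approximation rather than a verbatim copy (flip_table is a hypothetical name):

	/* Approximation of perf's BPF map declaration convention; the
	 * members match the struct bpf_map layout pahole printed above. */
	struct bpf_map {
		unsigned int type;
		unsigned int key_size;
		unsigned int value_size;
		unsigned int max_entries;
		unsigned int map_flags;
		unsigned int inner_map_idx;
		unsigned int numa_node;
	};

	/* Instances go in the "maps" ELF section, where loaders such as
	 * perf look for them; 2 == BPF_MAP_TYPE_ARRAY in the bpf uapi. */
	struct bpf_map flip_table __attribute__((section("maps"), used)) = {
		.type		= 2,
		.key_size	= sizeof(int),
		.value_size	= sizeof(int),
		.max_entries	= 1,
	};
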
Now we need to go test the kernel. To load that file with a BTF section we can also use perf, passing the .o file instead of the .c one, skipping the compilation phase and using the modified .o file. We will also run in system wide mode, so that we can keep that BPF object loaded and attached to the tracepoint, and then use the kernel facilities to inspect the BTF file as read and processed by the kernel: # perf trace -e tools/perf/examples/bpf/hello.c 2> /dev/null Now to check if the kernel has the bpf filesystem: [acme@jouet perf]$ grep bpf /proc/filesystems nodev bpf [acme@jouet perf]$ [root@jouet ~]# mount -t bpf nodev /sys/fs/bpf [root@jouet ~]# mount | grep bpf nodev on /sys/fs/bpf type bpf (rw,relatime) [root@jouet ~]# cd /sys/fs/bpf [root@jouet bpf]# ls -la total 0 drwxrwxrwt. 2 root root 0 Aug 15 17:42 . drwxr-xr-x. 10 root root 0 Aug 13 15:04 .. [root@jouet bpf]# Work is planned to allow using BTF info to pretty print from the bpf fs, see: https://www.spinics.net/lists/netdev/msg518606.html Date: Sat, 11 Aug 2018 For bpftool, BTF pretty print support is missing for per-cpu maps. bpffs print for per-cpu hash/array maps need to be added as well. Will add them later. Acked-by: Yonghong Song To see what libbpf and its users, like perf, do when a ".BTF" ELF section is found in a BPF object being loaded via sys_bpf(), we can use 'perf ftrace' to show the sequence of events inside the kernel to load, validate and initialize data structures related to the request: # perf ftrace -G *btf* perf trace -e tools/perf/examples/bpf/hello.o cat /etc/passwd 3) | bpf_btf_load() { 3) | capable() { 3) | ns_capable_common() { 3) | security_capable() { 3) 0.048 us | cap_capable(); 3) | selinux_capable() { 3) 0.092 us | cred_has_capability(); 3) 0.444 us | } 3) 1.387 us | } 3) 1.764 us | } 3) 2.168 us | } 3) | btf_new_fd() { 3) | kmem_cache_alloc_trace() { 3) | _cond_resched() { 3) 0.041 us | rcu_all_qs(); 3) 0.407 us | } 3) 0.040 us | should_failslab(); 3) 0.161 us | prefetch_freepointer(); 3) 0.097 us | memcg_kmem_put_cache(); 3) 2.719 us | } 3) | kmem_cache_alloc_trace() { 3) | _cond_resched() { 3) 0.040 us | rcu_all_qs(); 3) 0.409 us | } 3) 0.040 us | should_failslab(); 3) 0.110 us | prefetch_freepointer(); 3) 0.099 us | memcg_kmem_put_cache(); 3) 2.296 us | } 3) 0.054 us | bpf_check_uarg_tail_zero(); 3) | __check_object_size() { 3) 0.152 us | __virt_addr_valid(); 3) 0.047 us | __check_heap_object(); 3) 0.040 us | check_stack_object(); 3) 1.465 us | } 3) 0.041 us | btf_sec_info_cmp(); 3) | kvmalloc_node() { 3) | __kmalloc_node() { 3) 0.051 us | kmalloc_slab(); 3) | _cond_resched() { 3) 0.042 us | rcu_all_qs(); 3) 0.401 us | } 3) 0.038 us | should_failslab(); 3) 0.040 us | memcg_kmem_put_cache(); 3) 2.168 us | } 3) 2.591 us | } 3) | __check_object_size() { 3) 0.108 us | __virt_addr_valid(); 3) 0.050 us | __check_heap_object(); 3) 0.039 us | check_stack_object(); 3) 1.469 us | } 3) | btf_struct_check_meta() { 3) 0.057 us | __btf_verifier_log_type(); 3) 0.057 us | btf_verifier_log_member(); 3) 0.043 us | btf_verifier_log_member(); 3) 0.042 us | btf_verifier_log_member(); 3) 0.043 us | btf_verifier_log_member(); 3) 0.043 us | btf_verifier_log_member(); 3) | btf_verifier_log_member() { 3) ==========> | 3) | smp_irq_work_interrupt() { 3) | irq_enter() { 3) | rcu_irq_enter() { 3) 0.038 us | rcu_nmi_enter(); 3) 0.412 us | } 3) 0.054 us | irqtime_account_irq(); 3) 1.409 us | } 3) | __wake_up() { 3) | __wake_up_common_lock() { 3) 0.040 us | _raw_spin_lock_irqsave(); 3) 0.051 us | 
__wake_up_common(); 3) 0.044 us | _raw_spin_unlock_irqrestore(); 3) 1.155 us | } 3) 1.508 us | } 3) | irq_exit() { 3) 0.062 us | irqtime_account_irq(); 3) 0.038 us | idle_cpu(); 3) | rcu_irq_exit() { 3) 0.038 us | rcu_nmi_exit(); 3) 0.419 us | } 3) 1.601 us | } 3) 6.230 us | } 3) <========== | 3) 0.088 us | } /* btf_verifier_log_member */ 3) 0.041 us | btf_verifier_log_member(); 3) + 10.759 us | } 3) | kvmalloc_node() { 3) | __kmalloc_node() { 3) 0.043 us | kmalloc_slab(); 3) | _cond_resched() { 3) 0.037 us | rcu_all_qs(); 3) 0.455 us | } 3) 0.040 us | should_failslab(); 3) 0.037 us | memcg_kmem_put_cache(); 3) 2.227 us | } 3) 2.624 us | } /* kvmalloc_node */ 3) | kvfree() { 3) 0.048 us | kfree(); 3) 0.662 us | } 3) | btf_int_check_meta() { 3) 0.043 us | __btf_verifier_log_type(); 3) 0.457 us | } 3) | btf_array_check_meta() { 3) 0.041 us | __btf_verifier_log_type(); 3) 0.393 us | } 3) | btf_int_check_meta() { 3) 0.094 us | __btf_verifier_log_type(); 3) 0.447 us | } 3) | btf_int_check_meta() { 3) 0.043 us | __btf_verifier_log_type(); 3) 0.573 us | } 3) | btf_int_check_meta() { 3) 0.085 us | __btf_verifier_log_type(); 3) 0.446 us | } 3) | btf_ref_type_check_meta() { 3) 0.042 us | __btf_verifier_log_type(); 3) 0.451 us | } 3) | btf_ref_type_check_meta() { 3) 0.042 us | __btf_verifier_log_type(); 3) 0.427 us | } 3) | btf_ref_type_check_meta() { 3) 0.042 us | __btf_verifier_log_type(); 3) 0.397 us | } 3) | btf_ref_type_check_meta() { 3) 0.041 us | __btf_verifier_log_type(); 3) 0.399 us | } 3) | btf_int_check_meta() { 3) 0.043 us | __btf_verifier_log_type(); 3) 0.602 us | } 3) | btf_ref_type_check_meta() { 3) 0.040 us | __btf_verifier_log_type(); 3) 0.733 us | } 3) | btf_array_check_meta() { 3) 0.094 us | __btf_verifier_log_type(); 3) 0.452 us | } 3) | kvmalloc_node() { 3) | __kmalloc_node() { 3) 0.039 us | kmalloc_slab(); 3) | _cond_resched() { 3) 0.041 us | rcu_all_qs(); 3) 0.579 us | } 3) 0.039 us | should_failslab(); 3) 0.042 us | memcg_kmem_put_cache(); 3) 2.538 us | } 3) 2.886 us | } 3) | kvmalloc_node() { 3) | __kmalloc_node() { 3) 0.041 us | kmalloc_slab(); 3) | _cond_resched() { 3) 0.038 us | rcu_all_qs(); 3) 0.708 us | } 3) 0.038 us | should_failslab(); 3) 0.040 us | memcg_kmem_put_cache(); 3) 2.483 us | } 3) 2.829 us | } 3) | kvmalloc_node() { 3) | __kmalloc_node() { 3) 0.057 us | kmalloc_slab(); 3) | _cond_resched() { 3) 0.040 us | rcu_all_qs(); 3) 0.533 us | } 3) 0.039 us | should_failslab(); 3) 0.038 us | memcg_kmem_put_cache(); 3) 2.680 us | } 3) 3.171 us | } 3) 0.054 us | env_stack_push(); 3) | btf_struct_resolve() { 3) 0.051 us | env_type_is_resolve_sink.isra.19(); 3) 0.039 us | btf_int_check_member(); 3) 0.039 us | env_type_is_resolve_sink.isra.19(); 3) 0.039 us | btf_int_check_member(); 3) 0.040 us | env_type_is_resolve_sink.isra.19(); 3) 0.040 us | btf_int_check_member(); 3) 0.039 us | env_type_is_resolve_sink.isra.19(); 3) 0.099 us | btf_int_check_member(); 3) 0.040 us | env_type_is_resolve_sink.isra.19(); 3) 0.042 us | btf_int_check_member(); 3) 0.040 us | env_type_is_resolve_sink.isra.19(); 3) 0.038 us | btf_int_check_member(); 3) 0.038 us | env_type_is_resolve_sink.isra.19(); 3) 0.039 us | btf_int_check_member(); 3) 6.545 us | } 3) 0.053 us | env_stack_push(); 3) | btf_array_resolve() { 3) 0.039 us | env_type_is_resolve_sink.isra.19(); 3) 0.090 us | btf_type_id_size(); 3) 0.060 us | btf_type_int_is_regular(); 3) 0.058 us | env_type_is_resolve_sink.isra.19(); 3) 0.051 us | btf_type_id_size(); 3) 0.055 us | btf_type_int_is_regular(); 3) 3.414 us | } 3) 0.041 us | 
btf_type_id_size(); 3) 0.057 us | env_stack_push(); 3) | btf_ptr_resolve() { 3) 0.056 us | env_type_is_resolve_sink.isra.19(); 3) 0.054 us | env_stack_push(); 3) 1.056 us | } 3) 0.063 us | btf_ptr_resolve(); 3) | btf_ptr_resolve() { 3) 0.049 us | env_type_is_resolve_sink.isra.19(); 3) 0.086 us | btf_type_id_size(); 3) 1.052 us | } 3) 0.045 us | env_stack_push(); 3) 0.060 us | btf_ptr_resolve(); 3) 0.045 us | env_stack_push(); 3) | btf_ptr_resolve() { 3) 0.039 us | env_type_is_resolve_sink.isra.19(); 3) 0.062 us | btf_type_id_size(); 3) 1.325 us | } 3) 0.054 us | env_stack_push(); 3) | btf_modifier_resolve() { 3) 0.061 us | env_type_is_resolve_sink.isra.19(); 3) 0.043 us | btf_type_id_size(); 3) 0.877 us | } 3) 0.052 us | env_stack_push(); 3) | btf_array_resolve() { 3) 0.060 us | env_type_is_resolve_sink.isra.19(); 3) 0.051 us | btf_type_id_size(); 3) 0.042 us | btf_type_int_is_regular(); 3) 0.040 us | env_type_is_resolve_sink.isra.19(); 3) 0.042 us | btf_type_id_size(); 3) 0.041 us | btf_type_int_is_regular(); 3) 2.822 us | } 3) 0.048 us | btf_type_id_size(); 3) | kvfree() { 3) 0.148 us | kfree(); 3) 0.685 us | } 3) 0.287 us | kfree(); 3) 0.042 us | _raw_spin_lock_bh(); 3) | kmem_cache_alloc() { 3) 0.040 us | should_failslab(); 3) 0.111 us | prefetch_freepointer(); 3) 0.094 us | memcg_kmem_put_cache(); 3) 2.139 us | } 3) | _raw_spin_unlock_bh() { 3) 0.079 us | __local_bh_enable_ip(); 3) 0.460 us | } 3) | anon_inode_getfd() { 3) | get_unused_fd_flags() { 3) | __alloc_fd() { 3) 0.040 us | _raw_spin_lock(); 3) 0.041 us | expand_files(); 3) 1.374 us | } 3) 1.759 us | } 3) | anon_inode_getfile() { 3) | d_alloc_pseudo() { 3) | __d_alloc() { 3) | kmem_cache_alloc() { 3) | _cond_resched() { 3) 0.035 us | rcu_all_qs(); 3) 0.507 us | } 3) 0.040 us | should_failslab(); 3) | memcg_kmem_get_cache() { 3) 0.091 us | get_mem_cgroup_from_mm(); 3) 0.633 us | } 3) 0.111 us | prefetch_freepointer(); 3) 0.082 us | memcg_kmem_put_cache(); 3) 4.178 us | } 3) 0.162 us | d_set_d_op(); 3) 5.545 us | } 3) 6.270 us | } 3) 0.112 us | mntget(); 3) 0.125 us | ihold(); 3) | d_instantiate() { 3) 0.120 us | security_d_instantiate(); 3) 0.106 us | _raw_spin_lock(); 3) | __d_instantiate() { 3) 0.069 us | d_flags_for_inode(); 3) 0.090 us | _raw_spin_lock(); 3) 1.483 us | } 3) 2.767 us | } 3) | alloc_file() { 3) | get_empty_filp() { 3) | kmem_cache_alloc() { 3) | _cond_resched() { 3) 0.039 us | rcu_all_qs(); 3)root:x:0:0:root:/root:/bin/bash bin:x:1:1:bin:/bin:/sbin/nologin daemon:x:2:2:daemon:/sbin:/sbin/nologin adm:x:3:4:adm:/var/adm:/sbin/nologin 0.382 us | } 3) 0.040 us | should_failslab(); 3) | memcg_kmem_get_cache() { 3) 0.039 us | get_mem_cgroup_from_mm(); 3) 0.626 us | } 3) 0.050 us | prefetch_freepointer(); 3) 0.059 us | memcg_kmem_put_cache(); 3) 3.280 us | } 3) | security_file_alloc() { 3) | selinux_file_alloc_security() { 3) | kmem_cache_alloc() { 3) | _cond_resched() { 3) 0.038 us | rcu_all_qs(); 3) 0.422 us | } 3) 0.040 us | should_failslab(); 3) 0.051 us | prefetch_freepointer(); 3) 0.054 us | memcg_kmem_put_cache(); 3) 2.660 us | } 3) 3.062 us | } 3) 3.548 us | } 3) 0.039 us | __mutex_init(); 3) 8.091 us | } 3) 8.617 us | } 3) + 20.810 us | } 3) | fd_install() { 3) 0.054 us | __fd_install(); 3) 0.723 us | } 3) + 24.438 us | } 3) ! 109.639 us | } 3) ! 
112.925 us | } 3) | btf_release() { 3) | btf_put() { 3) 0.145 us | _raw_spin_lock_irqsave(); 3) | call_rcu_sched() { 3) | __call_rcu() { 3) 0.082 us | rcu_segcblist_enqueue(); 3) 1.323 us | } 3) 1.782 us | } 3) 0.069 us | _raw_spin_unlock_irqrestore(); 3) | call_rcu_sched() { 3) | __call_rcu() { 3) 0.069 us | rcu_segcblist_enqueue(); 3) 0.541 us | } 3) 0.984 us | } 3) 5.210 us | } 3) 5.954 us | } This should be enough for us to validate pahole's BTF encoder, and now one can use 'pahole -F btf' to obtain mostly the same results as with the default use of '-F dwarf', modulo things like explicit alignments that are not present in BTF and need some work to be inferred from existing non-natural alignment holes. - Arnaldo dwarves-dfsg-1.15/README.ctracer000066400000000000000000000031341350511416500163400ustar00rootroot00000000000000Basic instructions to use ctracer: 1. Install dwarves. If you are not that excited about building it, I'm keeping rpms for Fedora Core 6 here: http://oops.ghostprotocols.net:81/acme/dwarves/rpm/ The .src.rpm is there in case you want to rebuild it for another rpm based distro. Since Fedora 9 you just have to run: yum install dwarves 2. Build the kernel with CONFIG_DEBUG_INFO=y, i.e. gcc -g; that will insert the DWARF info needed by all the pahole tools, ctracer, etc. Or just install the kernel-debuginfo rpm package on FC6; other distros have it with a different name, it's just the kernel built with debug info. 3. Assuming you installed the kernel-debuginfo package, to run ctracer on your workstation, just do the following steps: mkdir foo cd foo ln -s /usr/share/dwarves/runtime/* . make CLASS=sock # to trace struct sock methods, this one is safe, try others # and tell me your horror (or success :-) ) story. (kbuild gurus, send suggestions to simplify this procedure! :-) ) 4. Load the resulting module: insmod ctracer.ko; dmesg will show how many probes were successfully installed 5. Do some related activity (ssh, in the above example should do) 6. Make sure debugfs is mounted [root@filo ~]# mount -t debugfs none_debugfs /sys/kernel/debug/ 7. Get the log: cat /sys/kernel/debug/ctracer0 > /tmp/ctracer.log 8. Generate the callgraph! make callgraph 9. rmmod ctracer Change the shipped Makefile accordingly to build a module for qemu or another test machine. The relay transport is mostly ready and will be included in the upcoming changesets. dwarves-dfsg-1.15/btf_encoder.c000066400000000000000000000135131350511416500164560ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2019 Facebook Derived from ctf_encoder.c, which is: Copyright (C) Arnaldo Carvalho de Melo Copyright (C) Red Hat Inc */ #include "dwarves.h" #include "libbtf.h" #include "lib/bpf/include/uapi/linux/btf.h" #include "hash.h" #include "elf_symtab.h" #include "btf_encoder.h" #include static int tag__check_id_drift(const struct tag *tag, uint32_t core_id, uint32_t btf_type_id, uint32_t type_id_off) { if (btf_type_id != (core_id + type_id_off)) { fprintf(stderr, "%s: %s id drift, core_id: %u, btf_type_id: %u, type_id_off: %u\n", __func__, dwarf_tag_name(tag->tag), core_id, btf_type_id, type_id_off); return -1; } return 0; } static int32_t structure_type__encode(struct btf_elf *btfe, struct tag *tag, uint32_t type_id_off) { struct type *type = tag__type(tag); struct class_member *pos; bool kind_flag = false; int32_t type_id; uint8_t kind; kind = (tag->tag == DW_TAG_union_type) ? 
BTF_KIND_UNION : BTF_KIND_STRUCT; /* Although no_bitfield_type_recode has been set true * in pahole.c if BTF encoding is requested, we still check * the value here. So if no_bitfield_type_recode is set * to false for whatever reason, we do not accidentally * set kind_flag incorrectly. */ if (no_bitfield_type_recode) { /* kind_flag only set where there is a bitfield * in the struct. */ type__for_each_data_member(type, pos) { if (pos->bitfield_size) { kind_flag = true; break; } } } type_id = btf_elf__add_struct(btfe, kind, type->namespace.name, kind_flag, type->size, type->nr_members); if (type_id < 0) return type_id; type__for_each_data_member(type, pos) { /* * dwarf_loader uses DWARF's recommended bit offset addressing * scheme, which conforms to BTF requirement, so no conversion * is required. */ if (btf_elf__add_member(btfe, pos->name, type_id_off + pos->tag.type, kind_flag, pos->bitfield_size, pos->bit_offset)) return -1; } return type_id; } static uint32_t array_type__nelems(struct tag *tag) { int i; uint32_t nelem = 1; struct array_type *array = tag__array_type(tag); for (i = array->dimensions - 1; i >= 0; --i) nelem *= array->nr_entries[i]; return nelem; } static int32_t enumeration_type__encode(struct btf_elf *btfe, struct tag *tag) { struct type *etype = tag__type(tag); struct enumerator *pos; int32_t type_id; type_id = btf_elf__add_enum(btfe, etype->namespace.name, etype->size, etype->nr_members); if (type_id < 0) return type_id; type__for_each_enumerator(etype, pos) if (btf_elf__add_enum_val(btfe, pos->name, pos->value)) return -1; return type_id; } static int tag__encode_btf(struct tag *tag, uint32_t core_id, struct btf_elf *btfe, uint32_t array_index_id, uint32_t type_id_off) { /* single out type 0 as it represents special type "void" */ uint32_t ref_type_id = tag->type == 0 ? 0 : type_id_off + tag->type; switch (tag->tag) { case DW_TAG_base_type: return btf_elf__add_base_type(btfe, tag__base_type(tag)); case DW_TAG_const_type: return btf_elf__add_ref_type(btfe, BTF_KIND_CONST, ref_type_id, 0, false); case DW_TAG_pointer_type: return btf_elf__add_ref_type(btfe, BTF_KIND_PTR, ref_type_id, 0, false); case DW_TAG_restrict_type: return btf_elf__add_ref_type(btfe, BTF_KIND_RESTRICT, ref_type_id, 0, false); case DW_TAG_volatile_type: return btf_elf__add_ref_type(btfe, BTF_KIND_VOLATILE, ref_type_id, 0, false); case DW_TAG_typedef: return btf_elf__add_ref_type(btfe, BTF_KIND_TYPEDEF, ref_type_id, tag__namespace(tag)->name, false); case DW_TAG_structure_type: case DW_TAG_union_type: case DW_TAG_class_type: if (tag__type(tag)->declaration) return btf_elf__add_ref_type(btfe, BTF_KIND_FWD, 0, tag__namespace(tag)->name, tag->tag == DW_TAG_union_type); else return structure_type__encode(btfe, tag, type_id_off); case DW_TAG_array_type: /* TODO: Encode one dimension at a time. */ return btf_elf__add_array(btfe, ref_type_id, array_index_id, array_type__nelems(tag)); case DW_TAG_enumeration_type: return enumeration_type__encode(btfe, tag); case DW_TAG_subroutine_type: return btf_elf__add_func_proto(btfe, tag__ftype(tag), type_id_off); default: fprintf(stderr, "Unsupported DW_TAG_%s(0x%x)\n", dwarf_tag_name(tag->tag), tag->tag); return -1; } } /* * FIXME: It's in the DWARF loader, we have to find a better handoff * mechanism... 
*/ extern struct strings *strings; static struct btf_elf *btfe; static uint32_t array_index_id; int btf_encoder__encode() { int err; err = btf_elf__encode(btfe, 0); btf_elf__delete(btfe); btfe = NULL; return err; } int cu__encode_btf(struct cu *cu, int verbose) { bool add_index_type = false; uint32_t type_id_off; uint32_t core_id; struct tag *pos; int err = 0; if (btfe && strcmp(btfe->filename, cu->filename)) { err = btf_encoder__encode(); if (err) goto out; /* Finished one file, add one empty line */ if (verbose) printf("\n"); } if (!btfe) { btfe = btf_elf__new(cu->filename, cu->elf); if (!btfe) return -1; btf_elf__set_strings(btfe, &strings->gb); /* cu__find_base_type_by_name() takes "type_id_t *id" */ type_id_t id; if (!cu__find_base_type_by_name(cu, "int", &id)) { add_index_type = true; id = cu->types_table.nr_entries; } array_index_id = id; if (verbose) printf("File %s:\n", btfe->filename); } btf_elf__verbose = verbose; type_id_off = btfe->type_index; cu__for_each_type(cu, core_id, pos) { int32_t btf_type_id = tag__encode_btf(pos, core_id, btfe, array_index_id, type_id_off); if (btf_type_id < 0 || tag__check_id_drift(pos, core_id, btf_type_id, type_id_off)) { err = -1; goto out; } } if (add_index_type) { struct base_type bt = {}; bt.name = 0; bt.bit_size = 32; btf_elf__add_base_type(btfe, &bt); } out: if (err) btf_elf__delete(btfe); return err; } dwarves-dfsg-1.15/btf_encoder.h000066400000000000000000000005331350511416500164610ustar00rootroot00000000000000#ifndef _BTF_ENCODER_H_ #define _BTF_ENCODER_H_ 1 /* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2019 Facebook Derived from ctf_encoder.h, which is: Copyright (C) Arnaldo Carvalho de Melo */ struct cu; int btf_encoder__encode(); int cu__encode_btf(struct cu *cu, int verbose); #endif /* _BTF_ENCODER_H_ */ dwarves-dfsg-1.15/btf_loader.c000066400000000000000000000346641350511416500163170ustar00rootroot00000000000000/* * btf_loader.c * * Copyright (C) 2018 Arnaldo Carvalho de Melo * * Based on ctf_loader.c that, in turn, was based on ctfdump.c: CTF dumper. * * Copyright (C) 2008 David S. 
Miller */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "libbtf.h" #include "lib/bpf/include/uapi/linux/btf.h" #include "dutil.h" #include "dwarves.h" /* * FIXME: We should just get the table from the BTF ELF section * and use it directly */ extern struct strings *strings; static void *tag__alloc(const size_t size) { struct tag *tag = zalloc(size); if (tag != NULL) tag->top_level = 1; return tag; } static int btf_elf__load_ftype(struct btf_elf *btfe, struct ftype *proto, uint32_t tag, uint32_t type, uint16_t vlen, struct btf_param *args, uint32_t id) { int i; proto->tag.tag = tag; proto->tag.type = type; INIT_LIST_HEAD(&proto->parms); for (i = 0; i < vlen; ++i) { struct btf_param param = { .name_off = btf_elf__get32(btfe, &args[i].name_off), .type = btf_elf__get32(btfe, &args[i].type), }; if (param.type == 0) proto->unspec_parms = 1; else { struct parameter *p = tag__alloc(sizeof(*p)); if (p == NULL) goto out_free_parameters; p->tag.tag = DW_TAG_formal_parameter; p->tag.type = param.type; p->name = param.name_off; ftype__add_parameter(proto, p); } } vlen *= sizeof(*args); cu__add_tag_with_id(btfe->priv, &proto->tag, id); return vlen; out_free_parameters: ftype__delete(proto, btfe->priv); return -ENOMEM; } static struct base_type *base_type__new(strings_t name, uint32_t attrs, uint8_t float_type, size_t size) { struct base_type *bt = tag__alloc(sizeof(*bt)); if (bt != NULL) { bt->name = name; bt->bit_size = size; bt->is_signed = attrs & BTF_INT_SIGNED; bt->is_bool = attrs & BTF_INT_BOOL; bt->name_has_encoding = false; bt->float_type = float_type; } return bt; } static void type__init(struct type *type, uint32_t tag, strings_t name, size_t size) { INIT_LIST_HEAD(&type->node); INIT_LIST_HEAD(&type->namespace.tags); type->size = size; type->namespace.tag.tag = tag; type->namespace.name = name; type->namespace.sname = 0; } static struct type *type__new(uint16_t tag, strings_t name, size_t size) { struct type *type = tag__alloc(sizeof(*type)); if (type != NULL) type__init(type, tag, name, size); return type; } static struct class *class__new(strings_t name, size_t size) { struct class *class = tag__alloc(sizeof(*class)); if (class != NULL) { type__init(&class->type, DW_TAG_structure_type, name, size); INIT_LIST_HEAD(&class->vtable); } return class; } static int create_new_base_type(struct btf_elf *btfe, void *ptr, struct btf_type *tp, uint32_t id) { uint32_t *enc = ptr; uint32_t eval = btf_elf__get32(btfe, enc); uint32_t attrs = BTF_INT_ENCODING(eval); strings_t name = btf_elf__get32(btfe, &tp->name_off); struct base_type *base = base_type__new(name, attrs, 0, BTF_INT_BITS(eval)); if (base == NULL) return -ENOMEM; base->tag.tag = DW_TAG_base_type; cu__add_tag_with_id(btfe->priv, &base->tag, id); return sizeof(*enc); } static int create_new_array(struct btf_elf *btfe, void *ptr, uint32_t id) { struct btf_array *ap = ptr; struct array_type *array = tag__alloc(sizeof(*array)); if (array == NULL) return -ENOMEM; /* FIXME: where to get the number of dimensions? * is it flattened? 
*/ array->dimensions = 1; array->nr_entries = malloc(sizeof(uint32_t)); if (array->nr_entries == NULL) { free(array); return -ENOMEM; } array->nr_entries[0] = btf_elf__get32(btfe, &ap->nelems); array->tag.tag = DW_TAG_array_type; array->tag.type = btf_elf__get32(btfe, &ap->type); cu__add_tag_with_id(btfe->priv, &array->tag, id); return sizeof(*ap); } static int create_members(struct btf_elf *btfe, void *ptr, int vlen, struct type *class, bool kflag) { struct btf_member *mp = ptr; int i; for (i = 0; i < vlen; i++) { struct class_member *member = zalloc(sizeof(*member)); uint32_t offset; if (member == NULL) return -ENOMEM; member->tag.tag = DW_TAG_member; member->tag.type = btf_elf__get32(btfe, &mp[i].type); member->name = btf_elf__get32(btfe, &mp[i].name_off); offset = btf_elf__get32(btfe, &mp[i].offset); if (kflag) { member->bit_offset = BTF_MEMBER_BIT_OFFSET(offset); member->bitfield_size = BTF_MEMBER_BITFIELD_SIZE(offset); } else { member->bit_offset = offset; member->bitfield_size = 0; } member->byte_offset = member->bit_offset / 8; /* sizes and offsets will be corrected at class__fixup_btf_bitfields */ type__add_member(class, member); } return sizeof(*mp); } static int create_new_class(struct btf_elf *btfe, void *ptr, int vlen, struct btf_type *tp, uint64_t size, uint32_t id, bool kflag) { strings_t name = btf_elf__get32(btfe, &tp->name_off); struct class *class = class__new(name, size); int member_size = create_members(btfe, ptr, vlen, &class->type, kflag); if (member_size < 0) goto out_free; cu__add_tag_with_id(btfe->priv, &class->type.namespace.tag, id); return (vlen * member_size); out_free: class__delete(class, btfe->priv); return -ENOMEM; } static int create_new_union(struct btf_elf *btfe, void *ptr, int vlen, struct btf_type *tp, uint64_t size, uint32_t id, bool kflag) { strings_t name = btf_elf__get32(btfe, &tp->name_off); struct type *un = type__new(DW_TAG_union_type, name, size); int member_size = create_members(btfe, ptr, vlen, un, kflag); if (member_size < 0) goto out_free; cu__add_tag_with_id(btfe->priv, &un->namespace.tag, id); return (vlen * member_size); out_free: type__delete(un, btfe->priv); return -ENOMEM; } static struct enumerator *enumerator__new(strings_t name, uint32_t value) { struct enumerator *en = tag__alloc(sizeof(*en)); if (en != NULL) { en->name = name; en->value = value; en->tag.tag = DW_TAG_enumerator; } return en; } static int create_new_enumeration(struct btf_elf *btfe, void *ptr, int vlen, struct btf_type *tp, uint16_t size, uint32_t id) { struct btf_enum *ep = ptr; uint16_t i; struct type *enumeration = type__new(DW_TAG_enumeration_type, btf_elf__get32(btfe, &tp->name_off), size ? 
size * 8 : (sizeof(int) * 8)); if (enumeration == NULL) return -ENOMEM; for (i = 0; i < vlen; i++) { strings_t name = btf_elf__get32(btfe, &ep[i].name_off); uint32_t value = btf_elf__get32(btfe, &ep[i].val); struct enumerator *enumerator = enumerator__new(name, value); if (enumerator == NULL) goto out_free; enumeration__add(enumeration, enumerator); } cu__add_tag_with_id(btfe->priv, &enumeration->namespace.tag, id); return (vlen * sizeof(*ep)); out_free: enumeration__delete(enumeration, btfe->priv); return -ENOMEM; } static int create_new_subroutine_type(struct btf_elf *btfe, void *ptr, int vlen, struct btf_type *tp, uint32_t id) { struct btf_param *args = ptr; unsigned int type = btf_elf__get32(btfe, &tp->type); struct ftype *proto = tag__alloc(sizeof(*proto)); if (proto == NULL) return -ENOMEM; vlen = btf_elf__load_ftype(btfe, proto, DW_TAG_subroutine_type, type, vlen, args, id); return vlen < 0 ? -ENOMEM : vlen; } static int create_new_forward_decl(struct btf_elf *btfe, struct btf_type *tp, uint64_t size, uint32_t id) { strings_t name = btf_elf__get32(btfe, &tp->name_off); struct class *fwd = class__new(name, size); if (fwd == NULL) return -ENOMEM; fwd->type.declaration = 1; cu__add_tag_with_id(btfe->priv, &fwd->type.namespace.tag, id); return 0; } static int create_new_typedef(struct btf_elf *btfe, struct btf_type *tp, uint64_t size, uint32_t id) { strings_t name = btf_elf__get32(btfe, &tp->name_off); unsigned int type_id = btf_elf__get32(btfe, &tp->type); struct type *type = type__new(DW_TAG_typedef, name, size); if (type == NULL) return -ENOMEM; type->namespace.tag.type = type_id; cu__add_tag_with_id(btfe->priv, &type->namespace.tag, id); return 0; } static int create_new_tag(struct btf_elf *btfe, int type, struct btf_type *tp, uint32_t id) { unsigned int type_id = btf_elf__get32(btfe, &tp->type); struct tag *tag = zalloc(sizeof(*tag)); if (tag == NULL) return -ENOMEM; switch (type) { case BTF_KIND_CONST: tag->tag = DW_TAG_const_type; break; case BTF_KIND_PTR: tag->tag = DW_TAG_pointer_type; break; case BTF_KIND_RESTRICT: tag->tag = DW_TAG_restrict_type; break; case BTF_KIND_VOLATILE: tag->tag = DW_TAG_volatile_type; break; default: printf("%s: FOO %d\n\n", __func__, type); return 0; } tag->type = type_id; cu__add_tag_with_id(btfe->priv, tag, id); return 0; } void *btf_elf__get_buffer(struct btf_elf *btfe) { return btfe->data; } size_t btf_elf__get_size(struct btf_elf *btfe) { return btfe->size; } static int btf_elf__load_types(struct btf_elf *btfe) { void *btf_buffer = btf_elf__get_buffer(btfe); struct btf_header *hp = btf_buffer; void *btf_contents = btf_buffer + sizeof(*hp), *type_section = (btf_contents + btf_elf__get32(btfe, &hp->type_off)), *strings_section = (btf_contents + btf_elf__get32(btfe, &hp->str_off)); struct btf_type *type_ptr = type_section, *end = strings_section; uint32_t type_index = 0x0001; while (type_ptr < end) { uint32_t val = btf_elf__get32(btfe, &type_ptr->info); uint32_t type = BTF_INFO_KIND(val); int vlen = BTF_INFO_VLEN(val); void *ptr = type_ptr; uint32_t size = btf_elf__get32(btfe, &type_ptr->size); bool kflag = BTF_INFO_KFLAG(val); ptr += sizeof(struct btf_type); if (type == BTF_KIND_INT) { vlen = create_new_base_type(btfe, ptr, type_ptr, type_index); } else if (type == BTF_KIND_ARRAY) { vlen = create_new_array(btfe, ptr, type_index); } else if (type == BTF_KIND_STRUCT) { vlen = create_new_class(btfe, ptr, vlen, type_ptr, size, type_index, kflag); } else if (type == BTF_KIND_UNION) { vlen = create_new_union(btfe, ptr, vlen, type_ptr, size, type_index, 
kflag); } else if (type == BTF_KIND_ENUM) { vlen = create_new_enumeration(btfe, ptr, vlen, type_ptr, size, type_index); } else if (type == BTF_KIND_FWD) { vlen = create_new_forward_decl(btfe, type_ptr, size, type_index); } else if (type == BTF_KIND_TYPEDEF) { vlen = create_new_typedef(btfe, type_ptr, size, type_index); } else if (type == BTF_KIND_VOLATILE || type == BTF_KIND_PTR || type == BTF_KIND_CONST || type == BTF_KIND_RESTRICT) { vlen = create_new_tag(btfe, type, type_ptr, type_index); } else if (type == BTF_KIND_UNKN) { cu__table_nullify_type_entry(btfe->priv, type_index); fprintf(stderr, "BTF: idx: %d, off: %zd, Unknown\n", type_index, ((void *)type_ptr) - type_section); fflush(stderr); vlen = 0; } else if (type == BTF_KIND_FUNC_PROTO) { vlen = create_new_subroutine_type(btfe, ptr, vlen, type_ptr, type_index); } else if (type == BTF_KIND_FUNC) { /* BTF_KIND_FUNC corresponding to a defined subprogram. * This is not really a type and it won't be referred by any other types * either. Since types cannot be skipped, let us replace it with * a nullify_type_entry. * * No warning here since BTF_KIND_FUNC is a legal entry in BTF. */ cu__table_nullify_type_entry(btfe->priv, type_index); vlen = 0; } else { fprintf(stderr, "BTF: idx: %d, off: %zd, Unknown\n", type_index, ((void *)type_ptr) - type_section); fflush(stderr); vlen = 0; } if (vlen < 0) return vlen; type_ptr = ptr + vlen; type_index++; } return 0; } static int btf_elf__load_sections(struct btf_elf *btfe) { return btf_elf__load_types(btfe); } static int class__fixup_btf_bitfields(struct tag *tag, struct cu *cu, struct btf_elf *btfe) { struct class_member *pos; struct type *tag_type = tag__type(tag); type__for_each_data_member(tag_type, pos) { struct tag *type = tag__strip_typedefs_and_modifiers(&pos->tag, cu); if (type == NULL) /* FIXME: C++ BTF... 
*/ continue; pos->bitfield_offset = 0; pos->byte_size = tag__size(type, cu); pos->bit_size = pos->byte_size * 8; /* bitfield fixup is needed for enums and base types only */ if (type->tag != DW_TAG_base_type && type->tag != DW_TAG_enumeration_type) continue; /* if BTF data is incorrect and has size == 0, skip field, * instead of crashing */ if (pos->byte_size == 0) { continue; } if (pos->bitfield_size) { /* bitfields seem to be always aligned, no matter the packing */ pos->byte_offset = pos->bit_offset / pos->bit_size * pos->bit_size / 8; pos->bitfield_offset = pos->bit_offset - pos->byte_offset * 8; /* re-adjust bitfield offset if it is negative */ if (pos->bitfield_offset < 0) { pos->bitfield_offset += pos->bit_size; pos->byte_offset -= pos->byte_size; pos->bit_offset = pos->byte_offset * 8 + pos->bitfield_offset; } } else { pos->byte_offset = pos->bit_offset / 8; } } return 0; } static int cu__fixup_btf_bitfields(struct cu *cu, struct btf_elf *btfe) { int err = 0; struct tag *pos; list_for_each_entry(pos, &cu->tags, node) if (tag__is_struct(pos) || tag__is_union(pos)) { err = class__fixup_btf_bitfields(pos, cu, btfe); if (err) break; } return err; } static void btf_elf__cu_delete(struct cu *cu) { btf_elf__delete(cu->priv); cu->priv = NULL; } static const char *btf_elf__strings_ptr(const struct cu *cu, strings_t s) { return btf_elf__string(cu->priv, s); } struct debug_fmt_ops btf_elf__ops; int btf_elf__load_file(struct cus *cus, struct conf_load *conf, const char *filename) { int err; struct btf_elf *btfe = btf_elf__new(filename, NULL); if (btfe == NULL) return -1; struct cu *cu = cu__new(filename, btfe->wordsize, NULL, 0, filename); if (cu == NULL) return -1; cu->language = LANG_C; cu->uses_global_strings = false; cu->little_endian = !btfe->is_big_endian; cu->dfops = &btf_elf__ops; cu->priv = btfe; btfe->priv = cu; if (btf_elf__load(btfe) != 0) return -1; err = btf_elf__load_sections(btfe); if (err != 0) { cu__delete(cu); return err; } err = cu__fixup_btf_bitfields(cu, btfe); /* * The app stole this cu, possibly deleting it, * so forget about it */ if (conf && conf->steal && conf->steal(cu, conf)) return 0; cus__add(cus, cu); return err; } struct debug_fmt_ops btf_elf__ops = { .name = "btf", .load_file = btf_elf__load_file, .strings__ptr = btf_elf__strings_ptr, .cu__delete = btf_elf__cu_delete, }; dwarves-dfsg-1.15/btfdiff000077500000000000000000000020411350511416500153640ustar00rootroot00000000000000#!/bin/bash # SPDX-License-Identifier: GPL-2.0-only # Copyright © 2019 Red Hat Inc, Arnaldo Carvalho de Melo # Use pahole to produce output from BTF and from DWARF, then do a diff # Use --flat_arrays with DWARF as BTF, like CTF, flattens arrays. # Use --show_private_classes as BTF shows all structs, while pahole knows # if some struct is defined only inside another struct/class or in a function, # this information is not available when loading from BTF. 
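# Example usage (a minimal sketch; the vmlinux path below is illustrative, any # ELF file carrying both DWARF and a .BTF section should do): # btfdiff ../build/v5.1-rc4+/vmlinux 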
if [ $# -eq 0 ] ; then echo "Usage: btfdiff <file>" exit 1 fi file=$1 btf_output=$(mktemp /tmp/btfdiff.btf.XXXXXX) dwarf_output=$(mktemp /tmp/btfdiff.dwarf.XXXXXX) pahole_bin=${PAHOLE-"pahole"} ${pahole_bin} -F dwarf \ --flat_arrays \ --suppress_aligned_attribute \ --suppress_force_paddings \ --suppress_packed \ --show_private_classes $file > $dwarf_output ${pahole_bin} -F btf \ --suppress_packed \ $file > $btf_output diff -up $dwarf_output $btf_output rm -f $btf_output $dwarf_output exit 0 dwarves-dfsg-1.15/changes-v1.13000066400000000000000000000173561350511416500161500ustar00rootroot00000000000000Here is a summary of changes for the 1.13 version of pahole and its friends: - BTF - Use of the recently introduced BTF deduplication algorithm present in the Linux kernel's libbpf library, which allows for all the types in a multi compile unit binary such as vmlinux to be compactly stored, without duplicates. E.g.: from roughly: $ readelf -SW ../build/v5.1-rc4+/vmlinux | grep .debug_info.*PROGBITS [63] .debug_info PROGBITS 0000000000000000 1d80be0 c3c18b9 00 0 0 1 $ 195 MiB to: $ time pahole --btf_encode ../build/v5.1-rc4+/vmlinux real 0m19.168s user 0m17.707s # On a Lenovo t480s (i7-8650U) SSD sys 0m1.337s $ $ readelf -SW ../build/v5.1-rc4+/vmlinux | grep .BTF.*PROGBITS [78] .BTF PROGBITS 0000000000000000 27b49f61 1e23c3 00 0 0 1 $ ~2 MiB - Introduce a 'btfdiff' utility that prints the output from DWARF and from BTF, comparing the pretty printed outputs, running it on various linux kernel images, such as an allyesconfig for ppc64. Running it on the above 5.1-rc4+ vmlinux: $ btfdiff ../build/v5.1-rc4+/vmlinux $ No differences from the types generated from the DWARF ELF sections to the ones generated from the BTF ELF section. - Add a BTF loader, i.e. 'pahole -F btf' allows pretty printing of structs and unions in the same fashion as with DWARF info, and since BTF is way more compact, using it is much faster than using DWARF. $ cat ../build/v5.1-rc4+/vmlinux > /dev/null $ perf stat -e cycles pahole -F btf ../build/v5.1-rc4+/vmlinux > /dev/null Performance counter stats for 'pahole -F btf ../build/v5.1-rc4+/vmlinux': 229,712,692 cycles:u 0.063379597 seconds time elapsed 0.056265000 seconds user 0.006911000 seconds sys $ perf stat -e cycles pahole -F dwarf ../build/v5.1-rc4+/vmlinux > /dev/null Performance counter stats for 'pahole -F dwarf ../build/v5.1-rc4+/vmlinux': 49,579,679,466 cycles:u 13.063487352 seconds time elapsed 12.612512000 seconds user 0.426226000 seconds sys $ - Better union support: - Allow unions to be specified in pahole in the same fashion as structs $ pahole -C thread_union ../build/v5.1-rc4+/net/ipv4/tcp.o union thread_union { struct task_struct task __attribute__((__aligned__(64))); /* 0 11008 */ long unsigned int stack[2048]; /* 0 16384 */ }; $ - Infer __attribute__((__packed__)) when structs have no alignment holes and violate the natural alignment requirements of basic types (integers, longs, short integers). Several heuristics are used to infer the __packed__ attribute, see the changeset log for descriptions. 
$ pahole -F btf -C boot_e820_entry ../build/v5.1-rc4+/vmlinux struct boot_e820_entry { __u64 addr; /* 0 8 */ __u64 size; /* 8 8 */ __u32 type; /* 16 4 */ /* size: 20, cachelines: 1, members: 3 */ /* last cacheline: 20 bytes */ } __attribute__((__packed__)); $ $ pahole -F btf -C lzma_header ../build/v5.1-rc4+/vmlinux struct lzma_header { uint8_t pos; /* 0 1 */ uint32_t dict_size; /* 1 4 */ uint64_t dst_size; /* 5 8 */ /* size: 13, cachelines: 1, members: 3 */ /* last cacheline: 13 bytes */ } __attribute__((__packed__)); - Support DWARF5's DW_AT_alignment, which, together with the __packed__ attribute inference algorithms, produces output that, when compiled, should produce structures with layouts that match the original source code. See it in action with 'struct task_struct', which will also show some of the new information at the struct summary, at the end of the struct: $ pahole -C task_struct ../build/v5.1-rc4+/vmlinux | tail -19 /* --- cacheline 103 boundary (6592 bytes) --- */ struct vm_struct * stack_vm_area; /* 6592 8 */ refcount_t stack_refcount; /* 6600 4 */ /* XXX 4 bytes hole, try to pack */ void * security; /* 6608 8 */ /* XXX 40 bytes hole, try to pack */ /* --- cacheline 104 boundary (6656 bytes) --- */ struct thread_struct thread __attribute__((__aligned__(64))); /* 6656 4352 */ /* size: 11008, cachelines: 172, members: 207 */ /* sum members: 10902, holes: 16, sum holes: 98 */ /* sum bitfield members: 10 bits, bit holes: 2, sum bit holes: 54 bits */ /* paddings: 3, sum paddings: 14 */ /* forced alignments: 6, forced holes: 1, sum forced holes: 40 */ } __attribute__((__aligned__(64))); $ - Add a '--compile' option to 'pfunct' that produces compilable output for the function prototypes in an object file. There are still some bugs, but the vast majority of the kernel's single compilation unit files, the ones produced from a single .c file, are working; see the new 'fullcircle' utility that uses this feature. Example of it in action: $ pfunct --compile=static_key_false ../build/v5.1-rc4+/net/ipv4/tcp.o typedef _Bool bool; typedef struct { int counter; /* 0 4 */ /* size: 4, cachelines: 1, members: 1 */ /* last cacheline: 4 bytes */ } atomic_t; struct jump_entry; struct static_key_mod; struct static_key { atomic_t enabled; /* 0 4 */ /* XXX 4 bytes hole, try to pack */ union { long unsigned int type; /* 8 8 */ struct jump_entry * entries; /* 8 8 */ struct static_key_mod * next; /* 8 8 */ }; /* 8 8 */ /* size: 16, cachelines: 1, members: 2 */ /* sum members: 12, holes: 1, sum holes: 4 */ /* last cacheline: 16 bytes */ }; bool static_key_false(struct static_key * key) { return *(bool *)1; } $ The generation of compilable code from the type information and its use in the new tool 'fullcircle' help validate all the parts of this codebase, finding bugs that were lurking forever. Go read the csets to find all sorts of curious C language features that are rarely seen, like unnamed zero sized bitfields and the way people have been using them over the years in a codebase like the linux kernel. Certainly there are several other features, changes and fixes that I forgot to mention! Now lemme release this version so that we can use it more extensively together with a recent patch merged for 5.2: [PATCH bpf-next] kbuild: add ability to generate BTF type info for vmlinux With it, BTF will always be available for all the types of the kernel, which will open a Pandora's box of cool new features that are in the works, and, for people already using pahole, will greatly speed up its usage. 
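As an illustration of what that enables: once vmlinux carries a .BTF section, pretty printing any of its types no longer needs the much larger DWARF sections (a minimal sketch reusing the vmlinux path from the examples above; 'list_head' is just an arbitrary well known kernel struct, output omitted): $ pahole -F btf -C list_head ../build/v5.1-rc4+/vmlinux 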
Please try to alias it to use btf, i.e. alias pahole='pahole -F btf' Please report any problems you may find with this new version or with the BTF loader or any errors in the layout generated/pretty printed. Thanks to the fine BTF guys at Facebook for the patches and help in testing, fixing bugs and getting this out of the door. The stats for this release are: Changesets: 157 113 Arnaldo Carvalho de Melo Red Hat 32 Andrii Nakryiko Facebook 10 Yonghong Song Facebook 1 Martin Lau Facebook 1 Domenico Andreoli dwarves-dfsg-1.15/cmake/000077500000000000000000000000001350511416500151155ustar00rootroot00000000000000dwarves-dfsg-1.15/cmake/modules/000077500000000000000000000000001350511416500165655ustar00rootroot00000000000000dwarves-dfsg-1.15/cmake/modules/FindDWARF.cmake000066400000000000000000000102711350511416500212340ustar00rootroot00000000000000# - Find Dwarf # Find the dwarf.h header from elf utils # # DWARF_INCLUDE_DIR - where to find dwarf.h, etc. # DWARF_LIBRARIES - List of libraries when using elf utils. # DWARF_FOUND - True if fdo found. message(STATUS "Checking availability of DWARF and ELF development libraries") INCLUDE(CheckLibraryExists) if (DWARF_INCLUDE_DIR AND LIBDW_INCLUDE_DIR AND DWARF_LIBRARY AND ELF_LIBRARY) # Already in cache, be silent set(DWARF_FIND_QUIETLY TRUE) endif (DWARF_INCLUDE_DIR AND LIBDW_INCLUDE_DIR AND DWARF_LIBRARY AND ELF_LIBRARY) find_path(DWARF_INCLUDE_DIR dwarf.h /usr/include /usr/local/include /usr/include/libdwarf ~/usr/local/include ) find_path(LIBDW_INCLUDE_DIR elfutils/libdw.h /usr/include /usr/local/include ~/usr/local/include ) find_library(DWARF_LIBRARY NAMES dw dwarf PATHS /usr/lib /usr/local/lib /usr/lib64 /usr/local/lib64 ~/usr/local/lib ~/usr/local/lib64 ) find_library(ELF_LIBRARY NAMES elf PATHS /usr/lib /usr/local/lib /usr/lib64 /usr/local/lib64 ~/usr/local/lib ~/usr/local/lib64 ) find_library(EBL_LIBRARY NAMES ebl PATHS /usr/lib /usr/local/lib /usr/lib64 /usr/local/lib64 ~/usr/local/lib ~/usr/local/lib64 ) if (DWARF_INCLUDE_DIR AND LIBDW_INCLUDE_DIR AND DWARF_LIBRARY AND ELF_LIBRARY AND EBL_LIBRARY) set(DWARF_FOUND TRUE) set(DWARF_LIBRARIES ${DWARF_LIBRARY} ${ELF_LIBRARY} ${EBL_LIBRARY}) set(CMAKE_REQUIRED_LIBRARIES ${DWARF_LIBRARIES}) # check if libdw have the dwfl_module_build_id routine, i.e. if it supports the buildid # mechanism to match binaries to detached debug info sections (the -debuginfo packages # in distributions such as fedora). We do it against libelf because, IIRC, some distros # include libdw linked statically into libelf. check_library_exists(elf dwfl_module_build_id "" HAVE_DWFL_MODULE_BUILD_ID) else (DWARF_INCLUDE_DIR AND LIBDW_INCLUDE_DIR AND DWARF_LIBRARY AND ELF_LIBRARY AND EBL_LIBRARY) set(DWARF_FOUND FALSE) set(DWARF_LIBRARIES) endif (DWARF_INCLUDE_DIR AND LIBDW_INCLUDE_DIR AND DWARF_LIBRARY AND ELF_LIBRARY AND EBL_LIBRARY) if (DWARF_FOUND) if (NOT DWARF_FIND_QUIETLY) message(STATUS "Found dwarf.h header: ${DWARF_INCLUDE_DIR}") message(STATUS "Found elfutils/libdw.h header: ${LIBDW_INCLUDE_DIR}") message(STATUS "Found libdw library: ${DWARF_LIBRARY}") message(STATUS "Found libelf library: ${ELF_LIBRARY}") message(STATUS "Found libebl library: ${EBL_LIBRARY}") endif (NOT DWARF_FIND_QUIETLY) else (DWARF_FOUND) if (DWARF_FIND_REQUIRED) # Check if we are in a Red Hat (RHEL) or Fedora system to tell # exactly which packages should be installed. Please send # patches for other distributions. 
find_path(FEDORA fedora-release /etc) find_path(REDHAT redhat-release /etc) if (FEDORA OR REDHAT) if (NOT DWARF_INCLUDE_DIR OR NOT LIBDW_INCLUDE_DIR OR NOT EBL_LIBRARY) message(STATUS "Please install the elfutils-devel package") endif (NOT DWARF_INCLUDE_DIR OR NOT LIBDW_INCLUDE_DIR OR NOT EBL_LIBRARY) if (NOT DWARF_LIBRARY) message(STATUS "Please install the elfutils-libs package") endif (NOT DWARF_LIBRARY) if (NOT ELF_LIBRARY) message(STATUS "Please install the elfutils-libelf package") endif (NOT ELF_LIBRARY) else (FEDORA OR REDHAT) if (NOT DWARF_INCLUDE_DIR) message(STATUS "Could NOT find dwarf include dir") endif (NOT DWARF_INCLUDE_DIR) if (NOT LIBDW_INCLUDE_DIR) message(STATUS "Could NOT find libdw include dir") endif (NOT LIBDW_INCLUDE_DIR) if (NOT EBL_LIBRARY) message(STATUS "Could NOT find libebl library") endif (NOT EBL_LIBRARY) if (NOT DWARF_LIBRARY) message(STATUS "Could NOT find libdw library") endif (NOT DWARF_LIBRARY) if (NOT ELF_LIBRARY) message(STATUS "Could NOT find libelf library") endif (NOT ELF_LIBRARY) endif (FEDORA OR REDHAT) message(FATAL_ERROR "Could NOT find some ELF and DWARF libraries, please install the missing packages") endif (DWARF_FIND_REQUIRED) endif (DWARF_FOUND) mark_as_advanced(DWARF_INCLUDE_DIR LIBDW_INCLUDE_DIR DWARF_LIBRARY ELF_LIBRARY EBL_LIBRARY) include_directories(${DWARF_INCLUDE_DIR} ${LIBDW_INCLUDE_DIR}) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/config.h.cmake ${CMAKE_CURRENT_SOURCE_DIR}/config.h) message(STATUS "Checking availability of DWARF and ELF development libraries - done") dwarves-dfsg-1.15/codiff.c000066400000000000000000000554241350511416500154450ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2006 Mandriva Conectiva S.A. Copyright (C) 2006 Arnaldo Carvalho de Melo */ #include #include #include #include #include #include #include #include #include #include "dwarves.h" #include "dutil.h" static int show_struct_diffs; static int show_function_diffs; static int verbose; static int quiet; static int show_terse_type_changes; static struct conf_load conf_load = { .get_addr_info = true, }; static struct strlist *structs_printed; #define TCHANGEF__SIZE (1 << 0) #define TCHANGEF__NR_MEMBERS (1 << 1) #define TCHANGEF__TYPE (1 << 2) #define TCHANGEF__OFFSET (1 << 3) #define TCHANGEF__BIT_OFFSET (1 << 4) #define TCHANGEF__BIT_SIZE (1 << 5) #define TCHANGEF__PADDING (1 << 6) #define TCHANGEF__NR_HOLES (1 << 7) #define TCHANGEF__NR_BIT_HOLES (1 << 8) static uint32_t terse_type_changes; static uint32_t total_cus_changed; static uint32_t total_nr_functions_changed; static uint32_t total_function_bytes_added; static uint32_t total_function_bytes_removed; struct diff_info { const struct tag *tag; const struct cu *cu; int32_t diff; }; static struct diff_info *diff_info__new(const struct tag *twin, const struct cu *cu, int32_t diff) { struct diff_info *dinfo = malloc(sizeof(*dinfo)); if (dinfo == NULL) { puts("out of memory!"); exit(1); } dinfo->tag = twin; dinfo->cu = cu; dinfo->diff = diff; return dinfo; } static void cu__check_max_len_changed_item(struct cu *cu, const char *name, uint8_t addend) { const uint32_t len = strlen(name) + addend; if (len > cu->max_len_changed_item) cu->max_len_changed_item = len; } static void diff_function(const struct cu *new_cu, struct function *function, struct cu *cu) { struct tag *new_tag; const char *name; if (function->inlined || function->abstract_origin != 0) return; name = function__name(function, cu); new_tag = cu__find_function_by_name(new_cu, name); if (new_tag != NULL) { 
struct function *new_function = tag__function(new_tag); int32_t diff = (function__size(new_function) - function__size(function)); if (diff != 0) { function->priv = diff_info__new(&new_function->proto.tag, new_cu, diff); cu__check_max_len_changed_item(cu, name, 0); ++cu->nr_functions_changed; if (diff > 0) cu->function_bytes_added += diff; else cu->function_bytes_removed += -diff; } else { char proto[1024], twin_proto[1024]; if (strcmp(function__prototype(function, cu, proto, sizeof(proto)), function__prototype(new_function, new_cu, twin_proto, sizeof(twin_proto))) != 0) { ++cu->nr_functions_changed; function->priv = diff_info__new(function__tag(new_function), new_cu, 0); } } } else { const uint32_t diff = -function__size(function); cu__check_max_len_changed_item(cu, name, 0); function->priv = diff_info__new(NULL, NULL, diff); ++cu->nr_functions_changed; cu->function_bytes_removed += -diff; } } static int check_print_change(const struct class_member *old, const struct cu *old_cu, const struct class_member *new, const struct cu *new_cu, int print) { size_t old_size, new_size; char old_type_name[128], new_type_name[128]; const struct tag *old_type = cu__type(old_cu, old->tag.type); const struct tag *new_type = cu__type(new_cu, new->tag.type); int changes = 0; if (old_type == NULL || new_type == NULL) return 0; old_size = old->byte_size; new_size = new->byte_size; if (old_size != new_size) changes = 1; if (old->byte_offset != new->byte_offset) { changes = 1; terse_type_changes |= TCHANGEF__OFFSET; } if (old->bitfield_offset != new->bitfield_offset) { changes = 1; terse_type_changes |= TCHANGEF__BIT_OFFSET; } if (old->bitfield_size != new->bitfield_size) { changes = 1; terse_type_changes |= TCHANGEF__BIT_SIZE; } if (strcmp(tag__name(old_type, old_cu, old_type_name, sizeof(old_type_name), NULL), tag__name(new_type, new_cu, new_type_name, sizeof(new_type_name), NULL)) != 0) { changes = 1; terse_type_changes |= TCHANGEF__TYPE; } if (changes && print && !show_terse_type_changes) printf(" %s\n" " from: %-21s /* %5u(%2u) %5zd(%2d) */\n" " to: %-21s /* %5u(%2u) %5zd(%2u) */\n", class_member__name(old, old_cu), old_type_name, old->byte_offset, old->bitfield_offset, old_size, old->bitfield_size, new_type_name, new->byte_offset, new->bitfield_offset, new_size, new->bitfield_size); return changes; } static struct class_member *class__find_pair_member(const struct class *structure, const struct cu *cu, const struct class_member *pair_member, const struct cu *pair_cu, int *nr_anonymousp) { const char *member_name = class_member__name(pair_member, pair_cu); struct class_member *member; if (member_name) return class__find_member_by_name(structure, cu, member_name); int nr_anonymous = ++*nr_anonymousp; /* Unnamed struct or union, let's look for the first unnamed matching tag.type */ type__for_each_member(&structure->type, member) { if (member->tag.tag == pair_member->tag.tag && /* Both are class/union/struct (unnamed) */ class_member__name(member, cu) == member_name && /* Both are NULL? 
*/ --nr_anonymous == 0) return member; } return NULL; } static int check_print_members_changes(const struct class *structure, const struct cu *cu, const struct class *new_structure, const struct cu *new_cu, int print) { int changes = 0, nr_anonymous = 0; struct class_member *member; uint16_t nr_twins_found = 0; type__for_each_member(&structure->type, member) { struct class_member *twin = class__find_pair_member(new_structure, new_cu, member, cu, &nr_anonymous); if (twin != NULL) { twin->tag.visited = 1; ++nr_twins_found; if (check_print_change(member, cu, twin, new_cu, print)) changes = 1; } else { changes = 1; if (print) { char name[128]; struct tag *type; type = cu__type(cu, member->tag.type); printf(" %s\n" " removed: %-21s /* %5u(%2u) %5zd(%2d) */\n", class_member__name(member, cu), tag__name(type, cu, name, sizeof(name), NULL), member->byte_offset, member->bitfield_offset, member->byte_size, member->bitfield_size); } } } if (nr_twins_found == (new_structure->type.nr_members + new_structure->type.nr_static_members)) goto out; changes = 1; if (!print) goto out; type__for_each_member(&new_structure->type, member) { if (!member->tag.visited) { char name[128]; struct tag *type; type = cu__type(new_cu, member->tag.type); printf(" %s\n" " added: %-21s /* %5u(%2u) %5zd(%2d) */\n", class_member__name(member, new_cu), tag__name(type, new_cu, name, sizeof(name), NULL), member->byte_offset, member->bitfield_offset, member->byte_size, member->bitfield_size); } } out: return changes; } static void diff_struct(const struct cu *new_cu, struct class *structure, struct cu *cu) { struct tag *new_tag; struct class *new_structure = NULL; int32_t diff; assert(class__is_struct(structure)); if (class__size(structure) == 0 || class__name(structure, cu) == NULL) return; new_tag = cu__find_struct_by_name(new_cu, class__name(structure, cu), 0, NULL); if (new_tag == NULL) return; new_structure = tag__class(new_tag); if (class__size(new_structure) == 0) return; assert(class__is_struct(new_structure)); diff = class__size(structure) != class__size(new_structure) || class__nr_members(structure) != class__nr_members(new_structure) || check_print_members_changes(structure, cu, new_structure, new_cu, 0) || structure->padding != new_structure->padding || structure->nr_holes != new_structure->nr_holes || structure->nr_bit_holes != new_structure->nr_bit_holes; if (diff == 0) return; ++cu->nr_structures_changed; cu__check_max_len_changed_item(cu, class__name(structure, cu), sizeof("struct")); structure->priv = diff_info__new(class__tag(new_structure), new_cu, diff); } static struct cu *cus__find_pair(struct cus *cus, const char *name) { if (cus->nr_entries == 1) return list_first_entry(&cus->cus, struct cu, node); return cus__find_cu_by_name(cus, name); } static int cu_find_new_tags_iterator(struct cu *new_cu, void *old_cus) { struct cu *old_cu = cus__find_pair(old_cus, new_cu->name); if (old_cu != NULL && cu__same_build_id(old_cu, new_cu)) return 0; struct function *function; uint32_t id; cu__for_each_function(new_cu, id, function) { /* * We're not interested in aliases, just real function definitions, * where we'll know the kind of inlining */ if (function->abstract_origin || function->inlined) continue; const char *name = function__name(function, new_cu); struct tag *old_function = cu__find_function_by_name(old_cu, name); if (old_function != NULL && !tag__function(old_function)->inlined) continue; const int32_t diff = function__size(function); cu__check_max_len_changed_item(new_cu, name, 0); 
++new_cu->nr_functions_changed; new_cu->function_bytes_added += diff; function->priv = diff_info__new(old_function, new_cu, diff); } struct class *class; cu__for_each_struct(new_cu, id, class) { const char *name = class__name(class, new_cu); if (name == NULL || class__size(class) == 0 || cu__find_struct_by_name(old_cu, name, 0, NULL)) continue; class->priv = diff_info__new(NULL, NULL, 1); ++new_cu->nr_structures_changed; cu__check_max_len_changed_item(new_cu, name, sizeof("struct")); } return 0; } static int cu_diff_iterator(struct cu *cu, void *new_cus) { struct cu *new_cu = cus__find_pair(new_cus, cu->name); if (new_cu != NULL && cu__same_build_id(cu, new_cu)) return 0; uint32_t id; struct class *class; cu__for_each_struct(cu, id, class) diff_struct(new_cu, class, cu); struct function *function; cu__for_each_function(cu, id, function) diff_function(new_cu, function, cu); return 0; } static void show_diffs_function(struct function *function, const struct cu *cu, const void *cookie) { const struct diff_info *di = function->priv; printf(" %-*.*s | %+4d", (int)cu->max_len_changed_item, (int)cu->max_len_changed_item, function__name(function, cu), di->diff); if (!verbose) { putchar('\n'); return; } if (di->tag == NULL) puts(cookie ? " (added)" : " (removed)"); else { struct function *twin = tag__function(di->tag); if (twin->inlined) puts(cookie ? " (uninlined)" : " (inlined)"); else if (strcmp(function__name(function, cu), function__name(twin, di->cu)) != 0) printf("%s: BRAIN FART ALERT: comparing %s to %s, " "should be the same name\n", __FUNCTION__, function__name(function, cu), function__name(twin, di->cu)); else { char proto[1024], twin_proto[1024]; printf(" # %d -> %d", function__size(function), function__size(twin)); if (function->lexblock.nr_lexblocks != twin->lexblock.nr_lexblocks) printf(", lexblocks: %d -> %d", function->lexblock.nr_lexblocks, twin->lexblock.nr_lexblocks); if (function->lexblock.nr_inline_expansions != twin->lexblock.nr_inline_expansions) printf(", # inlines: %d -> %d", function->lexblock.nr_inline_expansions, twin->lexblock.nr_inline_expansions); if (function->lexblock.size_inline_expansions != twin->lexblock.size_inline_expansions) printf(", size inlines: %d -> %d", function->lexblock.size_inline_expansions, twin->lexblock.size_inline_expansions); if (strcmp(function__prototype(function, cu, proto, sizeof(proto)), function__prototype(twin, di->cu, twin_proto, sizeof(twin_proto))) != 0) printf(", prototype: %s -> %s", proto, twin_proto); putchar('\n'); } } } static void show_changed_member(char change, const struct class_member *member, const struct cu *cu) { const struct tag *type = cu__type(cu, member->tag.type); char bf[128]; tag__assert_search_result(type); printf(" %c%-26s %-21s /* %5u %5zd */\n", change, tag__name(type, cu, bf, sizeof(bf), NULL), class_member__name(member, cu), member->byte_offset, member->byte_size); } static void show_nr_members_changes(const struct class *structure, const struct cu *cu, const struct class *new_structure, const struct cu *new_cu) { struct class_member *member; int nr_anonymous = 0; /* Find the removed ones */ type__for_each_member(&structure->type, member) { struct class_member *twin = class__find_pair_member(new_structure, new_cu, member, cu, &nr_anonymous); if (twin == NULL) show_changed_member('-', member, cu); } nr_anonymous = 0; /* Find the new ones */ type__for_each_member(&new_structure->type, member) { struct class_member *twin = class__find_pair_member(structure, cu, member, new_cu, &nr_anonymous); if (twin == NULL) 
show_changed_member('+', member, new_cu); } } static void print_terse_type_changes(struct class *structure, const struct cu *cu) { const char *sep = ""; printf("struct %s: ", class__name(structure, cu)); if (terse_type_changes & TCHANGEF__SIZE) { fputs("size", stdout); sep = ", "; } if (terse_type_changes & TCHANGEF__NR_MEMBERS) { printf("%snr_members", sep); sep = ", "; } if (terse_type_changes & TCHANGEF__TYPE) { printf("%stype", sep); sep = ", "; } if (terse_type_changes & TCHANGEF__OFFSET) { printf("%soffset", sep); sep = ", "; } if (terse_type_changes & TCHANGEF__BIT_OFFSET) { printf("%sbit_offset", sep); sep = ", "; } if (terse_type_changes & TCHANGEF__BIT_SIZE) { printf("%sbit_size", sep); sep = ", "; } if (terse_type_changes & TCHANGEF__PADDING) { printf("%spadding", sep); sep = ", "; } if (terse_type_changes & TCHANGEF__NR_HOLES) { printf("%snr_holes", sep); sep = ", "; } if (terse_type_changes & TCHANGEF__NR_BIT_HOLES) printf("%snr_bit_holes", sep); putchar('\n'); } static void show_diffs_structure(struct class *structure, const struct cu *cu) { const struct diff_info *di = structure->priv; const struct class *new_structure; int diff; /* * This is when the struct was not present in the new object file. * Meaning that it either was not referenced or that it was completely * removed. */ if (di == NULL) return; new_structure = tag__class(di->tag); /* * If there is a diff_info but its di->tag is NULL we have a new structure, * one that didn't appear in the old object. See find_new_classes_iterator. */ if (new_structure == NULL) diff = class__size(structure); else diff = class__size(new_structure) - class__size(structure); terse_type_changes = 0; if (!show_terse_type_changes) printf(" struct %-*.*s | %+4d\n", (int)(cu->max_len_changed_item - sizeof("struct")), (int)(cu->max_len_changed_item - sizeof("struct")), class__name(structure, cu), diff); if (diff != 0) terse_type_changes |= TCHANGEF__SIZE; if (!verbose && !show_terse_type_changes) return; if (new_structure == NULL) diff = -class__nr_members(structure); else diff = (class__nr_members(new_structure) - class__nr_members(structure)); if (diff != 0) { terse_type_changes |= TCHANGEF__NR_MEMBERS; if (!show_terse_type_changes) { printf(" nr_members: %+d\n", diff); if (new_structure != NULL) show_nr_members_changes(structure, cu, new_structure, di->cu); } } if (new_structure != NULL) { diff = (int)new_structure->padding - (int)structure->padding; if (diff) { terse_type_changes |= TCHANGEF__PADDING; if (!show_terse_type_changes) printf(" padding: %+d\n", diff); } diff = (int)new_structure->nr_holes - (int)structure->nr_holes; if (diff) { terse_type_changes |= TCHANGEF__NR_HOLES; if (!show_terse_type_changes) printf(" nr_holes: %+d\n", diff); } diff = ((int)new_structure->nr_bit_holes - (int)structure->nr_bit_holes); if (structure->nr_bit_holes != new_structure->nr_bit_holes) { terse_type_changes |= TCHANGEF__NR_BIT_HOLES; if (!show_terse_type_changes) printf(" nr_bit_holes: %+d\n", diff); } check_print_members_changes(structure, cu, new_structure, di->cu, 1); } if (show_terse_type_changes) print_terse_type_changes(structure, cu); } static void show_structure_diffs_iterator(struct class *class, struct cu *cu) { if (class->priv != NULL) { const char *name = class__name(class, cu); if (!strlist__has_entry(structs_printed, name)) { show_diffs_structure(class, cu); strlist__add(structs_printed, name); } } } static int cu_show_diffs_iterator(struct cu *cu, void *cookie) { static int first_cu_printed; if (cu->nr_functions_changed == 0 &&
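/* nothing changed in this CU, so there is nothing to print for it */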
cu->nr_structures_changed == 0) return 0; if (first_cu_printed) { if (!quiet) putchar('\n'); } else { first_cu_printed = 1; } ++total_cus_changed; if (!quiet) printf("%s:\n", cu->name); uint32_t id; struct class *class; if (show_terse_type_changes) { cu__for_each_struct(cu, id, class) show_structure_diffs_iterator(class, cu); return 0; } if (cu->nr_structures_changed != 0 && show_struct_diffs) { cu__for_each_struct(cu, id, class) show_structure_diffs_iterator(class, cu); printf(" %u struct%s changed\n", cu->nr_structures_changed, cu->nr_structures_changed > 1 ? "s" : ""); } if (cu->nr_functions_changed != 0 && show_function_diffs) { total_nr_functions_changed += cu->nr_functions_changed; struct function *function; cu__for_each_function(cu, id, function) { if (function->priv != NULL) show_diffs_function(function, cu, cookie); } printf(" %u function%s changed", cu->nr_functions_changed, cu->nr_functions_changed > 1 ? "s" : ""); if (cu->function_bytes_added != 0) { total_function_bytes_added += cu->function_bytes_added; printf(", %zd bytes added", cu->function_bytes_added); } if (cu->function_bytes_removed != 0) { total_function_bytes_removed += cu->function_bytes_removed; printf(", %zd bytes removed", cu->function_bytes_removed); } printf(", diff: %+zd", cu->function_bytes_added - cu->function_bytes_removed); putchar('\n'); } return 0; } static int cu_delete_priv(struct cu *cu, void *cookie __unused) { struct class *c; struct function *f; uint32_t id; cu__for_each_struct(cu, id, c) free(c->priv); cu__for_each_function(cu, id, f) free(f->priv); return 0; } static void print_total_function_diff(const char *filename) { printf("\n%s:\n", filename); printf(" %u function%s changed", total_nr_functions_changed, total_nr_functions_changed > 1 ? "s" : ""); if (total_function_bytes_added != 0) printf(", %u bytes added", total_function_bytes_added); if (total_function_bytes_removed != 0) printf(", %u bytes removed", total_function_bytes_removed); printf(", diff: %+d", (total_function_bytes_added - total_function_bytes_removed)); putchar('\n'); } /* Name and version of program. */ ARGP_PROGRAM_VERSION_HOOK_DEF = dwarves_print_version; static const struct argp_option codiff__options[] = { { .key = 's', .name = "structs", .doc = "show struct diffs", }, { .key = 'f', .name = "functions", .doc = "show function diffs", }, { .name = "format_path", .key = 'F', .arg = "FORMAT_LIST", .doc = "List of debugging formats to try" }, { .key = 't', .name = "terse_type_changes", .doc = "show terse type changes", }, { .key = 'V', .name = "verbose", .doc = "show diffs details", }, { .key = 'q', .name = "quiet", .doc = "Show only differences, no difference? 
No output", }, { .name = NULL, } }; static error_t codiff__options_parser(int key, char *arg __unused, struct argp_state *state __unused) { switch (key) { case 'f': show_function_diffs = 1; break; case 'F': conf_load.format_path = arg; break; case 's': show_struct_diffs = 1; break; case 't': show_terse_type_changes = 1; break; case 'V': verbose = 1; break; case 'q': quiet = 1; break; default: return ARGP_ERR_UNKNOWN; } return 0; } static const char codiff__args_doc[] = "OLD_FILE NEW_FILE"; static struct argp codiff__argp = { .options = codiff__options, .parser = codiff__options_parser, .args_doc = codiff__args_doc, }; int main(int argc, char *argv[]) { int remaining, err, rc = EXIT_FAILURE; char *old_filename, *new_filename; char *filenames[2]; struct stat st; if (argp_parse(&codiff__argp, argc, argv, 0, &remaining, NULL) || remaining < argc) { switch (argc - remaining) { case 2: old_filename = argv[remaining++]; new_filename = argv[remaining++]; break; case 1: default: goto failure; } } else { failure: argp_help(&codiff__argp, stderr, ARGP_HELP_SEE, argv[0]); goto out; } if (dwarves__init(0)) { fputs("codiff: insufficient memory\n", stderr); goto out; } if (show_function_diffs == 0 && show_struct_diffs == 0 && show_terse_type_changes == 0) show_function_diffs = show_struct_diffs = 1; structs_printed = strlist__new(false); struct cus *old_cus = cus__new(), *new_cus = cus__new(); if (old_cus == NULL || new_cus == NULL || structs_printed == NULL) { fputs("codiff: insufficient memory\n", stderr); goto out_cus_delete; } if (stat(old_filename, &st) != 0) { fprintf(stderr, "codiff: %s (%s)\n", strerror(errno), old_filename); goto out_cus_delete; } filenames[1] = NULL; /* If old_file is a character device, leave its cus empty */ if (!S_ISCHR(st.st_mode)) { err = cus__load_file(old_cus, &conf_load, old_filename); if (err != 0) { cus__print_error_msg("codiff", old_cus, old_filename, err); goto out_cus_delete_priv; } } if (stat(new_filename, &st) != 0) { fprintf(stderr, "codiff: %s (%s)\n", strerror(errno), new_filename); goto out_cus_delete_priv; } /* If old_file is a character device, leave its cus empty */ if (!S_ISCHR(st.st_mode)) { err = cus__load_file(new_cus, &conf_load, new_filename); if (err != 0) { cus__print_error_msg("codiff", new_cus, new_filename, err); goto out_cus_delete_priv; } } cus__for_each_cu(old_cus, cu_diff_iterator, new_cus, NULL); cus__for_each_cu(new_cus, cu_find_new_tags_iterator, old_cus, NULL); cus__for_each_cu(old_cus, cu_show_diffs_iterator, NULL, NULL); if (new_cus->nr_entries > 1) cus__for_each_cu(new_cus, cu_show_diffs_iterator, (void *)1, NULL); if (total_cus_changed > 1) { if (show_function_diffs) print_total_function_diff(new_filename); } rc = EXIT_SUCCESS; out_cus_delete_priv: cus__for_each_cu(old_cus, cu_delete_priv, NULL, NULL); cus__for_each_cu(new_cus, cu_delete_priv, NULL, NULL); out_cus_delete: cus__delete(old_cus); cus__delete(new_cus); strlist__delete(structs_printed); dwarves__exit(); out: return rc; } dwarves-dfsg-1.15/config.h.cmake000066400000000000000000000004521350511416500165330ustar00rootroot00000000000000/* Copyright (C) 2007 Arnaldo Carvalho de Melo This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. 
*/ #cmakedefine HAVE_DWFL_MODULE_BUILD_ID dwarves-dfsg-1.15/ctf.h000066400000000000000000000107041350511416500147640ustar00rootroot00000000000000#ifndef _CTF_H #define _CTF_H /* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2019 Arnaldo Carvalho de Melo */ #include struct ctf_header { uint16_t ctf_magic; /* Header magic value */ #define CTF_MAGIC 0xcff1 #define CTF_MAGIC_SWAP 0xf1cf uint8_t ctf_version; /* Header version */ #define CTF_VERSION 2 uint8_t ctf_flags; /* Header flags */ #define CTF_FLAGS_COMPR 0x01 uint32_t ctf_parent_label; /* Label of parent CTF object */ uint32_t ctf_parent_name; /* Name of parent CTF object */ /* All offsets are in bytes and are relative to the end of * this header. */ uint32_t ctf_label_off; /* Offset of label section */ uint32_t ctf_object_off; /* Offset of data object section */ uint32_t ctf_func_off; /* Offset of function section */ uint32_t ctf_type_off; /* Offset of type section */ uint32_t ctf_str_off; /* Offset of string section */ uint32_t ctf_str_len; /* Length of string section */ }; #define CTF_REF_OFFSET(REF) ((REF) & 0x7fffffff) #define CTF_REF_TBL_ID(REF) (((REF) >> 31) & 0x1) #define CTF_STR_TBL_ID_0 0 #define CTF_STR_TBL_ID_1 1 #define CTF_REF_ENCODE(TBL, OFF) (((TBL) << 31) | (OFF)) struct ctf_label_ent { uint32_t ctf_label_ref; uint32_t ctf_type_index; }; /* Types are encoded with ctf_short_type so long as the ctf_size * field can be fully represented in a uint16_t. If not, then * the ctf_size is given the value 0xffff and ctf_full_type is * used. */ struct ctf_short_type { uint32_t ctf_name; uint16_t ctf_info; union { uint16_t ctf_size; uint16_t ctf_type; }; }; struct ctf_full_type { struct ctf_short_type base; uint32_t ctf_size_high; uint32_t ctf_size_low; }; #define CTF_GET_KIND(VAL) (((VAL) >> 11) & 0x1f) #define CTF_GET_VLEN(VAL) ((VAL) & 0x3ff) #define CTF_ISROOT(VAL) (((VAL) & 0x400) != 0) #define CTF_INFO_ENCODE(KIND, VLEN, ISROOT) \ (((ISROOT) ?
0x400 : 0) | ((KIND) << 11) | (VLEN)) #define CTF_TYPE_KIND_UNKN 0 /* Unknown */ #define CTF_TYPE_KIND_INT 1 /* Integer */ #define CTF_TYPE_KIND_FLT 2 /* Float */ #define CTF_TYPE_KIND_PTR 3 /* Pointer */ #define CTF_TYPE_KIND_ARR 4 /* Array */ #define CTF_TYPE_KIND_FUNC 5 /* Function */ #define CTF_TYPE_KIND_STR 6 /* Struct */ #define CTF_TYPE_KIND_UNION 7 /* Union */ #define CTF_TYPE_KIND_ENUM 8 /* Enumeration */ #define CTF_TYPE_KIND_FWD 9 /* Forward */ #define CTF_TYPE_KIND_TYPDEF 10 /* Typedef */ #define CTF_TYPE_KIND_VOLATILE 11 /* Volatile */ #define CTF_TYPE_KIND_CONST 12 /* Const */ #define CTF_TYPE_KIND_RESTRICT 13 /* Restrict */ #define CTF_TYPE_KIND_MAX 31 #define CTF_TYPE_INT_ATTRS(VAL) ((VAL) >> 24) #define CTF_TYPE_INT_OFFSET(VAL) (((VAL) >> 16) & 0xff) #define CTF_TYPE_INT_BITS(VAL) ((VAL) & 0xffff) #define CTF_TYPE_INT_ENCODE(ATTRS, OFF, BITS) \ (((ATTRS) << 24) | ((OFF) << 16) | (BITS)) /* Integer type attributes */ #define CTF_TYPE_INT_SIGNED 0x1 #define CTF_TYPE_INT_CHAR 0x2 #define CTF_TYPE_INT_BOOL 0x4 #define CTF_TYPE_INT_VARARGS 0x8 #define CTF_TYPE_FP_ATTRS(VAL) ((VAL) >> 24) #define CTF_TYPE_FP_OFFSET(VAL) (((VAL) >> 16) & 0xff) #define CTF_TYPE_FP_BITS(VAL) ((VAL) & 0xffff) #define CTF_TYPE_FP_ENCODE(ATTRS, OFF, BITS) \ (((ATTRS) << 24) | ((OFF) << 16) | (BITS)) /* Possible values for the float type attribute field */ #define CTF_TYPE_FP_SINGLE 1 #define CTF_TYPE_FP_DOUBLE 2 #define CTF_TYPE_FP_CMPLX 3 #define CTF_TYPE_FP_CMPLX_DBL 4 #define CTF_TYPE_FP_CMPLX_LDBL 5 #define CTF_TYPE_FP_LDBL 6 #define CTF_TYPE_FP_INTVL 7 #define CTF_TYPE_FP_INTVL_DBL 8 #define CTF_TYPE_FP_INTVL_LDBL 9 #define CTF_TYPE_FP_IMGRY 10 #define CTF_TYPE_FP_IMGRY_DBL 11 #define CTF_TYPE_FP_IMGRY_LDBL 12 #define CTF_TYPE_FP_MAX 12 struct ctf_enum { uint32_t ctf_enum_name; uint32_t ctf_enum_val; }; struct ctf_array { uint16_t ctf_array_type; uint16_t ctf_array_index_type; uint32_t ctf_array_nelems; }; /* Struct members are encoded with either ctf_short_member or * ctf_full_member, depending upon the 'size' of the struct or * union being defined. If it is less than CTF_SHORT_MEMBER_LIMIT * then ctf_short_member objects are used to encode, else * ctf_full_member is used. */ #define CTF_SHORT_MEMBER_LIMIT 8192 struct ctf_short_member { uint32_t ctf_member_name; uint16_t ctf_member_type; uint16_t ctf_member_offset; }; struct ctf_full_member { uint32_t ctf_member_name; uint16_t ctf_member_type; uint16_t ctf_member_unused; uint32_t ctf_member_offset_high; uint32_t ctf_member_offset_low; }; #endif /* _CTF_H */ dwarves-dfsg-1.15/ctf_encoder.c000066400000000000000000000211771350511416500164640ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2009 Red Hat Inc. 
Copyright (C) 2009 Arnaldo Carvalho de Melo */ #include "dwarves.h" #include "libctf.h" #include "ctf.h" #include "hash.h" #include "elf_symtab.h" #include static int tag__check_id_drift(const struct tag *tag, uint32_t core_id, uint32_t ctf_id) { if (ctf_id != core_id) { fprintf(stderr, "%s: %s id drift, core: %u, libctf: %d\n", __func__, dwarf_tag_name(tag->tag), core_id, ctf_id); return -1; } return 0; } static int dwarf_to_ctf_type(uint16_t tag) { switch (tag) { case DW_TAG_const_type: return CTF_TYPE_KIND_CONST; case DW_TAG_pointer_type: return CTF_TYPE_KIND_PTR; case DW_TAG_restrict_type: return CTF_TYPE_KIND_RESTRICT; case DW_TAG_volatile_type: return CTF_TYPE_KIND_VOLATILE; case DW_TAG_class_type: case DW_TAG_structure_type: return CTF_TYPE_KIND_STR; case DW_TAG_union_type: return CTF_TYPE_KIND_UNION; } return 0xffff; } static int base_type__encode(struct tag *tag, uint32_t core_id, struct ctf *ctf) { struct base_type *bt = tag__base_type(tag); uint32_t ctf_id = ctf__add_base_type(ctf, bt->name, bt->bit_size); if (tag__check_id_drift(tag, core_id, ctf_id)) return -1; return 0; } static int pointer_type__encode(struct tag *tag, uint32_t core_id, struct ctf *ctf) { uint32_t ctf_id = ctf__add_short_type(ctf, dwarf_to_ctf_type(tag->tag), tag->type, 0); if (tag__check_id_drift(tag, core_id, ctf_id)) return -1; return 0; } static int typedef__encode(struct tag *tag, uint32_t core_id, struct ctf *ctf) { uint32_t ctf_id = ctf__add_short_type(ctf, CTF_TYPE_KIND_TYPDEF, tag->type, tag__namespace(tag)->name); if (tag__check_id_drift(tag, core_id, ctf_id)) return -1; return 0; } static int fwd_decl__encode(struct tag *tag, uint32_t core_id, struct ctf *ctf) { uint32_t ctf_id = ctf__add_fwd_decl(ctf, tag__namespace(tag)->name); if (tag__check_id_drift(tag, core_id, ctf_id)) return -1; return 0; } static int structure_type__encode(struct tag *tag, uint32_t core_id, struct ctf *ctf) { struct type *type = tag__type(tag); int64_t position; uint32_t ctf_id = ctf__add_struct(ctf, dwarf_to_ctf_type(tag->tag), type->namespace.name, type->size, type->nr_members, &position); if (tag__check_id_drift(tag, core_id, ctf_id)) return -1; const bool is_short = type->size < CTF_SHORT_MEMBER_LIMIT; struct class_member *pos; type__for_each_data_member(type, pos) { if (is_short) ctf__add_short_member(ctf, pos->name, pos->tag.type, pos->bit_offset, &position); else ctf__add_full_member(ctf, pos->name, pos->tag.type, pos->bit_offset, &position); } return 0; } static uint32_t array_type__nelems(struct tag *tag) { int i; uint32_t nelem = 1; struct array_type *array = tag__array_type(tag); for (i = array->dimensions - 1; i >= 0; --i) nelem *= array->nr_entries[i]; return nelem; } static int array_type__encode(struct tag *tag, uint32_t core_id, struct ctf *ctf) { const uint32_t nelems = array_type__nelems(tag); uint32_t ctf_id = ctf__add_array(ctf, tag->type, 0, nelems); if (tag__check_id_drift(tag, core_id, ctf_id)) return -1; return 0; } static int subroutine_type__encode(struct tag *tag, uint32_t core_id, struct ctf *ctf) { struct parameter *pos; int64_t position; struct ftype *ftype = tag__ftype(tag); uint32_t ctf_id = ctf__add_function_type(ctf, tag->type, ftype->nr_parms, ftype->unspec_parms, &position); if (tag__check_id_drift(tag, core_id, ctf_id)) return -1; ftype__for_each_parameter(ftype, pos) ctf__add_parameter(ctf, pos->tag.type, &position); return 0; } static int enumeration_type__encode(struct tag *tag, uint32_t core_id, struct ctf *ctf) { struct type *etype = tag__type(tag); int64_t position; uint32_t 
ctf_id = ctf__add_enumeration_type(ctf, etype->namespace.name, etype->size, etype->nr_members, &position); if (tag__check_id_drift(tag, core_id, ctf_id)) return -1; struct enumerator *pos; type__for_each_enumerator(etype, pos) ctf__add_enumerator(ctf, pos->name, pos->value, &position); return 0; } static void tag__encode_ctf(struct tag *tag, uint32_t core_id, struct ctf *ctf) { switch (tag->tag) { case DW_TAG_base_type: base_type__encode(tag, core_id, ctf); break; case DW_TAG_const_type: case DW_TAG_pointer_type: case DW_TAG_restrict_type: case DW_TAG_volatile_type: pointer_type__encode(tag, core_id, ctf); break; case DW_TAG_typedef: typedef__encode(tag, core_id, ctf); break; case DW_TAG_structure_type: case DW_TAG_union_type: case DW_TAG_class_type: if (tag__type(tag)->declaration) fwd_decl__encode(tag, core_id, ctf); else structure_type__encode(tag, core_id, ctf); break; case DW_TAG_array_type: array_type__encode(tag, core_id, ctf); break; case DW_TAG_subroutine_type: subroutine_type__encode(tag, core_id, ctf); break; case DW_TAG_enumeration_type: enumeration_type__encode(tag, core_id, ctf); break; } } #define HASHADDR__BITS 8 #define HASHADDR__SIZE (1UL << HASHADDR__BITS) #define hashaddr__fn(key) hash_64(key, HASHADDR__BITS) static struct function *hashaddr__find_function(const struct hlist_head hashtable[], const uint64_t addr) { struct function *function; struct hlist_node *pos; uint16_t bucket = hashaddr__fn(addr); const struct hlist_head *head = &hashtable[bucket]; hlist_for_each_entry(function, pos, head, tool_hnode) { if (function->lexblock.ip.addr == addr) return function; } return NULL; } static struct variable *hashaddr__find_variable(const struct hlist_head hashtable[], const uint64_t addr) { struct variable *variable; struct hlist_node *pos; uint16_t bucket = hashaddr__fn(addr); const struct hlist_head *head = &hashtable[bucket]; hlist_for_each_entry(variable, pos, head, tool_hnode) { if (variable->ip.addr == addr) return variable; } return NULL; } /* * FIXME: It's in the DWARF loader, we have to find a better handoff * mechanism...
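 * For now the encoder simply borrows that table: cu__encode_ctf below calls ctf__set_strings(ctf, &strings->gb), so the strings_t offsets already stored in the tags stay valid while encoding.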
*/ extern struct strings *strings; int cu__encode_ctf(struct cu *cu, int verbose) { int err = -1; struct ctf *ctf = ctf__new(cu->filename, cu->elf); if (ctf == NULL) goto out; if (cu__cache_symtab(cu) < 0) goto out_delete; ctf__set_strings(ctf, &strings->gb); uint32_t id; struct tag *pos; cu__for_each_type(cu, id, pos) tag__encode_ctf(pos, id, ctf); struct hlist_head hash_addr[HASHADDR__SIZE]; for (id = 0; id < HASHADDR__SIZE; ++id) INIT_HLIST_HEAD(&hash_addr[id]); struct function *function; cu__for_each_function(cu, id, function) { uint64_t addr = function->lexblock.ip.addr; struct hlist_head *head = &hash_addr[hashaddr__fn(addr)]; hlist_add_head(&function->tool_hnode, head); } uint64_t addr; GElf_Sym sym; const char *sym_name; cu__for_each_cached_symtab_entry(cu, id, sym, sym_name) { if (ctf__ignore_symtab_function(&sym, sym_name)) continue; addr = elf_sym__value(&sym); int64_t position; function = hashaddr__find_function(hash_addr, addr); if (function == NULL) { if (verbose) fprintf(stderr, "function %4d: %-20s %#" PRIx64 " %5u NOT FOUND!\n", id, sym_name, addr, elf_sym__size(&sym)); err = ctf__add_function(ctf, 0, 0, 0, &position); if (err != 0) goto out_err_ctf; continue; } const struct ftype *ftype = &function->proto; err = ctf__add_function(ctf, function->proto.tag.type, ftype->nr_parms, ftype->unspec_parms, &position); if (err != 0) goto out_err_ctf; struct parameter *pos; ftype__for_each_parameter(ftype, pos) ctf__add_function_parameter(ctf, pos->tag.type, &position); } for (id = 0; id < HASHADDR__SIZE; ++id) INIT_HLIST_HEAD(&hash_addr[id]); struct variable *var; cu__for_each_variable(cu, id, pos) { var = tag__variable(pos); if (variable__scope(var) != VSCOPE_GLOBAL) continue; struct hlist_head *head = &hash_addr[hashaddr__fn(var->ip.addr)]; hlist_add_head(&var->tool_hnode, head); } cu__for_each_cached_symtab_entry(cu, id, sym, sym_name) { if (ctf__ignore_symtab_object(&sym, sym_name)) continue; addr = elf_sym__value(&sym); var = hashaddr__find_variable(hash_addr, addr); if (var == NULL) { if (verbose) fprintf(stderr, "variable %4d: %-20s %#" PRIx64 " %5u NOT FOUND!\n", id, sym_name, addr, elf_sym__size(&sym)); err = ctf__add_object(ctf, 0); if (err != 0) goto out_err_ctf; continue; } err = ctf__add_object(ctf, var->ip.tag.type); if (err != 0) goto out_err_ctf; } ctf__encode(ctf, CTF_FLAGS_COMPR); err = 0; out_delete: ctf__delete(ctf); out: return err; out_err_ctf: fprintf(stderr, "%4d: %-20s %#llx %5u failed encoding, " "ABORTING!\n", id, sym_name, (unsigned long long)addr, elf_sym__size(&sym)); goto out_delete; } dwarves-dfsg-1.15/ctf_encoder.h000066400000000000000000000004361350511416500164640ustar00rootroot00000000000000#ifndef _CTF_ENCODER_H_ #define _CTF_ENCODER_H_ 1 /* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2009 Red Hat Inc. Copyright (C) 2009 Arnaldo Carvalho de Melo */ struct cu; int cu__encode_ctf(struct cu *cu, int verbose); #endif /* _CTF_ENCODER_H_ */ dwarves-dfsg-1.15/ctf_loader.c000066400000000000000000000457371350511416500163230ustar00rootroot00000000000000/* ctfdump.c: CTF dumper. * * Copyright (C) 2008 David S. 
Miller */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "libctf.h" #include "ctf.h" #include "dutil.h" #include "dwarves.h" /* * FIXME: We should just get the table from the CTF ELF section * and use it directly */ extern struct strings *strings; static void *tag__alloc(const size_t size) { struct tag *tag = zalloc(size); if (tag != NULL) tag->top_level = 1; return tag; } static int ctf__load_ftype(struct ctf *ctf, struct ftype *proto, uint16_t tag, uint16_t type, uint16_t vlen, uint16_t *args, long id) { proto->tag.tag = tag; proto->tag.type = type; INIT_LIST_HEAD(&proto->parms); int i; for (i = 0; i < vlen; i++) { uint16_t type = ctf__get16(ctf, &args[i]); if (type == 0) proto->unspec_parms = 1; else { struct parameter *p = tag__alloc(sizeof(*p)); if (p == NULL) goto out_free_parameters; p->tag.tag = DW_TAG_formal_parameter; p->tag.type = ctf__get16(ctf, &args[i]); ftype__add_parameter(proto, p); } } vlen *= sizeof(*args); /* Round up to next multiple of 4 to maintain * 32-bit alignment. */ if (vlen & 0x2) vlen += 0x2; if (id < 0) { uint32_t type_id; cu__add_tag(ctf->priv, &proto->tag, &type_id); } else { cu__add_tag_with_id(ctf->priv, &proto->tag, id); } return vlen; out_free_parameters: ftype__delete(proto, ctf->priv); return -ENOMEM; } static struct function *function__new(uint16_t **ptr, GElf_Sym *sym, struct ctf *ctf) { struct function *func = tag__alloc(sizeof(*func)); if (func != NULL) { func->lexblock.ip.addr = elf_sym__value(sym); func->lexblock.size = elf_sym__size(sym); func->name = sym->st_name; func->vtable_entry = -1; func->external = elf_sym__bind(sym) == STB_GLOBAL; INIT_LIST_HEAD(&func->vtable_node); INIT_LIST_HEAD(&func->tool_node); INIT_LIST_HEAD(&func->lexblock.tags); uint16_t val = ctf__get16(ctf, *ptr); uint16_t tag = CTF_GET_KIND(val); uint16_t vlen = CTF_GET_VLEN(val); ++*ptr; if (tag != CTF_TYPE_KIND_FUNC) { fprintf(stderr, "%s: Expected function type, got %u\n", __func__, tag); goto out_delete; } uint16_t type = ctf__get16(ctf, *ptr); long id = -1; /* FIXME: not needed for funcs... */ ++*ptr; if (ctf__load_ftype(ctf, &func->proto, DW_TAG_subprogram, type, vlen, *ptr, id) < 0) return NULL; /* * Round up to next multiple of 4 to maintain 32-bit alignment. 
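 * Here vlen counts uint16_t argument slots, so making it even keeps the cursor 32-bit aligned: e.g. a 3 parameter signature is padded to 4 slots, advancing *ptr by 8 bytes.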
*/ if (vlen & 0x1) ++vlen; *ptr += vlen; } return func; out_delete: free(func); return NULL; } static int ctf__load_funcs(struct ctf *ctf) { struct ctf_header *hp = ctf__get_buffer(ctf); uint16_t *func_ptr = (ctf__get_buffer(ctf) + sizeof(*hp) + ctf__get32(ctf, &hp->ctf_func_off)); GElf_Sym sym; uint32_t idx; ctf__for_each_symtab_function(ctf, idx, sym) if (function__new(&func_ptr, &sym, ctf) == NULL) return -ENOMEM; return 0; } static struct base_type *base_type__new(strings_t name, uint32_t attrs, uint8_t float_type, size_t size) { struct base_type *bt = tag__alloc(sizeof(*bt)); if (bt != NULL) { bt->name = name; bt->bit_size = size; bt->is_signed = attrs & CTF_TYPE_INT_SIGNED; bt->is_bool = attrs & CTF_TYPE_INT_BOOL; bt->is_varargs = attrs & CTF_TYPE_INT_VARARGS; bt->name_has_encoding = false; bt->float_type = float_type; } return bt; } static void type__init(struct type *type, uint16_t tag, strings_t name, size_t size) { INIT_LIST_HEAD(&type->node); INIT_LIST_HEAD(&type->namespace.tags); type->size = size; type->namespace.tag.tag = tag; type->namespace.name = name; type->namespace.sname = 0; } static struct type *type__new(uint16_t tag, strings_t name, size_t size) { struct type *type = tag__alloc(sizeof(*type)); if (type != NULL) type__init(type, tag, name, size); return type; } static struct class *class__new(strings_t name, size_t size) { struct class *class = tag__alloc(sizeof(*class)); if (class != NULL) { type__init(&class->type, DW_TAG_structure_type, name, size); INIT_LIST_HEAD(&class->vtable); } return class; } static int create_new_base_type(struct ctf *ctf, void *ptr, struct ctf_full_type *tp, uint32_t id) { uint32_t *enc = ptr; uint32_t eval = ctf__get32(ctf, enc); uint32_t attrs = CTF_TYPE_INT_ATTRS(eval); strings_t name = ctf__get32(ctf, &tp->base.ctf_name); struct base_type *base = base_type__new(name, attrs, 0, CTF_TYPE_INT_BITS(eval)); if (base == NULL) return -ENOMEM; base->tag.tag = DW_TAG_base_type; cu__add_tag_with_id(ctf->priv, &base->tag, id); return sizeof(*enc); } static int create_new_base_type_float(struct ctf *ctf, void *ptr, struct ctf_full_type *tp, uint32_t id) { strings_t name = ctf__get32(ctf, &tp->base.ctf_name); uint32_t *enc = ptr, eval = ctf__get32(ctf, enc); struct base_type *base = base_type__new(name, 0, eval, CTF_TYPE_FP_BITS(eval)); if (base == NULL) return -ENOMEM; base->tag.tag = DW_TAG_base_type; cu__add_tag_with_id(ctf->priv, &base->tag, id); return sizeof(*enc); } static int create_new_array(struct ctf *ctf, void *ptr, uint32_t id) { struct ctf_array *ap = ptr; struct array_type *array = tag__alloc(sizeof(*array)); if (array == NULL) return -ENOMEM; /* FIXME: where to get the number of dimensions? * is it flattened? */ array->dimensions = 1; array->nr_entries = malloc(sizeof(uint32_t)); if (array->nr_entries == NULL) { free(array); return -ENOMEM; } array->nr_entries[0] = ctf__get32(ctf, &ap->ctf_array_nelems); array->tag.tag = DW_TAG_array_type; array->tag.type = ctf__get16(ctf, &ap->ctf_array_type); cu__add_tag_with_id(ctf->priv, &array->tag, id); return sizeof(*ap); } static int create_new_subroutine_type(struct ctf *ctf, void *ptr, int vlen, struct ctf_full_type *tp, uint32_t id) { uint16_t *args = ptr; unsigned int type = ctf__get16(ctf, &tp->base.ctf_type); struct ftype *proto = tag__alloc(sizeof(*proto)); if (proto == NULL) return -ENOMEM; vlen = ctf__load_ftype(ctf, proto, DW_TAG_subroutine_type, type, vlen, args, id); return vlen < 0 ?
-ENOMEM : vlen; } static int create_full_members(struct ctf *ctf, void *ptr, int vlen, struct type *class) { struct ctf_full_member *mp = ptr; int i; for (i = 0; i < vlen; i++) { struct class_member *member = zalloc(sizeof(*member)); if (member == NULL) return -ENOMEM; member->tag.tag = DW_TAG_member; member->tag.type = ctf__get16(ctf, &mp[i].ctf_member_type); member->name = ctf__get32(ctf, &mp[i].ctf_member_name); member->bit_offset = (ctf__get32(ctf, &mp[i].ctf_member_offset_high) << 16) | ctf__get32(ctf, &mp[i].ctf_member_offset_low); /* sizes and offsets will be corrected at class__fixup_ctf_bitfields */ type__add_member(class, member); } return sizeof(*mp); } static int create_short_members(struct ctf *ctf, void *ptr, int vlen, struct type *class) { struct ctf_short_member *mp = ptr; int i; for (i = 0; i < vlen; i++) { struct class_member *member = zalloc(sizeof(*member)); if (member == NULL) return -ENOMEM; member->tag.tag = DW_TAG_member; member->tag.type = ctf__get16(ctf, &mp[i].ctf_member_type); member->name = ctf__get32(ctf, &mp[i].ctf_member_name); member->bit_offset = ctf__get16(ctf, &mp[i].ctf_member_offset); /* sizes and offsets will be corrected at class__fixup_ctf_bitfields */ type__add_member(class, member); } return sizeof(*mp); } static int create_new_class(struct ctf *ctf, void *ptr, int vlen, struct ctf_full_type *tp, uint64_t size, uint32_t id) { int member_size; strings_t name = ctf__get32(ctf, &tp->base.ctf_name); struct class *class = class__new(name, size); if (size >= CTF_SHORT_MEMBER_LIMIT) { member_size = create_full_members(ctf, ptr, vlen, &class->type); } else { member_size = create_short_members(ctf, ptr, vlen, &class->type); } if (member_size < 0) goto out_free; cu__add_tag_with_id(ctf->priv, &class->type.namespace.tag, id); return (vlen * member_size); out_free: class__delete(class, ctf->priv); return -ENOMEM; } static int create_new_union(struct ctf *ctf, void *ptr, int vlen, struct ctf_full_type *tp, uint64_t size, uint32_t id) { int member_size; strings_t name = ctf__get32(ctf, &tp->base.ctf_name); struct type *un = type__new(DW_TAG_union_type, name, size); if (size >= CTF_SHORT_MEMBER_LIMIT) { member_size = create_full_members(ctf, ptr, vlen, un); } else { member_size = create_short_members(ctf, ptr, vlen, un); } if (member_size < 0) goto out_free; cu__add_tag_with_id(ctf->priv, &un->namespace.tag, id); return (vlen * member_size); out_free: type__delete(un, ctf->priv); return -ENOMEM; } static struct enumerator *enumerator__new(strings_t name, uint32_t value) { struct enumerator *en = tag__alloc(sizeof(*en)); if (en != NULL) { en->name = name; en->value = value; en->tag.tag = DW_TAG_enumerator; } return en; } static int create_new_enumeration(struct ctf *ctf, void *ptr, int vlen, struct ctf_full_type *tp, uint16_t size, uint32_t id) { struct ctf_enum *ep = ptr; uint16_t i; struct type *enumeration = type__new(DW_TAG_enumeration_type, ctf__get32(ctf, &tp->base.ctf_name), size ?: (sizeof(int) * 8)); if (enumeration == NULL) return -ENOMEM; for (i = 0; i < vlen; i++) { strings_t name = ctf__get32(ctf, &ep[i].ctf_enum_name); uint32_t value = ctf__get32(ctf, &ep[i].ctf_enum_val); struct enumerator *enumerator = enumerator__new(name, value); if (enumerator == NULL) goto out_free; enumeration__add(enumeration, enumerator); } cu__add_tag_with_id(ctf->priv, &enumeration->namespace.tag, id); return (vlen * sizeof(*ep)); out_free: enumeration__delete(enumeration, ctf->priv); return -ENOMEM; } static int create_new_forward_decl(struct ctf *ctf, struct 
ctf_full_type *tp, uint64_t size, uint32_t id) { strings_t name = ctf__get32(ctf, &tp->base.ctf_name); struct class *fwd = class__new(name, size); if (fwd == NULL) return -ENOMEM; fwd->type.declaration = 1; cu__add_tag_with_id(ctf->priv, &fwd->type.namespace.tag, id); return 0; } static int create_new_typedef(struct ctf *ctf, struct ctf_full_type *tp, uint64_t size, uint32_t id) { strings_t name = ctf__get32(ctf, &tp->base.ctf_name); unsigned int type_id = ctf__get16(ctf, &tp->base.ctf_type); struct type *type = type__new(DW_TAG_typedef, name, size); if (type == NULL) return -ENOMEM; type->namespace.tag.type = type_id; cu__add_tag_with_id(ctf->priv, &type->namespace.tag, id); return 0; } static int create_new_tag(struct ctf *ctf, int type, struct ctf_full_type *tp, uint32_t id) { unsigned int type_id = ctf__get16(ctf, &tp->base.ctf_type); struct tag *tag = zalloc(sizeof(*tag)); if (tag == NULL) return -ENOMEM; switch (type) { case CTF_TYPE_KIND_CONST: tag->tag = DW_TAG_const_type; break; case CTF_TYPE_KIND_PTR: tag->tag = DW_TAG_pointer_type; break; case CTF_TYPE_KIND_RESTRICT: tag->tag = DW_TAG_restrict_type; break; case CTF_TYPE_KIND_VOLATILE: tag->tag = DW_TAG_volatile_type; break; default: printf("%s: FOO %d\n\n", __func__, type); return 0; } tag->type = type_id; cu__add_tag_with_id(ctf->priv, tag, id); return 0; } static int ctf__load_types(struct ctf *ctf) { void *ctf_buffer = ctf__get_buffer(ctf); struct ctf_header *hp = ctf_buffer; void *ctf_contents = ctf_buffer + sizeof(*hp), *type_section = (ctf_contents + ctf__get32(ctf, &hp->ctf_type_off)), *strings_section = (ctf_contents + ctf__get32(ctf, &hp->ctf_str_off)); struct ctf_full_type *type_ptr = type_section, *end = strings_section; uint32_t type_index = 0x0001; if (hp->ctf_parent_name || hp->ctf_parent_label) type_index += 0x8000; while (type_ptr < end) { uint16_t val = ctf__get16(ctf, &type_ptr->base.ctf_info); uint16_t type = CTF_GET_KIND(val); int vlen = CTF_GET_VLEN(val); void *ptr = type_ptr; uint16_t base_size = ctf__get16(ctf, &type_ptr->base.ctf_size); uint64_t size = base_size; if (base_size == 0xffff) { size = ctf__get32(ctf, &type_ptr->ctf_size_high); size <<= 32; size |= ctf__get32(ctf, &type_ptr->ctf_size_low); ptr += sizeof(struct ctf_full_type); } else ptr += sizeof(struct ctf_short_type); if (type == CTF_TYPE_KIND_INT) { vlen = create_new_base_type(ctf, ptr, type_ptr, type_index); } else if (type == CTF_TYPE_KIND_FLT) { vlen = create_new_base_type_float(ctf, ptr, type_ptr, type_index); } else if (type == CTF_TYPE_KIND_ARR) { vlen = create_new_array(ctf, ptr, type_index); } else if (type == CTF_TYPE_KIND_FUNC) { vlen = create_new_subroutine_type(ctf, ptr, vlen, type_ptr, type_index); } else if (type == CTF_TYPE_KIND_STR) { vlen = create_new_class(ctf, ptr, vlen, type_ptr, size, type_index); } else if (type == CTF_TYPE_KIND_UNION) { vlen = create_new_union(ctf, ptr, vlen, type_ptr, size, type_index); } else if (type == CTF_TYPE_KIND_ENUM) { vlen = create_new_enumeration(ctf, ptr, vlen, type_ptr, size, type_index); } else if (type == CTF_TYPE_KIND_FWD) { vlen = create_new_forward_decl(ctf, type_ptr, size, type_index); } else if (type == CTF_TYPE_KIND_TYPDEF) { vlen = create_new_typedef(ctf, type_ptr, size, type_index); } else if (type == CTF_TYPE_KIND_VOLATILE || type == CTF_TYPE_KIND_PTR || type == CTF_TYPE_KIND_CONST || type == CTF_TYPE_KIND_RESTRICT) { vlen = create_new_tag(ctf, type, type_ptr, type_index); } else if (type == CTF_TYPE_KIND_UNKN) { cu__table_nullify_type_entry(ctf->priv, type_index); 
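/* The id slot was consumed anyway, so the types that follow keep matching their position in the on-disk type section. */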
fprintf(stderr, "CTF: idx: %d, off: %zd, root: %s Unknown\n", type_index, ((void *)type_ptr) - type_section, CTF_ISROOT(val) ? "yes" : "no"); vlen = 0; } else return -EINVAL; if (vlen < 0) return vlen; type_ptr = ptr + vlen; type_index++; } return 0; } static struct variable *variable__new(uint16_t type, GElf_Sym *sym, struct ctf *ctf) { struct variable *var = tag__alloc(sizeof(*var)); if (var != NULL) { var->scope = VSCOPE_GLOBAL; var->ip.addr = elf_sym__value(sym); var->name = sym->st_name; var->external = elf_sym__bind(sym) == STB_GLOBAL; var->ip.tag.tag = DW_TAG_variable; var->ip.tag.type = type; uint32_t id; /* FIXME: not needed for variables... */ cu__add_tag(ctf->priv, &var->ip.tag, &id); } return var; } static int ctf__load_objects(struct ctf *ctf) { struct ctf_header *hp = ctf__get_buffer(ctf); uint16_t *objp = (ctf__get_buffer(ctf) + sizeof(*hp) + ctf__get32(ctf, &hp->ctf_object_off)); GElf_Sym sym; uint32_t idx; ctf__for_each_symtab_object(ctf, idx, sym) { const uint16_t type = *objp; /* * Discard void objects, probably was an object * we didn't found DWARF info for when encoding. */ if (type && variable__new(type, &sym, ctf) == NULL) return -ENOMEM; ++objp; } return 0; } static int ctf__load_sections(struct ctf *ctf) { int err = ctf__load_symtab(ctf); if (err != 0) goto out; err = ctf__load_funcs(ctf); if (err == 0) err = ctf__load_types(ctf); if (err == 0) err = ctf__load_objects(ctf); out: return err; } static int class__fixup_ctf_bitfields(struct tag *tag, struct cu *cu) { struct class_member *pos; struct type *tag_type = tag__type(tag); type__for_each_data_member(tag_type, pos) { struct tag *type = tag__strip_typedefs_and_modifiers(&pos->tag, cu); if (type == NULL) /* FIXME: C++ CTF... */ continue; pos->bitfield_offset = 0; pos->bitfield_size = 0; pos->byte_offset = pos->bit_offset / 8; uint16_t type_bit_size; size_t integral_bit_size; switch (type->tag) { case DW_TAG_enumeration_type: type_bit_size = tag__type(type)->size; /* Best we can do to check if this is a packed enum */ if (is_power_of_2(type_bit_size)) integral_bit_size = roundup(type_bit_size, 8); else integral_bit_size = sizeof(int) * 8; break; case DW_TAG_base_type: { struct base_type *bt = tag__base_type(type); char name[256]; type_bit_size = bt->bit_size; integral_bit_size = base_type__name_to_size(bt, cu); if (integral_bit_size == 0) fprintf(stderr, "%s: unknown base type name \"%s\"!\n", __func__, base_type__name(bt, cu, name, sizeof(name))); } break; default: pos->byte_size = tag__size(type, cu); pos->bit_size = pos->byte_size * 8; continue; } /* * XXX: integral_bit_size can be zero if base_type__name_to_size doesn't * know about the base_type name, so one has to add there when * such base_type isn't found. pahole will put zero on the * struct output so it should be easy to spot the name when * such unlikely thing happens. 
*/ pos->byte_size = integral_bit_size / 8; if (integral_bit_size == 0 || type_bit_size == integral_bit_size) { pos->bit_size = integral_bit_size; continue; } pos->bitfield_offset = pos->bit_offset % integral_bit_size; pos->bitfield_size = type_bit_size; pos->bit_size = type_bit_size; pos->byte_offset = (((pos->bit_offset / integral_bit_size) * integral_bit_size) / 8); } return 0; } static int cu__fixup_ctf_bitfields(struct cu *cu) { int err = 0; struct tag *pos; list_for_each_entry(pos, &cu->tags, node) if (tag__is_struct(pos) || tag__is_union(pos)) { err = class__fixup_ctf_bitfields(pos, cu); if (err) break; } return err; } static const char *ctf__function_name(struct function *func, const struct cu *cu) { struct ctf *ctf = cu->priv; return ctf->symtab->symstrs->d_buf + func->name; } static const char *ctf__variable_name(const struct variable *var, const struct cu *cu) { struct ctf *ctf = cu->priv; return ctf->symtab->symstrs->d_buf + var->name; } static void ctf__cu_delete(struct cu *cu) { ctf__delete(cu->priv); cu->priv = NULL; } static const char *ctf__strings_ptr(const struct cu *cu, strings_t s) { return ctf__string(cu->priv, s); } struct debug_fmt_ops ctf__ops; int ctf__load_file(struct cus *cus, struct conf_load *conf, const char *filename) { int err; struct ctf *state = ctf__new(filename, NULL); if (state == NULL) return -1; struct cu *cu = cu__new(filename, state->wordsize, NULL, 0, filename); if (cu == NULL) return -1; cu->language = LANG_C; cu->uses_global_strings = false; cu->little_endian = state->ehdr.e_ident[EI_DATA] == ELFDATA2LSB; cu->dfops = &ctf__ops; cu->priv = state; state->priv = cu; if (ctf__load(state) != 0) return -1; err = ctf__load_sections(state); if (err != 0) { cu__delete(cu); return err; } err = cu__fixup_ctf_bitfields(cu); /* * The app stole this cu, possibly deleting it, * so forget about it */ if (conf && conf->steal && conf->steal(cu, conf)) return 0; cus__add(cus, cu); return err; } struct debug_fmt_ops ctf__ops = { .name = "ctf", .function__name = ctf__function_name, .load_file = ctf__load_file, .variable__name = ctf__variable_name, .strings__ptr = ctf__strings_ptr, .cu__delete = ctf__cu_delete, }; dwarves-dfsg-1.15/ctfdwdiff000077500000000000000000000020171350511416500157230ustar00rootroot00000000000000#!/bin/bash results_dir=/tmp/ctfdwdiff diff_tool() { local tool=$1 local dwarf_options=$2 local ctf_options=$3 local obj=$4 diff=$results_dir/$obj.$tool.diff ctf=$results_dir/$obj.$tool.ctf.c dwarf=$results_dir/$obj.$tool.dwarf.c $tool -F ctf $ctf_options $obj > $ctf $tool -F dwarf $dwarf_options $obj > $dwarf diff -up $dwarf $ctf > $diff if [ -s $diff ] ; then [ $# -gt 4 ] && vim $diff exit 0 else rm -f $diff $ctf $dwarf fi } diff_one() { local obj=$1 diff_tool "pahole" "--flat_arrays --show_private_classes --fixup_silly_bitfields" " " $obj $2 diff_tool "pfunct" "-V --no_parm_names" "-V" $obj $2 } diff_dir() { find . -type d | \ while read dir ; do cd $dir ls *.o 2> /dev/null | while read obj ; do ncus=$(readelf -wi $obj | grep DW_TAG_compile_unit | wc -l) if [ $ncus -ne 1 ] ; then continue fi echo $obj pahole -Z $obj diff_one $obj $1 done cd - > /dev/null done } rm -rf $results_dir mkdir $results_dir if [ $# -lt 2 ] ; then diff_dir else diff_one $* fi dwarves-dfsg-1.15/ctracer.c000066400000000000000000000712441350511416500156340ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2006 Mandriva Conectiva S.A. 
Copyright (C) 2006 Arnaldo Carvalho de Melo */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "dwarves_reorganize.h" #include "dwarves_emit.h" #include "dwarves.h" #include "dutil.h" #include "elf_symtab.h" /* * target class name */ static char *class_name; /* * List of compilation units being searched for functions with * pointers to the specified struct. */ static struct cus *methods_cus; /** * Mini class, the subset of the traced class that is collected at the probes */ static struct class *mini_class; /* * Directory where to generate source files */ static const char *src_dir = "."; /* * Where to print the ctracer_methods.stp file */ static FILE *fp_methods; /* * Where to print the ctracer_collector.c file */ static FILE *fp_collector; /* * Where to print the ctracer_classes.h file */ static FILE *fp_classes; /* * blacklist __init marked functions, i.e. functions that are * in the ".init.text" ELF section and are thus discarded after * boot. */ static struct strlist *init_blacklist; /* * List of definitions and forward declarations already emitted for * methods_cus, to avoid duplication. */ static struct type_emissions emissions; /* * CU blacklist: if a "blacklist.cu" file is present, don't consider the * CUs listed. Use a default of blacklist.cu. */ static const char *cu_blacklist_filename = "blacklist.cu"; static struct strlist *cu_blacklist; static struct cu *cu_filter(struct cu *cu) { if (strlist__has_entry(cu_blacklist, cu->name)) return NULL; return cu; } /* * List of probes and kretprobes already emitted, this is a hack to cope with * name space collisions, a better solution would be, in these cases, to use the * compilation unit name (net/ipv4/tcp.o, for instance) as a prefix when a * static function has the same name in multiple compilation units (aka object * files). */ static void *probes_emitted; struct structure { struct list_head node; struct tag *class; struct cu *cu; }; static struct structure *structure__new(struct tag *class, struct cu *cu) { struct structure *st = malloc(sizeof(*st)); if (st != NULL) { st->class = class; st->cu = cu; } return st; } /* * structs that can be cast to the target class, i.e. that have the target * class as their first member. */ static LIST_HEAD(aliases); /* * structs that have pointers to the target class.
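 * (e.g. were sock the target class, struct socket would be collected here by way of its struct sock *sk member; an illustrative kernel example, not taken from this file).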
*/ static LIST_HEAD(pointers); static const char *structure__name(const struct structure *st) { return class__name(tag__class(st->class), st->cu); } static struct structure *structures__find(struct list_head *list, const char *name) { struct structure *pos; if (name == NULL) return NULL; list_for_each_entry(pos, list, node) if (strcmp(structure__name(pos), name) == 0) return pos; return NULL; } static void structures__add(struct list_head *list, struct tag *class, struct cu *cu) { struct structure *str = structure__new(class, cu); if (str != NULL) list_add(&str->node, list); } static int methods__compare(const void *a, const void *b) { return strcmp(a, b); } static int methods__add(void **table, const char *str) { char **s = tsearch(str, table, methods__compare); if (s != NULL) { if (*s == str) { char *dup = strdup(str); if (dup != NULL) *s = dup; else { tdelete(str, table, methods__compare); return -1; } } else return -1; } else return -1; return 0; } static void method__add(struct cu *cu, struct function *function, uint32_t id) { list_add(&function->tool_node, &cu->tool_list); function->priv = (void *)(long)id; } /* * We want just the function tags that have as one of its parameters * a pointer to the specified "class" (a struct, unions can be added later). */ static struct function *function__filter(struct function *function, struct cu *cu, type_id_t target_type_id) { if (function__inlined(function) || function->abstract_origin != 0 || !list_empty(&function->tool_node) || !ftype__has_parm_of_type(&function->proto, target_type_id, cu) || strlist__has_entry(init_blacklist, function__name(function, cu))) { return NULL; } return function; } /* * Iterate thru all the tags in the compilation unit, looking just for the * function tags that have as one of its parameters a pointer to * the specified "class" (struct). */ static int cu_find_methods_iterator(struct cu *cu, void *cookie) { type_id_t target_type_id; uint32_t function_id; struct function *function; struct tag *target = cu__find_struct_by_name(cu, cookie, 0, &target_type_id); INIT_LIST_HEAD(&cu->tool_list); if (target == NULL) return 0; cu__for_each_function(cu, function_id, function) if (function__filter(function, cu, target_type_id)) method__add(cu, function, function_id); return 0; } static struct class_member *class_member__bitfield_tail(struct class_member *head, struct class *class) { struct class_member *tail = head, *member = list_prepare_entry(head, class__tags(class), tag.node); list_for_each_entry_continue(member, class__tags(class), tag.node) if (member->byte_offset == head->byte_offset) tail = member; else break; return tail; } /* * Bitfields are removed as one for simplification right now. */ static struct class_member *class__remove_member(struct class *class, const struct cu *cu, struct class_member *member) { size_t size = member->byte_size; struct class_member *bitfield_tail = NULL; struct list_head *next; uint16_t member_hole = member->hole; if (member->bitfield_size != 0) { bitfield_tail = class_member__bitfield_tail(member, class); member_hole = bitfield_tail->hole; } /* * Is this the first member? */ if (member->tag.node.prev == class__tags(class)) { class->type.size -= size + member_hole; class__subtract_offsets_from(class, bitfield_tail ?: member, size + member_hole); /* * Is this the last member? 
*/ } else if (member->tag.node.next == class__tags(class)) { if (size + class->padding >= cu->addr_size) { class->type.size -= size + class->padding; class->padding = 0; } else class->padding += size; } else { if (size + member_hole >= cu->addr_size) { class->type.size -= size + member_hole; class__subtract_offsets_from(class, bitfield_tail ?: member, size + member_hole); } else { struct class_member *from_prev = list_entry(member->tag.node.prev, struct class_member, tag.node); if (from_prev->hole == 0) class->nr_holes++; from_prev->hole += size + member_hole; } } if (member_hole != 0) class->nr_holes--; if (bitfield_tail != NULL) { next = bitfield_tail->tag.node.next; list_del_range(&member->tag.node, &bitfield_tail->tag.node); if (bitfield_tail->bit_hole != 0) class->nr_bit_holes--; } else { next = member->tag.node.next; list_del(&member->tag.node); } return list_entry(next, struct class_member, tag.node); } static size_t class__find_biggest_member_name(const struct class *class, const struct cu *cu) { struct class_member *pos; size_t biggest_name_len = 0; type__for_each_data_member(&class->type, pos) { const size_t len = pos->name ? strlen(class_member__name(pos, cu)) : 0; if (len > biggest_name_len) biggest_name_len = len; } return biggest_name_len; } static void class__emit_class_state_collector(struct class *class, const struct cu *cu, struct class *clone) { struct class_member *pos; int len = class__find_biggest_member_name(clone, cu); fprintf(fp_collector, "void ctracer__class_state(const void *from, void *to)\n" "{\n" "\tconst struct %s *obj = from;\n" "\tstruct %s *mini_obj = to;\n\n", class__name(class, cu), class__name(clone, cu)); type__for_each_data_member(&clone->type, pos) fprintf(fp_collector, "\tmini_obj->%-*s = obj->%s;\n", len, class_member__name(pos, cu), class_member__name(pos, cu)); fputs("}\n\n", fp_collector); } static int tag__is_base_type(const struct tag *tag, const struct cu *cu) { switch (tag->tag) { case DW_TAG_base_type: return 1; case DW_TAG_typedef: { const struct tag *type = cu__type(cu, tag->type); if (type == NULL) return 0; return tag__is_base_type(type, cu); } } return 0; } static struct class *class__clone_base_types(const struct tag *tag, struct cu *cu, const char *new_class_name) { struct class *class = tag__class(tag); struct class_member *pos, *next; struct class *clone = class__clone(class, new_class_name, cu); if (clone == NULL) return NULL; type__for_each_data_member_safe(&clone->type, pos, next) { struct tag *member_type = cu__type(cu, pos->tag.type); tag__assert_search_result(member_type); if (!tag__is_base_type(member_type, cu)) { next = class__remove_member(clone, cu, pos); class_member__delete(pos, cu); } } class__fixup_alignment(clone, cu); class__reorganize(clone, cu, 0, NULL); return clone; } /** * Converter to the legacy ostra tables, will be much improved in the future. */ static void emit_struct_member_table_entry(FILE *fp, int field, const char *name, int traced, const char *hooks) { fprintf(fp, "%u:%s:", field, name); if (traced) fprintf(fp, "yes:%%object->%s:u:%s:none\n", name, hooks); else fprintf(fp, "no:None:None:%s:dev_null\n", hooks); } /** * Generates a converter to the legacy ostra tables format, needed by * ostra-cg to preprocess the raw data collected from the debugfs/relay * channel.
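 * For instance, the control fields emitted below come out as lines like: * 0:action:no:None:None:entry,exit:dev_null * 2:object:yes:%object->object:u:entry,exit:none * (reconstructed from the emit_struct_member_table_entry formats above: field number, name, traced flag, grab expression, type, hooks, filter).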
*/ static int class__emit_ostra_converter(struct tag *tag, const struct cu *cu) { struct class *class = tag__class(tag); struct class_member *pos; struct type *type = &mini_class->type; int field = 0, first = 1; char filename[128]; char parm_list[1024]; char *p = parm_list; size_t n; size_t plen = sizeof(parm_list); FILE *fp_fields, *fp_converter; const char *name = class__name(class, cu); snprintf(filename, sizeof(filename), "%s/%s.fields", src_dir, name); fp_fields = fopen(filename, "w"); if (fp_fields == NULL) { fprintf(stderr, "ctracer: couldn't create %s\n", filename); exit(EXIT_FAILURE); } snprintf(filename, sizeof(filename), "%s/ctracer2ostra.c", src_dir); fp_converter = fopen(filename, "w"); if (fp_converter == NULL) { fprintf(stderr, "ctracer: couldn't create %s\n", filename); exit(EXIT_FAILURE); } fputs("#include \"ctracer_classes.h\"\n" "#include \n" "#include \n" "#include \"ctracer_relay.h\"\n\n", fp_converter); emit_struct_member_table_entry(fp_fields, field++, "action", 0, "entry,exit"); emit_struct_member_table_entry(fp_fields, field++, "function_id", 0, "entry,exit"); emit_struct_member_table_entry(fp_fields, field++, "object", 1, "entry,exit"); fprintf(fp_converter, "\n" "int main(void)\n" "{\n" "\twhile (1) {\n" "\t\tstruct trace_entry hdr;\n" "\t\tstruct ctracer__mini_%s obj;\n" "\n" "\t\tif (read(0, &hdr, sizeof(hdr)) != sizeof(hdr))\n" "\t\t\tbreak;\n" "\n" "\t\tfprintf(stdout, \"%%llu %%c:%%llu:%%p\",\n" "\t\t\thdr.nsec,\n" "\t\t\thdr.probe_type ? 'o' : 'i',\n" "\t\t\thdr.function_id,\n" "\t\t\thdr.object);\n" "\n" "\t\tif (read(0, &obj, sizeof(obj)) != sizeof(obj))\n" "\t\t\tbreak;\n" "\t\tfprintf(stdout,\n" "\t\t\t\":", name); type__for_each_data_member(type, pos) { if (first) first = 0; else { fputc(':', fp_converter); n = snprintf(p, plen, ",\n\t\t\t "); plen -= n; p += n; } fprintf(fp_converter, "%%u"); n = snprintf(p, plen, "obj.%s", class_member__name(pos, cu)); plen -= n; p += n; emit_struct_member_table_entry(fp_fields, field++, class_member__name(pos, cu), 1, "entry,exit"); } fprintf(fp_converter, "\\n\",\n\t\t\t %s);\n" "\t}\n" "\treturn 0;\n" "}\n", parm_list); fclose(fp_fields); fclose(fp_converter); return 0; } /* * We want just the DW_TAG_structure_type tags that have a member that is a pointer * to the target class. */ static struct tag *pointer_filter(struct tag *tag, struct cu *cu, type_id_t target_type_id) { struct type *type; struct class_member *pos; const char *class_name; if (!tag__is_struct(tag)) return NULL; type = tag__type(tag); if (type->nr_members == 0) return NULL; class_name = class__name(tag__class(tag), cu); if (class_name == NULL || structures__find(&pointers, class_name)) return NULL; type__for_each_member(type, pos) { struct tag *ctype = cu__type(cu, pos->tag.type); tag__assert_search_result(ctype); if (tag__is_pointer_to(ctype, target_type_id)) return tag; } return NULL; } /* * Iterate thru all the tags in the compilation unit, looking for classes * that have as one member that is a pointer to the target type. 
*/ static int cu_find_pointers_iterator(struct cu *cu, void *class_name) { type_id_t target_type_id, id; struct tag *target = cu__find_struct_by_name(cu, class_name, 0, &target_type_id), *pos; if (target == NULL) return 0; cu__for_each_type(cu, id, pos) if (pointer_filter(pos, cu, target_type_id)) structures__add(&pointers, pos, cu); return 0; } static void class__find_pointers(const char *class_name) { cus__for_each_cu(methods_cus, cu_find_pointers_iterator, (void *)class_name, cu_filter); } /* * We want just the DW_TAG_structure_type tags that have as its first member * a struct of type target. */ static struct tag *alias_filter(struct tag *tag, const struct cu *cu, type_id_t target_type_id) { struct type *type; struct class_member *first_member; if (!tag__is_struct(tag)) return NULL; type = tag__type(tag); if (type->nr_members == 0) return NULL; first_member = list_first_entry(&type->namespace.tags, struct class_member, tag.node); if (first_member->tag.type != target_type_id) return NULL; if (structures__find(&aliases, class__name(tag__class(tag), cu))) return NULL; return tag; } static void class__find_aliases(const char *class_name); /* * Iterate thru all the tags in the compilation unit, looking for classes * that have as its first member the specified "class" (struct). */ static int cu_find_aliases_iterator(struct cu *cu, void *class_name) { type_id_t target_type_id, id; struct tag *target = cu__find_struct_by_name(cu, class_name, 0, &target_type_id), *pos; if (target == NULL) return 0; cu__for_each_type(cu, id, pos) { if (alias_filter(pos, cu, target_type_id)) { const char *alias_name = class__name(tag__class(pos), cu); structures__add(&aliases, pos, cu); /* * Now find aliases to this alias, e.g.: * * struct tcp_sock { * struct inet_connection_sock { * struct inet_sock { * struct sock { * } * } * } * } */ class__find_aliases(alias_name); } } return 0; } static void class__find_aliases(const char *class_name) { cus__for_each_cu(methods_cus, cu_find_aliases_iterator, (void *)class_name, cu_filter); } static void emit_list_of_types(struct list_head *list, const struct cu *cu) { struct structure *pos; list_for_each_entry(pos, list, node) { struct type *type = tag__type(pos->class); /* * Lets look at the other CUs, perhaps we have already * emmited this one */ if (type_emissions__find_definition(&emissions, cu, structure__name(pos))) { type->definition_emitted = 1; continue; } type__emit_definitions(pos->class, pos->cu, &emissions, fp_classes); type->definition_emitted = 1; type__emit(pos->class, pos->cu, NULL, NULL, fp_classes); tag__type(pos->class)->definition_emitted = 1; fputc('\n', fp_classes); } } static int class__emit_classes(struct tag *tag, struct cu *cu) { struct class *class = tag__class(tag); int err = -1; char mini_class_name[128]; snprintf(mini_class_name, sizeof(mini_class_name), "ctracer__mini_%s", class__name(class, cu)); mini_class = class__clone_base_types(tag, cu, mini_class_name); if (mini_class == NULL) goto out; type__emit_definitions(tag, cu, &emissions, fp_classes); type__emit(tag, cu, NULL, NULL, fp_classes); fputs("\n/* class aliases */\n\n", fp_classes); emit_list_of_types(&aliases, cu); fputs("\n/* class with pointers */\n\n", fp_classes); emit_list_of_types(&pointers, cu); class__fprintf(mini_class, cu, fp_classes); fputs(";\n\n", fp_classes); class__emit_class_state_collector(class, cu, mini_class); err = 0; out: return err; } /* * Emit the kprobes routine for one of the selected "methods", later we'll * put this into the 'kprobes' table, in 
static int function__emit_probes(struct function *func, uint32_t function_id, const struct cu *cu, const type_id_t target_type_id, int probe_type, const char *member) { struct parameter *pos; const char *name = function__name(func, cu); fprintf(fp_methods, "probe %s%s = kernel.function(\"%s@%s\")%s\n" "{\n" "}\n\n" "probe %s%s\n" "{\n", name, probe_type == 0 ? "" : "__return", name, cu->name, probe_type == 0 ? "" : ".return", name, probe_type == 0 ? "" : "__return"); list_for_each_entry(pos, &func->proto.parms, tag.node) { struct tag *type = cu__type(cu, pos->tag.type); tag__assert_search_result(type); if (!tag__is_pointer_to(type, target_type_id)) continue; if (member != NULL) fprintf(fp_methods, "\tif ($%s)\n\t", parameter__name(pos, cu)); fprintf(fp_methods, "\tctracer__method_hook(%d, %d, $%s%s%s, %d);\n", probe_type, function_id, parameter__name(pos, cu), member ? "->" : "", member ?: "", class__size(mini_class)); break; } fputs("}\n\n", fp_methods); fflush(fp_methods); return 0; } /* * Iterate thru the list of methods previously collected by * cu_find_methods_iterator, emitting the probes for function entry and exit. */ static int cu_emit_probes_iterator(struct cu *cu, void *cookie) { type_id_t target_type_id; struct tag *target = cu__find_struct_by_name(cu, cookie, 0, &target_type_id); struct function *pos; /* OK, this type is not present in this compile unit */ if (target == NULL) return 0; list_for_each_entry(pos, &cu->tool_list, tool_node) { uint32_t function_id = (long)pos->priv; if (methods__add(&probes_emitted, function__name(pos, cu)) != 0) continue; function__emit_probes(pos, function_id, cu, target_type_id, 0, NULL); /* entry */ function__emit_probes(pos, function_id, cu, target_type_id, 1, NULL); /* exit */ } return 0; } /* * Iterate thru the list of methods previously collected by * cu_find_methods_iterator, emitting the probes for functions that take a * pointer to a struct that has a pointer to the target type. */
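/* * Sketch of the pointer case (hypothetical names): if struct sk_buff has a member sk pointing to the target struct sock, a method taking a struct sk_buff *skb parameter gets the hook applied to the dereferenced member: * * if ($skb) * ctracer__method_hook(0, 42, $skb->sk, 16); */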
static int cu_emit_pointer_probes_iterator(struct cu *cu, void *cookie) { type_id_t target_type_id, pointer_id; struct tag *target, *pointer; struct function *pos_tag; struct class_member *pos_member; /* This CU doesn't have our classes */ if (list_empty(&cu->tool_list)) return 0; target = cu__find_struct_by_name(cu, class_name, 1, &target_type_id); pointer = cu__find_struct_by_name(cu, cookie, 0, &pointer_id); /* OK, this type is not present in this compile unit */ if (target == NULL || pointer == NULL) return 0; /* for now just for the first member that is a pointer */ type__for_each_member(tag__type(pointer), pos_member) { struct tag *ctype = cu__type(cu, pos_member->tag.type); tag__assert_search_result(ctype); if (tag__is_pointer_to(ctype, target_type_id)) break; } list_for_each_entry(pos_tag, &cu->tool_list, tool_node) { uint32_t function_id = (long)pos_tag->priv; if (methods__add(&probes_emitted, function__name(pos_tag, cu)) != 0) continue; function__emit_probes(pos_tag, function_id, cu, target_type_id, 0, class_member__name(pos_member, cu)); /* entry */ function__emit_probes(pos_tag, function_id, cu, target_type_id, 1, class_member__name(pos_member, cu)); /* exit */ } return 0; } /* * Iterate thru the list of methods previously collected by * cu_find_methods_iterator, creating the functions table that will * be used by ostra-cg */ static int cu_emit_functions_table(struct cu *cu, void *fp) { struct function *pos; list_for_each_entry(pos, &cu->tool_list, tool_node) if (pos->priv != NULL) { uint32_t function_id = (long)pos->priv; fprintf(fp, "%d:%s\n", function_id, function__name(pos, cu)); pos->priv = NULL; } return 0; } static int elf__open(const char *filename) { int fd = open(filename, O_RDONLY); if (fd < 0) return -1; int err = -1; if (elf_version(EV_CURRENT) == EV_NONE) { fprintf(stderr, "%s: cannot set libelf version.\n", __func__); goto out_close; } Elf *elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); if (elf == NULL) { fprintf(stderr, "%s: cannot read %s ELF file.\n", __func__, filename); goto out_close; } GElf_Ehdr ehdr; if (gelf_getehdr(elf, &ehdr) == NULL) { fprintf(stderr, "%s: cannot get elf header.\n", __func__); goto out_elf_end; } GElf_Shdr shdr; size_t init_index; Elf_Scn *init = elf_section_by_name(elf, &ehdr, &shdr, ".init.text", &init_index); if (init == NULL) goto out_elf_end; struct elf_symtab *symtab = elf_symtab__new(".symtab", elf, &ehdr); if (symtab == NULL) goto out_elf_end; init_blacklist = strlist__new(true); if (init_blacklist == NULL) goto out_elf_symtab_delete; uint32_t index; GElf_Sym sym; elf_symtab__for_each_symbol(symtab, index, sym) { if (!elf_sym__is_local_function(&sym)) continue; if (elf_sym__section(&sym) != init_index) continue; err = strlist__add(init_blacklist, elf_sym__name(&sym, symtab)); if (err == -ENOMEM) { fprintf(stderr, "failed for %s(%d,%zu)\n", elf_sym__name(&sym, symtab), elf_sym__section(&sym), init_index); goto out_delete_blacklist; } } err = 0; out_elf_symtab_delete: elf_symtab__delete(symtab); out_elf_end: elf_end(elf); out_close: close(fd); return err; out_delete_blacklist: strlist__delete(init_blacklist); goto out_elf_symtab_delete; }
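/* * The .init.text blacklist built by elf__open keeps ctracer away from functions that the kernel discards after boot, where a probe would reference unmapped code. The collected names are consulted when selecting the methods, roughly as in this sketch: * * if (strlist__has_entry(init_blacklist, function__name(pos, cu))) * continue; */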
/* Name and version of program. */ ARGP_PROGRAM_VERSION_HOOK_DEF = dwarves_print_version; static const struct argp_option ctracer__options[] = { { .key = 'd', .name = "src_dir", .arg = "SRC_DIR", .doc = "generate source files in this directory", }, { .key = 'C', .name = "cu_blacklist", .arg = "FILE", .doc = "Blacklist the CUs in FILE", }, { .key = 'D', .name = "dir", .arg = "DIR", .doc = "load files in this directory", }, { .key = 'g', .name = "glob", .arg = "GLOB", .doc = "file mask to load", }, { .key = 'r', .name = "recursive", .doc = "recursively load files", }, { .name = NULL, } }; static const char *dirname, *glob; static int recursive; static error_t ctracer__options_parser(int key, char *arg, struct argp_state *state __unused) { switch (key) { case 'd': src_dir = arg; break; case 'C': cu_blacklist_filename = arg; break; case 'D': dirname = arg; break; case 'g': glob = arg; break; case 'r': recursive = 1; break; default: return ARGP_ERR_UNKNOWN; } return 0; } static const char ctracer__args_doc[] = "FILE CLASS"; static struct argp ctracer__argp = { .options = ctracer__options, .parser = ctracer__options_parser, .args_doc = ctracer__args_doc, }; int main(int argc, char *argv[]) { int remaining, err; struct tag *class; struct cu *cu; char *filename; char functions_filename[PATH_MAX]; char methods_filename[PATH_MAX]; char collector_filename[PATH_MAX]; char classes_filename[PATH_MAX]; struct structure *pos; FILE *fp_functions; int rc = EXIT_FAILURE; if (dwarves__init(0)) { fputs("ctracer: insufficient memory\n", stderr); goto out; } if (argp_parse(&ctracer__argp, argc, argv, 0, &remaining, NULL) || remaining < argc) { switch (argc - remaining) { case 1: goto failure; case 2: filename = argv[remaining++]; class_name = argv[remaining++]; break; default: goto failure; } } else { failure: argp_help(&ctracer__argp, stderr, ARGP_HELP_SEE, argv[0]); goto out; } type_emissions__init(&emissions); /* * Create the methods_cus (Compilation Units) object where we will * load the objects in which we'll look for functions with pointers * to the specified class, i.e. to find its "methods", where we'll * insert the entry and exit hooks. */ methods_cus = cus__new(); if (methods_cus == NULL) { fputs("ctracer: insufficient memory\n", stderr); goto out; } /* * If --dir/-D was specified, recursively traverse the path looking for * object files (compilation units) that match the specified glob: * "*.ko" for kernel modules, but possibly "*.o" in the future, when we * support uprobes for user space tracing. */ if (dirname != NULL && cus__load_dir(methods_cus, NULL, dirname, glob, recursive) != 0) { fprintf(stderr, "ctracer: couldn't load DWARF info " "from %s dir with glob %s\n", dirname, glob); goto out; }
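/* * E.g. (sketch): "ctracer -d /tmp/ctracer vmlinux sock" arrives here with filename = "vmlinux" and class_name = "sock"; the generated sources (ctracer_methods.stp, ctracer_collector.c, ctracer_classes.h, ctracer2ostra.c, sock.functions and sock.fields) all land in /tmp/ctracer. */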
/* * If a filename was specified, for instance "vmlinux", load it too. */ if (filename != NULL) { if (elf__open(filename)) { fprintf(stderr, "ctracer: couldn't load ELF symtab " "info from %s\n", filename); goto out; } err = cus__load_file(methods_cus, NULL, filename); if (err != 0) { cus__print_error_msg("ctracer", methods_cus, filename, err); goto out; } } /* * See if the specified struct exists: */ class = cus__find_struct_by_name(methods_cus, &cu, class_name, 0, NULL); if (class == NULL) { fprintf(stderr, "ctracer: struct %s not found!\n", class_name); goto out; } snprintf(functions_filename, sizeof(functions_filename), "%s/%s.functions", src_dir, class__name(tag__class(class), cu)); fp_functions = fopen(functions_filename, "w"); if (fp_functions == NULL) { fprintf(stderr, "ctracer: couldn't create %s\n", functions_filename); goto out; } snprintf(methods_filename, sizeof(methods_filename), "%s/ctracer_methods.stp", src_dir); fp_methods = fopen(methods_filename, "w"); if (fp_methods == NULL) { fprintf(stderr, "ctracer: couldn't create %s\n", methods_filename); goto out; } snprintf(collector_filename, sizeof(collector_filename), "%s/ctracer_collector.c", src_dir); fp_collector = fopen(collector_filename, "w"); if (fp_collector == NULL) { fprintf(stderr, "ctracer: couldn't create %s\n", collector_filename); goto out; } snprintf(classes_filename, sizeof(classes_filename), "%s/ctracer_classes.h", src_dir); fp_classes = fopen(classes_filename, "w"); if (fp_classes == NULL) { fprintf(stderr, "ctracer: couldn't create %s\n", classes_filename); goto out; } fputs("%{\n" "#include \"ctracer_relay.h\"\n" "%}\n" "function ctracer__method_hook(probe_type, func, object, state_len)\n" "%{\n" "\tctracer__method_hook(_stp_gettimeofday_ns(), " "THIS->probe_type, THIS->func, " "(void *)(long)THIS->object, " "THIS->state_len);\n" "%}\n\n", fp_methods); fputs("\n#include \"ctracer_classes.h\"\n\n", fp_collector); class__find_aliases(class_name); class__find_pointers(class_name); class__emit_classes(class, cu); fputc('\n', fp_collector); class__emit_ostra_converter(class, cu); cu_blacklist = strlist__new(true); if (cu_blacklist != NULL && cu_blacklist_filename != NULL) strlist__load(cu_blacklist, cu_blacklist_filename); cus__for_each_cu(methods_cus, cu_find_methods_iterator, class_name, cu_filter); cus__for_each_cu(methods_cus, cu_emit_probes_iterator, class_name, cu_filter); cus__for_each_cu(methods_cus, cu_emit_functions_table, fp_functions, cu_filter); list_for_each_entry(pos, &aliases, node) { const char *alias_name = structure__name(pos); cus__for_each_cu(methods_cus, cu_find_methods_iterator, (void *)alias_name, cu_filter); cus__for_each_cu(methods_cus, cu_emit_probes_iterator, (void *)alias_name, cu_filter); cus__for_each_cu(methods_cus, cu_emit_functions_table, fp_functions, cu_filter); } list_for_each_entry(pos, &pointers, node) { const char *pointer_name = structure__name(pos); cus__for_each_cu(methods_cus, cu_find_methods_iterator, (void *)pointer_name, cu_filter); cus__for_each_cu(methods_cus, cu_emit_pointer_probes_iterator, (void *)pointer_name, cu_filter); cus__for_each_cu(methods_cus, cu_emit_functions_table, fp_functions, cu_filter); } fclose(fp_methods); fclose(fp_collector); fclose(fp_functions); fclose(fp_classes); strlist__delete(cu_blacklist); rc = EXIT_SUCCESS; out: cus__delete(methods_cus); dwarves__exit(); return rc; } dwarves-dfsg-1.15/dtagnames.c000066400000000000000000000024001350511416500161400ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2006 Mandriva Conectiva S.A.
Copyright (C) 2006 Arnaldo Carvalho de Melo */ #include <malloc.h> #include <stdio.h> #include <stdlib.h> #include "dwarves.h" #include "dutil.h" static void print_malloc_stats(void) { struct mallinfo m = mallinfo(); fprintf(stderr, "size: %u\n", m.uordblks); } static int class__tag_name(struct tag *tag, struct cu *cu __unused, void *cookie __unused) { puts(dwarf_tag_name(tag->tag)); return 0; } static int cu__dump_class_tag_names(struct cu *cu, void *cookie __unused) { cu__for_all_tags(cu, class__tag_name, NULL); return 0; } static void cus__dump_class_tag_names(struct cus *cus) { cus__for_each_cu(cus, cu__dump_class_tag_names, NULL, NULL); } int main(int argc __unused, char *argv[]) { int err, rc = EXIT_FAILURE; struct cus *cus = cus__new(); if (dwarves__init(0) || cus == NULL) { fputs("dtagnames: insufficient memory\n", stderr); goto out; } err = cus__load_files(cus, NULL, argv + 1); if (err != 0) { cus__fprintf_load_files_err(cus, "dtagnames", argv + 1, err, stderr); goto out; } cus__dump_class_tag_names(cus); print_malloc_stats(); rc = EXIT_SUCCESS; out: cus__delete(cus); dwarves__exit(); return rc; } dwarves-dfsg-1.15/dutil.c000066400000000000000000000066501350511416500153310ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2007 Arnaldo Carvalho de Melo */ #include "dutil.h" #include <errno.h> #include <stdio.h> #include <stdlib.h> #include <string.h> void *zalloc(const size_t size) { void *s = malloc(size); if (s != NULL) memset(s, 0, size); return s; } struct str_node *str_node__new(const char *s, bool dupstr) { struct str_node *snode = malloc(sizeof(*snode)); if (snode != NULL) { if (dupstr) { s = strdup(s); if (s == NULL) goto out_delete; } snode->s = s; } return snode; out_delete: free(snode); return NULL; } static void str_node__delete(struct str_node *snode, bool dupstr) { if (dupstr) free((void *)snode->s); free(snode); } int strlist__add(struct strlist *slist, const char *new_entry) { struct rb_node **p = &slist->entries.rb_node; struct rb_node *parent = NULL; struct str_node *sn; while (*p != NULL) { int rc; parent = *p; sn = rb_entry(parent, struct str_node, rb_node); rc = strcmp(sn->s, new_entry); if (rc > 0) p = &(*p)->rb_left; else if (rc < 0) p = &(*p)->rb_right; else return -EEXIST; } sn = str_node__new(new_entry, slist->dupstr); if (sn == NULL) return -ENOMEM; rb_link_node(&sn->rb_node, parent, p); rb_insert_color(&sn->rb_node, &slist->entries); return 0; } int strlist__load(struct strlist *slist, const char *filename) { char entry[1024]; int err = -1; FILE *fp = fopen(filename, "r"); if (fp == NULL) return -1; while (fgets(entry, sizeof(entry), fp) != NULL) { const size_t len = strlen(entry); if (len == 0) continue; entry[len - 1] = '\0'; if (strlist__add(slist, entry) != 0) goto out; } err = 0; out: fclose(fp); return err; } struct strlist *strlist__new(bool dupstr) { struct strlist *slist = malloc(sizeof(*slist)); if (slist != NULL) { slist->entries = RB_ROOT; slist->dupstr = dupstr; } return slist; } void strlist__delete(struct strlist *slist) { if (slist != NULL) { struct str_node *pos; struct rb_node *next = rb_first(&slist->entries); while (next) { pos = rb_entry(next, struct str_node, rb_node); next = rb_next(&pos->rb_node); strlist__remove(slist, pos); } slist->entries = RB_ROOT; free(slist); } } void strlist__remove(struct strlist *slist, struct str_node *sn) { rb_erase(&sn->rb_node, &slist->entries); str_node__delete(sn, slist->dupstr); }
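/* * Usage sketch for the strlist API declared in dutil.h: * * struct strlist *sl = strlist__new(true); * * if (sl != NULL && strlist__add(sl, "foo") == 0 && strlist__has_entry(sl, "foo")) * ... "foo" was strdup'ed and now lives in the rbtree ... * * strlist__delete(sl); */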
bool strlist__has_entry(struct strlist *slist, const char *entry) { struct rb_node **p = &slist->entries.rb_node; struct rb_node *parent = NULL; while (*p != NULL) { struct str_node *sn; int rc; parent = *p; sn = rb_entry(parent, struct str_node, rb_node); rc = strcmp(sn->s, entry); if (rc > 0) p = &(*p)->rb_left; else if (rc < 0) p = &(*p)->rb_right; else return true; } return false; } Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, GElf_Shdr *shp, const char *name, size_t *index) { Elf_Scn *sec = NULL; size_t cnt = 1; while ((sec = elf_nextscn(elf, sec)) != NULL) { char *str; gelf_getshdr(sec, shp); str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); if (!strcmp(name, str)) { if (index) *index = cnt; break; } ++cnt; } return sec; } dwarves-dfsg-1.15/dutil.h000066400000000000000000000174601350511416500153370ustar00rootroot00000000000000#ifndef _DUTIL_H_ #define _DUTIL_H_ 1 /* SPDX-License-Identifier: GPL-2.0-only * Copyright (C) 2007..2009 Arnaldo Carvalho de Melo * * Some functions came from the Linux Kernel sources, copyrighted by a * cast of dozens, please see the Linux Kernel git history for details. */ #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <string.h> #include <gelf.h> #include <asm/bitsperlong.h> #include "rbtree.h" #define BITS_PER_LONG __BITS_PER_LONG #ifndef __unused #define __unused __attribute__ ((unused)) #endif #ifndef __pure #define __pure __attribute__ ((pure)) #endif #define roundup(x,y) ((((x) + ((y) - 1)) / (y)) * (y)) static inline __attribute__((const)) bool is_power_of_2(unsigned long n) { return (n != 0 && ((n & (n - 1)) == 0)); } /** * fls - find last (most-significant) bit set * @x: the word to search * * This is defined the same way as ffs. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ static __always_inline int fls(int x) { return x ? sizeof(x) * 8 - __builtin_clz(x) : 0; } /** * fls64 - find last set bit in a 64-bit word * @x: the word to search * * This is defined in a similar way as the libc and compiler builtin * ffsll, but returns the position of the most significant set bit. * * fls64(value) returns 0 if value is 0 or the position of the last * set bit if value is nonzero. The last (most significant) bit is * at position 64. */ #if BITS_PER_LONG == 32 static __always_inline int fls64(uint64_t x) { uint32_t h = x >> 32; if (h) return fls(h) + 32; return fls(x); } #elif BITS_PER_LONG == 64 /** * __fls - find last (most-significant) set bit in a long word * @word: the word to search * * Undefined if no set bit exists, so code should check against 0 first.
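* * Examples: __fls(1) == 0 and __fls(~0ul) == BITS_PER_LONG - 1, in contrast with the 1-based fls() above, where fls(1) == 1.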
*/ static __always_inline unsigned long __fls(unsigned long word) { int num = BITS_PER_LONG - 1; #if BITS_PER_LONG == 64 if (!(word & (~0ul << 32))) { num -= 32; word <<= 32; } #endif if (!(word & (~0ul << (BITS_PER_LONG-16)))) { num -= 16; word <<= 16; } if (!(word & (~0ul << (BITS_PER_LONG-8)))) { num -= 8; word <<= 8; } if (!(word & (~0ul << (BITS_PER_LONG-4)))) { num -= 4; word <<= 4; } if (!(word & (~0ul << (BITS_PER_LONG-2)))) { num -= 2; word <<= 2; } if (!(word & (~0ul << (BITS_PER_LONG-1)))) num -= 1; return num; } static __always_inline int fls64(uint64_t x) { if (x == 0) return 0; return __fls(x) + 1; } #else #error BITS_PER_LONG not 32 or 64 #endif static inline unsigned fls_long(unsigned long l) { if (sizeof(l) == 4) return fls(l); return fls64(l); } /* * round up to nearest power of two */ static inline __attribute__((const)) unsigned long __roundup_pow_of_two(unsigned long n) { return 1UL << fls_long(n - 1); } /* * non-constant log of base 2 calculators * - the arch may override these in asm/bitops.h if they can be implemented * more efficiently than using fls() and fls64() * - the arch is not required to handle n==0 if implementing the fallback */ static inline __attribute__((const)) int __ilog2_u32(uint32_t n) { return fls(n) - 1; } static inline __attribute__((const)) int __ilog2_u64(uint64_t n) { return fls64(n) - 1; } /* * deal with unrepresentable constant logarithms */ extern __attribute__((const)) int ____ilog2_NaN(void); /** * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value * @n - parameter * * constant-capable log of base 2 calculation * - this can be used to initialise global variables from constant data, hence * the massive ternary operator construction * * selects the appropriately-sized optimised version depending on sizeof(n) */ #define ilog2(n) \ ( \ __builtin_constant_p(n) ? ( \ (n) < 1 ? ____ilog2_NaN() : \ (n) & (1ULL << 63) ? 63 : \ (n) & (1ULL << 62) ? 62 : \ (n) & (1ULL << 61) ? 61 : \ (n) & (1ULL << 60) ? 60 : \ (n) & (1ULL << 59) ? 59 : \ (n) & (1ULL << 58) ? 58 : \ (n) & (1ULL << 57) ? 57 : \ (n) & (1ULL << 56) ? 56 : \ (n) & (1ULL << 55) ? 55 : \ (n) & (1ULL << 54) ? 54 : \ (n) & (1ULL << 53) ? 53 : \ (n) & (1ULL << 52) ? 52 : \ (n) & (1ULL << 51) ? 51 : \ (n) & (1ULL << 50) ? 50 : \ (n) & (1ULL << 49) ? 49 : \ (n) & (1ULL << 48) ? 48 : \ (n) & (1ULL << 47) ? 47 : \ (n) & (1ULL << 46) ? 46 : \ (n) & (1ULL << 45) ? 45 : \ (n) & (1ULL << 44) ? 44 : \ (n) & (1ULL << 43) ? 43 : \ (n) & (1ULL << 42) ? 42 : \ (n) & (1ULL << 41) ? 41 : \ (n) & (1ULL << 40) ? 40 : \ (n) & (1ULL << 39) ? 39 : \ (n) & (1ULL << 38) ? 38 : \ (n) & (1ULL << 37) ? 37 : \ (n) & (1ULL << 36) ? 36 : \ (n) & (1ULL << 35) ? 35 : \ (n) & (1ULL << 34) ? 34 : \ (n) & (1ULL << 33) ? 33 : \ (n) & (1ULL << 32) ? 32 : \ (n) & (1ULL << 31) ? 31 : \ (n) & (1ULL << 30) ? 30 : \ (n) & (1ULL << 29) ? 29 : \ (n) & (1ULL << 28) ? 28 : \ (n) & (1ULL << 27) ? 27 : \ (n) & (1ULL << 26) ? 26 : \ (n) & (1ULL << 25) ? 25 : \ (n) & (1ULL << 24) ? 24 : \ (n) & (1ULL << 23) ? 23 : \ (n) & (1ULL << 22) ? 22 : \ (n) & (1ULL << 21) ? 21 : \ (n) & (1ULL << 20) ? 20 : \ (n) & (1ULL << 19) ? 19 : \ (n) & (1ULL << 18) ? 18 : \ (n) & (1ULL << 17) ? 17 : \ (n) & (1ULL << 16) ? 16 : \ (n) & (1ULL << 15) ? 15 : \ (n) & (1ULL << 14) ? 14 : \ (n) & (1ULL << 13) ? 13 : \ (n) & (1ULL << 12) ? 12 : \ (n) & (1ULL << 11) ? 11 : \ (n) & (1ULL << 10) ? 10 : \ (n) & (1ULL << 9) ? 9 : \ (n) & (1ULL << 8) ? 8 : \ (n) & (1ULL << 7) ? 7 : \ (n) & (1ULL << 6) ? 6 : \ (n) & (1ULL << 5) ? 5 : \ (n) & (1ULL << 4) ? 
4 : \ (n) & (1ULL << 3) ? 3 : \ (n) & (1ULL << 2) ? 2 : \ (n) & (1ULL << 1) ? 1 : \ (n) & (1ULL << 0) ? 0 : \ ____ilog2_NaN() \ ) : \ (sizeof(n) <= 4) ? \ __ilog2_u32(n) : \ __ilog2_u64(n) \ ) /** * roundup_pow_of_two - round the given value up to nearest power of two * @n - parameter * * round the given value up to the nearest power of two * - the result is undefined when n == 0 * - this can be used to initialise global variables from constant data */ #define roundup_pow_of_two(n) \ ( \ __builtin_constant_p(n) ? ( \ (n == 1) ? 1 : \ (1UL << (ilog2((n) - 1) + 1)) \ ) : \ __roundup_pow_of_two(n) \ ) /* We need to define two variables, argp_program_version_hook and argp_program_bug_address, in all programs. argp.h declares these variables as non-const (which is correct in general). But we can do better, as they are not going to change. So we want to move them into the .rodata section. Define macros to do the trick. */ #define ARGP_PROGRAM_VERSION_HOOK_DEF \ void (*const apvh) (FILE *, struct argp_state *) \ __asm ("argp_program_version_hook") #define ARGP_PROGRAM_BUG_ADDRESS_DEF \ const char *const apba__ __asm ("argp_program_bug_address") struct str_node { struct rb_node rb_node; const char *s; }; struct strlist { struct rb_root entries; bool dupstr; }; struct strlist *strlist__new(bool dupstr); void strlist__delete(struct strlist *slist); void strlist__remove(struct strlist *slist, struct str_node *sn); int strlist__load(struct strlist *slist, const char *filename); int strlist__add(struct strlist *slist, const char *str); bool strlist__has_entry(struct strlist *slist, const char *entry); static inline bool strlist__empty(const struct strlist *slist) { return rb_first(&slist->entries) == NULL; } /** * strstarts - does @str start with @prefix? * @str: string to examine * @prefix: prefix to look for.
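* * E.g. strstarts("dwarves-dfsg-1.15", "dwarves") is true, while strstarts("dwarves", "dwarves-dfsg") is false.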
*/ static inline bool strstarts(const char *str, const char *prefix) { return strncmp(str, prefix, strlen(prefix)) == 0; } void *zalloc(const size_t size); Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, GElf_Shdr *shp, const char *name, size_t *index); #ifndef SHT_GNU_ATTRIBUTES /* Just a way to check if we're using an old elfutils version */ static inline int elf_getshdrstrndx(Elf *elf, size_t *dst) { return elf_getshstrndx(elf, dst); } #endif #endif /* _DUTIL_H_ */ dwarves-dfsg-1.15/dwarf_loader.c000066400000000000000000001740701350511416500166430ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2008 Arnaldo Carvalho de Melo */ #include <assert.h> #include <dirent.h> #include <dwarf.h> #include <elfutils/libdwfl.h> #include <errno.h> #include <fcntl.h> #include <fnmatch.h> #include <libelf.h> #include <obstack.h> #include <search.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <zlib.h> #include "config.h" #include "list.h" #include "dwarves.h" #include "dutil.h" #include "strings.h" #include "hash.h" struct strings *strings; #ifndef DW_AT_GNU_vector #define DW_AT_GNU_vector 0x2107 #endif #ifndef DW_TAG_GNU_call_site #define DW_TAG_GNU_call_site 0x4109 #define DW_TAG_GNU_call_site_parameter 0x410a #endif #define hashtags__fn(key) hash_64(key, HASHTAGS__BITS) bool no_bitfield_type_recode = true; static void __tag__print_not_supported(uint32_t tag, const char *func) { #ifdef STB_GNU_UNIQUE static bool dwarf_tags_warned[DW_TAG_rvalue_reference_type]; static bool dwarf_gnu_tags_warned[DW_TAG_GNU_formal_parameter_pack - DW_TAG_MIPS_loop]; #else static bool dwarf_tags_warned[DW_TAG_shared_type]; static bool dwarf_gnu_tags_warned[DW_TAG_class_template - DW_TAG_MIPS_loop]; #endif if (tag < DW_TAG_MIPS_loop) { if (dwarf_tags_warned[tag]) return; dwarf_tags_warned[tag] = true; } else { uint32_t t = tag - DW_TAG_MIPS_loop; if (dwarf_gnu_tags_warned[t]) return; dwarf_gnu_tags_warned[t] = true; } fprintf(stderr, "%s: tag not supported %#x (%s)!\n", func, tag, dwarf_tag_name(tag)); } #define tag__print_not_supported(tag) \ __tag__print_not_supported(tag, __func__) struct dwarf_off_ref { unsigned int from_types : 1; Dwarf_Off off; }; typedef struct dwarf_off_ref dwarf_off_ref; struct dwarf_tag { struct hlist_node hash_node; dwarf_off_ref type; Dwarf_Off id; union { dwarf_off_ref abstract_origin; dwarf_off_ref containing_type; }; struct tag *tag; uint32_t small_id; strings_t decl_file; uint16_t decl_line; }; static dwarf_off_ref dwarf_tag__spec(struct dwarf_tag *dtag) { return *(dwarf_off_ref *)(dtag + 1); } static void dwarf_tag__set_spec(struct dwarf_tag *dtag, dwarf_off_ref spec) { *(dwarf_off_ref *)(dtag + 1) = spec; } #define HASHTAGS__BITS 8 #define HASHTAGS__SIZE (1UL << HASHTAGS__BITS) #define obstack_chunk_alloc malloc #define obstack_chunk_free free static void *obstack_zalloc(struct obstack *obstack, size_t size) { void *o = obstack_alloc(obstack, size); if (o) memset(o, 0, size); return o; } struct dwarf_cu { struct hlist_head hash_tags[HASHTAGS__SIZE]; struct hlist_head hash_types[HASHTAGS__SIZE]; struct obstack obstack; struct cu *cu; struct dwarf_cu *type_unit; }; static void dwarf_cu__init(struct dwarf_cu *dcu) { unsigned int i; for (i = 0; i < HASHTAGS__SIZE; ++i) { INIT_HLIST_HEAD(&dcu->hash_tags[i]); INIT_HLIST_HEAD(&dcu->hash_types[i]); } obstack_init(&dcu->obstack); dcu->type_unit = NULL; } static void hashtags__hash(struct hlist_head *hashtable, struct dwarf_tag *dtag) { struct hlist_head *head = hashtable + hashtags__fn(dtag->id); hlist_add_head(&dtag->hash_node, head); } static struct dwarf_tag *hashtags__find(const struct hlist_head *hashtable, const Dwarf_Off id) { if (id
== 0) return NULL; struct dwarf_tag *tpos; struct hlist_node *pos; uint16_t bucket = hashtags__fn(id); const struct hlist_head *head = hashtable + bucket; hlist_for_each_entry(tpos, pos, head, hash_node) { if (tpos->id == id) return tpos; } return NULL; } static void cu__hash(struct cu *cu, struct tag *tag) { struct dwarf_cu *dcu = cu->priv; struct hlist_head *hashtable = tag__is_tag_type(tag) ? dcu->hash_types : dcu->hash_tags; hashtags__hash(hashtable, tag->priv); } static struct dwarf_tag *dwarf_cu__find_tag_by_ref(const struct dwarf_cu *cu, const struct dwarf_off_ref *ref) { if (cu == NULL) return NULL; if (ref->from_types) { return NULL; } return hashtags__find(cu->hash_tags, ref->off); } static struct dwarf_tag *dwarf_cu__find_type_by_ref(const struct dwarf_cu *dcu, const struct dwarf_off_ref *ref) { if (dcu == NULL) return NULL; if (ref->from_types) { dcu = dcu->type_unit; if (dcu == NULL) { return NULL; } } return hashtags__find(dcu->hash_types, ref->off); } extern struct strings *strings; static void *memdup(const void *src, size_t len, struct cu *cu) { void *s = obstack_alloc(&cu->obstack, len); if (s != NULL) memcpy(s, src, len); return s; } /* Number decoding macros. See 7.6 Variable Length Data. */ #define get_uleb128_step(var, addr, nth, break) \ __b = *(addr)++; \ var |= (uintmax_t) (__b & 0x7f) << (nth * 7); \ if ((__b & 0x80) == 0) \ break #define get_uleb128_rest_return(var, i, addrp) \ do { \ for (; i < 10; ++i) { \ get_uleb128_step(var, *addrp, i, \ return var); \ } \ /* Other implementations set VALUE to UINT_MAX in this \ case. So we better do this as well. */ \ return UINT64_MAX; \ } while (0) static uint64_t __libdw_get_uleb128(uint64_t acc, uint32_t i, const uint8_t **addrp) { uint8_t __b; get_uleb128_rest_return (acc, i, addrp); } #define get_uleb128(var, addr) \ do { \ uint8_t __b; \ var = 0; \ get_uleb128_step(var, addr, 0, break); \ var = __libdw_get_uleb128 (var, 1, &(addr)); \ } while (0) static uint64_t attr_numeric(Dwarf_Die *die, uint32_t name) { Dwarf_Attribute attr; uint32_t form; if (dwarf_attr(die, name, &attr) == NULL) return 0; form = dwarf_whatform(&attr); switch (form) { case DW_FORM_addr: { Dwarf_Addr addr; if (dwarf_formaddr(&attr, &addr) == 0) return addr; } break; case DW_FORM_data1: case DW_FORM_data2: case DW_FORM_data4: case DW_FORM_data8: case DW_FORM_sdata: case DW_FORM_udata: { Dwarf_Word value; if (dwarf_formudata(&attr, &value) == 0) return value; } break; case DW_FORM_flag: case DW_FORM_flag_present: { bool value; if (dwarf_formflag(&attr, &value) == 0) return value; } break; default: fprintf(stderr, "DW_AT_<0x%x>=0x%x\n", name, form); break; } return 0; } static uint64_t dwarf_expr(const uint8_t *expr, uint32_t len __unused) { /* Common case: offset from start of the class */ if (expr[0] == DW_OP_plus_uconst || expr[0] == DW_OP_constu) { uint64_t result; ++expr; get_uleb128(result, expr); return result; } fprintf(stderr, "%s: unhandled %#x DW_OP_ operation\n", __func__, *expr); return UINT64_MAX; } static Dwarf_Off attr_offset(Dwarf_Die *die, const uint32_t name) { Dwarf_Attribute attr; Dwarf_Block block; if (dwarf_attr(die, name, &attr) == NULL) return 0; switch (dwarf_whatform(&attr)) { case DW_FORM_data1: case DW_FORM_data2: case DW_FORM_data4: case DW_FORM_data8: case DW_FORM_sdata: case DW_FORM_udata: { Dwarf_Word value; if (dwarf_formudata(&attr, &value) == 0) return value; break; } default: if (dwarf_formblock(&attr, &block) == 0) return dwarf_expr(block.data, block.length); } return 0; } static const char 
*attr_string(Dwarf_Die *die, uint32_t name) { Dwarf_Attribute attr; if (dwarf_attr(die, name, &attr) != NULL) return dwarf_formstring(&attr); return NULL; } static struct dwarf_off_ref attr_type(Dwarf_Die *die, uint32_t attr_name) { Dwarf_Attribute attr; struct dwarf_off_ref ref; if (dwarf_attr(die, attr_name, &attr) != NULL) { Dwarf_Die type_die; if (dwarf_formref_die(&attr, &type_die) != NULL) { ref.from_types = attr.form == DW_FORM_ref_sig8; ref.off = dwarf_dieoffset(&type_die); return ref; } } memset(&ref, 0, sizeof(ref)); return ref; } static int attr_location(Dwarf_Die *die, Dwarf_Op **expr, size_t *exprlen) { Dwarf_Attribute attr; if (dwarf_attr(die, DW_AT_location, &attr) != NULL) { if (dwarf_getlocation(&attr, expr, exprlen) == 0) return 0; } return 1; } static void *__tag__alloc(struct dwarf_cu *dcu, size_t size, bool spec) { struct dwarf_tag *dtag = obstack_zalloc(&dcu->obstack, (sizeof(*dtag) + (spec ? sizeof(dwarf_off_ref) : 0))); if (dtag == NULL) return NULL; struct tag *tag = obstack_zalloc(&dcu->cu->obstack, size); if (tag == NULL) return NULL; dtag->tag = tag; tag->priv = dtag; tag->type = 0; tag->top_level = 0; return tag; } static void *tag__alloc(struct cu *cu, size_t size) { return __tag__alloc(cu->priv, size, false); } static void *tag__alloc_with_spec(struct cu *cu, size_t size) { return __tag__alloc(cu->priv, size, true); } static void tag__init(struct tag *tag, struct cu *cu, Dwarf_Die *die) { struct dwarf_tag *dtag = tag->priv; tag->tag = dwarf_tag(die); dtag->id = dwarf_dieoffset(die); if (tag->tag == DW_TAG_imported_module || tag->tag == DW_TAG_imported_declaration) dtag->type = attr_type(die, DW_AT_import); else dtag->type = attr_type(die, DW_AT_type); dtag->abstract_origin = attr_type(die, DW_AT_abstract_origin); tag->recursivity_level = 0; if (cu->extra_dbg_info) { int32_t decl_line; const char *decl_file = dwarf_decl_file(die); static const char *last_decl_file; static uint32_t last_decl_file_idx; if (decl_file != last_decl_file) { last_decl_file_idx = strings__add(strings, decl_file); last_decl_file = decl_file; } dtag->decl_file = last_decl_file_idx; dwarf_decl_line(die, &decl_line); dtag->decl_line = decl_line; } INIT_LIST_HEAD(&tag->node); } static struct tag *tag__new(Dwarf_Die *die, struct cu *cu) { struct tag *tag = tag__alloc(cu, sizeof(*tag)); if (tag != NULL) tag__init(tag, cu, die); return tag; } static struct ptr_to_member_type *ptr_to_member_type__new(Dwarf_Die *die, struct cu *cu) { struct ptr_to_member_type *ptr = tag__alloc(cu, sizeof(*ptr)); if (ptr != NULL) { tag__init(&ptr->tag, cu, die); struct dwarf_tag *dtag = ptr->tag.priv; dtag->containing_type = attr_type(die, DW_AT_containing_type); } return ptr; } static struct base_type *base_type__new(Dwarf_Die *die, struct cu *cu) { struct base_type *bt = tag__alloc(cu, sizeof(*bt)); if (bt != NULL) { tag__init(&bt->tag, cu, die); bt->name = strings__add(strings, attr_string(die, DW_AT_name)); bt->bit_size = attr_numeric(die, DW_AT_byte_size) * 8; uint64_t encoding = attr_numeric(die, DW_AT_encoding); bt->is_bool = encoding == DW_ATE_boolean; bt->is_signed = encoding == DW_ATE_signed; bt->is_varargs = false; bt->name_has_encoding = true; } return bt; } static struct array_type *array_type__new(Dwarf_Die *die, struct cu *cu) { struct array_type *at = tag__alloc(cu, sizeof(*at)); if (at != NULL) { tag__init(&at->tag, cu, die); at->dimensions = 0; at->nr_entries = NULL; at->is_vector = dwarf_hasattr(die, DW_AT_GNU_vector); } return at; } static void namespace__init(struct namespace *namespace, 
Dwarf_Die *die, struct cu *cu) { tag__init(&namespace->tag, cu, die); INIT_LIST_HEAD(&namespace->tags); namespace->sname = 0; namespace->name = strings__add(strings, attr_string(die, DW_AT_name)); namespace->nr_tags = 0; namespace->shared_tags = 0; } static struct namespace *namespace__new(Dwarf_Die *die, struct cu *cu) { struct namespace *namespace = tag__alloc(cu, sizeof(*namespace)); if (namespace != NULL) namespace__init(namespace, die, cu); return namespace; } static void type__init(struct type *type, Dwarf_Die *die, struct cu *cu) { namespace__init(&type->namespace, die, cu); INIT_LIST_HEAD(&type->node); type->size = attr_numeric(die, DW_AT_byte_size); type->alignment = attr_numeric(die, DW_AT_alignment); type->declaration = attr_numeric(die, DW_AT_declaration); dwarf_tag__set_spec(type->namespace.tag.priv, attr_type(die, DW_AT_specification)); type->definition_emitted = 0; type->fwd_decl_emitted = 0; type->resized = 0; type->nr_members = 0; type->nr_static_members = 0; } static struct type *type__new(Dwarf_Die *die, struct cu *cu) { struct type *type = tag__alloc_with_spec(cu, sizeof(*type)); if (type != NULL) type__init(type, die, cu); return type; } static struct enumerator *enumerator__new(Dwarf_Die *die, struct cu *cu) { struct enumerator *enumerator = tag__alloc(cu, sizeof(*enumerator)); if (enumerator != NULL) { tag__init(&enumerator->tag, cu, die); enumerator->name = strings__add(strings, attr_string(die, DW_AT_name)); enumerator->value = attr_numeric(die, DW_AT_const_value); } return enumerator; } static enum vscope dwarf__location(Dwarf_Die *die, uint64_t *addr, struct location *location) { enum vscope scope = VSCOPE_UNKNOWN; if (attr_location(die, &location->expr, &location->exprlen) != 0) scope = VSCOPE_OPTIMIZED; else if (location->exprlen != 0) { Dwarf_Op *expr = location->expr; switch (expr->atom) { case DW_OP_addr: scope = VSCOPE_GLOBAL; *addr = expr[0].number; break; case DW_OP_reg1 ... DW_OP_reg31: case DW_OP_breg0 ... 
DW_OP_breg31: scope = VSCOPE_REGISTER; break; case DW_OP_fbreg: scope = VSCOPE_LOCAL; break; } } return scope; } enum vscope variable__scope(const struct variable *var) { return var->scope; } const char *variable__scope_str(const struct variable *var) { switch (var->scope) { case VSCOPE_LOCAL: return "local"; case VSCOPE_GLOBAL: return "global"; case VSCOPE_REGISTER: return "register"; case VSCOPE_OPTIMIZED: return "optimized"; default: break; }; return "unknown"; } static struct variable *variable__new(Dwarf_Die *die, struct cu *cu) { struct variable *var = tag__alloc(cu, sizeof(*var)); if (var != NULL) { tag__init(&var->ip.tag, cu, die); var->name = strings__add(strings, attr_string(die, DW_AT_name)); /* variable is visible outside of its enclosing cu */ var->external = dwarf_hasattr(die, DW_AT_external); /* non-defining declaration of an object */ var->declaration = dwarf_hasattr(die, DW_AT_declaration); var->scope = VSCOPE_UNKNOWN; var->ip.addr = 0; if (!var->declaration && cu->has_addr_info) var->scope = dwarf__location(die, &var->ip.addr, &var->location); } return var; } static int tag__recode_dwarf_bitfield(struct tag *tag, struct cu *cu, uint16_t bit_size) { int id; type_id_t short_id; struct tag *recoded; /* in all the cases the name is at the same offset */ strings_t name = tag__namespace(tag)->name; switch (tag->tag) { case DW_TAG_typedef: { const struct dwarf_tag *dtag = tag->priv; struct dwarf_tag *dtype = dwarf_cu__find_type_by_ref(cu->priv, &dtag->type); struct tag *type = dtype->tag; id = tag__recode_dwarf_bitfield(type, cu, bit_size); if (id < 0) return id; struct type *new_typedef = obstack_zalloc(&cu->obstack, sizeof(*new_typedef)); if (new_typedef == NULL) return -ENOMEM; recoded = (struct tag *)new_typedef; recoded->tag = DW_TAG_typedef; recoded->type = id; new_typedef->namespace.name = tag__namespace(tag)->name; } break; case DW_TAG_const_type: case DW_TAG_volatile_type: { const struct dwarf_tag *dtag = tag->priv; struct dwarf_tag *dtype = dwarf_cu__find_type_by_ref(cu->priv, &dtag->type); struct tag *type = dtype->tag; id = tag__recode_dwarf_bitfield(type, cu, bit_size); if (id == tag->type) return id; recoded = obstack_zalloc(&cu->obstack, sizeof(*recoded)); if (recoded == NULL) return -ENOMEM; recoded->tag = DW_TAG_volatile_type; recoded->type = id; } break; case DW_TAG_base_type: /* * Here we must search on the final, core cu, not on * the dwarf_cu as in dwarf there are no such things * as base_types of less than 8 bits, etc. */ recoded = cu__find_base_type_by_sname_and_size(cu, name, bit_size, &short_id); if (recoded != NULL) return short_id; struct base_type *new_bt = obstack_zalloc(&cu->obstack, sizeof(*new_bt)); if (new_bt == NULL) return -ENOMEM; recoded = (struct tag *)new_bt; recoded->tag = DW_TAG_base_type; recoded->top_level = 1; new_bt->name = name; new_bt->bit_size = bit_size; break; case DW_TAG_enumeration_type: /* * Here we must search on the final, core cu, not on * the dwarf_cu as in dwarf there are no such things * as enumeration_types of less than 8 bits, etc. 
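* * E.g. (sketch) for: * * struct s { * enum e member:4; * }; * * a 4-bit wide clone of enum e is synthesized, sharing the enumerator list with the original type but with size == 4.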
*/ recoded = cu__find_enumeration_by_sname_and_size(cu, name, bit_size, &short_id); if (recoded != NULL) return short_id; struct type *alias = tag__type(tag); struct type *new_enum = obstack_zalloc(&cu->obstack, sizeof(*new_enum)); if (new_enum == NULL) return -ENOMEM; recoded = (struct tag *)new_enum; recoded->tag = DW_TAG_enumeration_type; recoded->top_level = 1; new_enum->nr_members = alias->nr_members; /* * Share the tags */ new_enum->namespace.tags.next = &alias->namespace.tags; new_enum->namespace.shared_tags = 1; new_enum->namespace.name = name; new_enum->size = bit_size; break; default: fprintf(stderr, "%s: tag=%s, name=%s, bit_size=%d\n", __func__, dwarf_tag_name(tag->tag), strings__ptr(strings, name), bit_size); return -EINVAL; } uint32_t new_id; if (cu__add_tag(cu, recoded, &new_id) == 0) return new_id; obstack_free(&cu->obstack, recoded); return -ENOMEM; } int class_member__dwarf_recode_bitfield(struct class_member *member, struct cu *cu) { struct dwarf_tag *dtag = member->tag.priv; struct dwarf_tag *type = dwarf_cu__find_type_by_ref(cu->priv, &dtag->type); int recoded_type_id; if (type == NULL) return -ENOENT; recoded_type_id = tag__recode_dwarf_bitfield(type->tag, cu, member->bitfield_size); if (recoded_type_id < 0) return recoded_type_id; member->tag.type = recoded_type_id; return 0; } static struct class_member *class_member__new(Dwarf_Die *die, struct cu *cu, bool in_union) { struct class_member *member = tag__alloc(cu, sizeof(*member)); if (member != NULL) { tag__init(&member->tag, cu, die); member->name = strings__add(strings, attr_string(die, DW_AT_name)); member->is_static = !in_union && !dwarf_hasattr(die, DW_AT_data_member_location); member->const_value = attr_numeric(die, DW_AT_const_value); member->alignment = attr_numeric(die, DW_AT_alignment); member->byte_offset = attr_offset(die, DW_AT_data_member_location); /* * Bit offset calculated here is valid only for byte-aligned * fields. For bitfields on little-endian archs we need to * adjust them taking into account byte size of the field, * which might not be yet known. So we'll re-calculate bit * offset later, in class_member__cache_byte_size. 
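* * E.g. a 3-bit field in a 4-byte int that DWARF describes with DW_AT_bit_offset 29 sits, on little-endian, at bit position 32 - 29 - 3 == 0 from the start of the underlying int.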
*/ member->bit_offset = member->byte_offset * 8; /* * If DW_AT_byte_size is not present, byte size will be * determined later in class_member__cache_byte_size using * base integer/enum type */ member->byte_size = attr_numeric(die, DW_AT_byte_size); member->bitfield_offset = attr_numeric(die, DW_AT_bit_offset); member->bitfield_size = attr_numeric(die, DW_AT_bit_size); member->bit_hole = 0; member->bitfield_end = 0; member->visited = 0; member->accessibility = attr_numeric(die, DW_AT_accessibility); member->virtuality = attr_numeric(die, DW_AT_virtuality); member->hole = 0; } return member; } static struct parameter *parameter__new(Dwarf_Die *die, struct cu *cu) { struct parameter *parm = tag__alloc(cu, sizeof(*parm)); if (parm != NULL) { tag__init(&parm->tag, cu, die); parm->name = strings__add(strings, attr_string(die, DW_AT_name)); } return parm; } static struct inline_expansion *inline_expansion__new(Dwarf_Die *die, struct cu *cu) { struct inline_expansion *exp = tag__alloc(cu, sizeof(*exp)); if (exp != NULL) { struct dwarf_tag *dtag = exp->ip.tag.priv; tag__init(&exp->ip.tag, cu, die); dtag->decl_file = strings__add(strings, attr_string(die, DW_AT_call_file)); dtag->decl_line = attr_numeric(die, DW_AT_call_line); dtag->type = attr_type(die, DW_AT_abstract_origin); exp->ip.addr = 0; exp->high_pc = 0; if (!cu->has_addr_info) goto out; if (dwarf_lowpc(die, &exp->ip.addr)) exp->ip.addr = 0; if (dwarf_lowpc(die, &exp->high_pc)) exp->high_pc = 0; exp->size = exp->high_pc - exp->ip.addr; if (exp->size == 0) { Dwarf_Addr base, start; ptrdiff_t offset = 0; while (1) { offset = dwarf_ranges(die, offset, &base, &start, &exp->high_pc); start = (unsigned long)start; exp->high_pc = (unsigned long)exp->high_pc; if (offset <= 0) break; exp->size += exp->high_pc - start; if (exp->ip.addr == 0) exp->ip.addr = start; } } } out: return exp; } static struct label *label__new(Dwarf_Die *die, struct cu *cu) { struct label *label = tag__alloc(cu, sizeof(*label)); if (label != NULL) { tag__init(&label->ip.tag, cu, die); label->name = strings__add(strings, attr_string(die, DW_AT_name)); if (!cu->has_addr_info || dwarf_lowpc(die, &label->ip.addr)) label->ip.addr = 0; } return label; } static struct class *class__new(Dwarf_Die *die, struct cu *cu) { struct class *class = tag__alloc_with_spec(cu, sizeof(*class)); if (class != NULL) { type__init(&class->type, die, cu); INIT_LIST_HEAD(&class->vtable); class->nr_vtable_entries = class->nr_holes = class->nr_bit_holes = class->padding = class->bit_padding = 0; class->priv = NULL; } return class; } static void lexblock__init(struct lexblock *block, struct cu *cu, Dwarf_Die *die) { Dwarf_Off high_pc; if (!cu->has_addr_info || dwarf_lowpc(die, &block->ip.addr)) { block->ip.addr = 0; block->size = 0; } else if (dwarf_highpc(die, &high_pc)) block->size = 0; else block->size = high_pc - block->ip.addr; INIT_LIST_HEAD(&block->tags); block->size_inline_expansions = block->nr_inline_expansions = block->nr_labels = block->nr_lexblocks = block->nr_variables = 0; } static struct lexblock *lexblock__new(Dwarf_Die *die, struct cu *cu) { struct lexblock *block = tag__alloc(cu, sizeof(*block)); if (block != NULL) { tag__init(&block->ip.tag, cu, die); lexblock__init(block, cu, die); } return block; } static void ftype__init(struct ftype *ftype, Dwarf_Die *die, struct cu *cu) { const uint16_t tag = dwarf_tag(die); assert(tag == DW_TAG_subprogram || tag == DW_TAG_subroutine_type); tag__init(&ftype->tag, cu, die); INIT_LIST_HEAD(&ftype->parms); ftype->nr_parms = 0; ftype->unspec_parms = 
0; } static struct ftype *ftype__new(Dwarf_Die *die, struct cu *cu) { struct ftype *ftype = tag__alloc(cu, sizeof(*ftype)); if (ftype != NULL) ftype__init(ftype, die, cu); return ftype; } static struct function *function__new(Dwarf_Die *die, struct cu *cu) { struct function *func = tag__alloc_with_spec(cu, sizeof(*func)); if (func != NULL) { ftype__init(&func->proto, die, cu); lexblock__init(&func->lexblock, cu, die); func->name = strings__add(strings, attr_string(die, DW_AT_name)); func->linkage_name = strings__add(strings, attr_string(die, DW_AT_MIPS_linkage_name)); func->inlined = attr_numeric(die, DW_AT_inline); func->external = dwarf_hasattr(die, DW_AT_external); func->abstract_origin = dwarf_hasattr(die, DW_AT_abstract_origin); dwarf_tag__set_spec(func->proto.tag.priv, attr_type(die, DW_AT_specification)); func->accessibility = attr_numeric(die, DW_AT_accessibility); func->virtuality = attr_numeric(die, DW_AT_virtuality); INIT_LIST_HEAD(&func->vtable_node); INIT_LIST_HEAD(&func->tool_node); func->vtable_entry = -1; if (dwarf_hasattr(die, DW_AT_vtable_elem_location)) func->vtable_entry = attr_offset(die, DW_AT_vtable_elem_location); func->cu_total_size_inline_expansions = 0; func->cu_total_nr_inline_expansions = 0; func->priv = NULL; } return func; } static uint64_t attr_upper_bound(Dwarf_Die *die) { Dwarf_Attribute attr; if (dwarf_attr(die, DW_AT_upper_bound, &attr) != NULL) { Dwarf_Word num; if (dwarf_formudata(&attr, &num) == 0) { return (uintmax_t)num + 1; } } else if (dwarf_attr(die, DW_AT_count, &attr) != NULL) { Dwarf_Word num; if (dwarf_formudata(&attr, &num) == 0) { return (uintmax_t)num; } } return 0; } static void __cu__tag_not_handled(Dwarf_Die *die, const char *fn) { uint32_t tag = dwarf_tag(die); fprintf(stderr, "%s: DW_TAG_%s (%#x) @ <%#llx> not handled!\n", fn, dwarf_tag_name(tag), tag, (unsigned long long)dwarf_dieoffset(die)); } static struct tag unsupported_tag; #define cu__tag_not_handled(die) __cu__tag_not_handled(die, __FUNCTION__) static struct tag *__die__process_tag(Dwarf_Die *die, struct cu *cu, int toplevel, const char *fn); #define die__process_tag(die, cu, toplevel) \ __die__process_tag(die, cu, toplevel, __FUNCTION__) static struct tag *die__create_new_tag(Dwarf_Die *die, struct cu *cu) { struct tag *tag = tag__new(die, cu); if (tag != NULL) { if (dwarf_haschildren(die)) fprintf(stderr, "%s: %s WITH children!\n", __func__, dwarf_tag_name(tag->tag)); } return tag; } static struct tag *die__create_new_ptr_to_member_type(Dwarf_Die *die, struct cu *cu) { struct ptr_to_member_type *ptr = ptr_to_member_type__new(die, cu); return ptr ? &ptr->tag : NULL; } static int die__process_class(Dwarf_Die *die, struct type *class, struct cu *cu); static struct tag *die__create_new_class(Dwarf_Die *die, struct cu *cu) { Dwarf_Die child; struct class *class = class__new(die, cu); if (class != NULL && dwarf_haschildren(die) != 0 && dwarf_child(die, &child) == 0) { if (die__process_class(&child, &class->type, cu) != 0) { class__delete(class, cu); class = NULL; } } return class ? &class->type.namespace.tag : NULL; } static int die__process_namespace(Dwarf_Die *die, struct namespace *namespace, struct cu *cu); static struct tag *die__create_new_namespace(Dwarf_Die *die, struct cu *cu) { Dwarf_Die child; struct namespace *namespace = namespace__new(die, cu); if (namespace != NULL && dwarf_haschildren(die) != 0 && dwarf_child(die, &child) == 0) { if (die__process_namespace(&child, namespace, cu) != 0) { namespace__delete(namespace, cu); namespace = NULL; } } return namespace ? 
&namespace->tag : NULL; } static struct tag *die__create_new_union(Dwarf_Die *die, struct cu *cu) { Dwarf_Die child; struct type *utype = type__new(die, cu); if (utype != NULL && dwarf_haschildren(die) != 0 && dwarf_child(die, &child) == 0) { if (die__process_class(&child, utype, cu) != 0) { type__delete(utype, cu); utype = NULL; } } return utype ? &utype->namespace.tag : NULL; } static struct tag *die__create_new_base_type(Dwarf_Die *die, struct cu *cu) { struct base_type *base = base_type__new(die, cu); if (base == NULL) return NULL; if (dwarf_haschildren(die)) fprintf(stderr, "%s: DW_TAG_base_type WITH children!\n", __func__); return &base->tag; } static struct tag *die__create_new_typedef(Dwarf_Die *die, struct cu *cu) { struct type *tdef = type__new(die, cu); if (tdef == NULL) return NULL; if (dwarf_haschildren(die)) { struct dwarf_tag *dtag = tdef->namespace.tag.priv; fprintf(stderr, "%s: DW_TAG_typedef %llx WITH children!\n", __func__, (unsigned long long)dtag->id); } return &tdef->namespace.tag; } static struct tag *die__create_new_array(Dwarf_Die *die, struct cu *cu) { Dwarf_Die child; /* "64 dimensions will be enough for everybody." acme, 2006 */ const uint8_t max_dimensions = 64; uint32_t nr_entries[max_dimensions]; struct array_type *array = array_type__new(die, cu); if (array == NULL) return NULL; if (!dwarf_haschildren(die) || dwarf_child(die, &child) != 0) return &array->tag; die = &child; do { if (dwarf_tag(die) == DW_TAG_subrange_type) { nr_entries[array->dimensions++] = attr_upper_bound(die); if (array->dimensions == max_dimensions) { fprintf(stderr, "%s: only %u dimensions are " "supported!\n", __FUNCTION__, max_dimensions); break; } } else cu__tag_not_handled(die); } while (dwarf_siblingof(die, die) == 0); array->nr_entries = memdup(nr_entries, array->dimensions * sizeof(uint32_t), cu); if (array->nr_entries == NULL) goto out_free; return &array->tag; out_free: obstack_free(&cu->obstack, array); return NULL; } static struct tag *die__create_new_parameter(Dwarf_Die *die, struct ftype *ftype, struct lexblock *lexblock, struct cu *cu) { struct parameter *parm = parameter__new(die, cu); if (parm == NULL) return NULL; if (ftype != NULL) ftype__add_parameter(ftype, parm); else { /* * DW_TAG_formal_parameters on a non DW_TAG_subprogram nor * DW_TAG_subroutine_type tag happens sometimes, likely due to * compiler optimizing away a inline expansion (at least this * was observed in some cases, such as in the Linux kernel * current_kernel_time function circa 2.6.20-rc5), keep it in * the lexblock tag list because it can be referenced as an * DW_AT_abstract_origin in another DW_TAG_formal_parameter. */ lexblock__add_tag(lexblock, &parm->tag); } return &parm->tag; } static struct tag *die__create_new_label(Dwarf_Die *die, struct lexblock *lexblock, struct cu *cu) { struct label *label = label__new(die, cu); if (label == NULL) return NULL; lexblock__add_label(lexblock, label); return &label->ip.tag; } static struct tag *die__create_new_variable(Dwarf_Die *die, struct cu *cu) { struct variable *var = variable__new(die, cu); return var ? 
&var->ip.tag : NULL; } static struct tag *die__create_new_subroutine_type(Dwarf_Die *die, struct cu *cu) { Dwarf_Die child; struct ftype *ftype = ftype__new(die, cu); struct tag *tag; if (ftype == NULL) return NULL; if (!dwarf_haschildren(die) || dwarf_child(die, &child) != 0) goto out; die = &child; do { uint32_t id; switch (dwarf_tag(die)) { case DW_TAG_formal_parameter: tag = die__create_new_parameter(die, ftype, NULL, cu); break; case DW_TAG_unspecified_parameters: ftype->unspec_parms = 1; continue; default: tag = die__process_tag(die, cu, 0); if (tag == NULL) goto out_delete; if (cu__add_tag(cu, tag, &id) < 0) goto out_delete_tag; goto hash; } if (tag == NULL) goto out_delete; if (cu__table_add_tag(cu, tag, &id) < 0) goto out_delete_tag; hash: cu__hash(cu, tag); struct dwarf_tag *dtag = tag->priv; dtag->small_id = id; } while (dwarf_siblingof(die, die) == 0); out: return &ftype->tag; out_delete_tag: tag__delete(tag, cu); out_delete: ftype__delete(ftype, cu); return NULL; } static struct tag *die__create_new_enumeration(Dwarf_Die *die, struct cu *cu) { Dwarf_Die child; struct type *enumeration = type__new(die, cu); if (enumeration == NULL) return NULL; if (enumeration->size == 0) enumeration->size = sizeof(int) * 8; else enumeration->size *= 8; if (!dwarf_haschildren(die) || dwarf_child(die, &child) != 0) { /* Seen on libQtCore.so.4.3.4.debug, * class QAbstractFileEngineIterator, enum EntryInfoType */ goto out; } die = &child; do { struct enumerator *enumerator; if (dwarf_tag(die) != DW_TAG_enumerator) { cu__tag_not_handled(die); continue; } enumerator = enumerator__new(die, cu); if (enumerator == NULL) goto out_delete; enumeration__add(enumeration, enumerator); } while (dwarf_siblingof(die, die) == 0); out: return &enumeration->namespace.tag; out_delete: enumeration__delete(enumeration, cu); return NULL; } static int die__process_class(Dwarf_Die *die, struct type *class, struct cu *cu) { const bool is_union = tag__is_union(&class->namespace.tag); do { switch (dwarf_tag(die)) { #ifdef STB_GNU_UNIQUE case DW_TAG_GNU_formal_parameter_pack: case DW_TAG_GNU_template_parameter_pack: case DW_TAG_GNU_template_template_param: #endif case DW_TAG_template_type_parameter: case DW_TAG_template_value_parameter: /* * FIXME: probably we'll have to attach this as a list of * template parameters to use at class__fprintf time... 
* * See: * https://gcc.gnu.org/wiki/TemplateParmsDwarf */ tag__print_not_supported(dwarf_tag(die)); continue; case DW_TAG_inheritance: case DW_TAG_member: { struct class_member *member = class_member__new(die, cu, is_union); if (member == NULL) return -ENOMEM; if (cu__is_c_plus_plus(cu)) { uint32_t id; if (cu__table_add_tag(cu, &member->tag, &id) < 0) { class_member__delete(member, cu); return -ENOMEM; } struct dwarf_tag *dtag = member->tag.priv; dtag->small_id = id; } type__add_member(class, member); cu__hash(cu, &member->tag); } continue; default: { struct tag *tag = die__process_tag(die, cu, 0); if (tag == NULL) return -ENOMEM; uint32_t id; if (cu__table_add_tag(cu, tag, &id) < 0) { tag__delete(tag, cu); return -ENOMEM; } struct dwarf_tag *dtag = tag->priv; dtag->small_id = id; namespace__add_tag(&class->namespace, tag); cu__hash(cu, tag); if (tag__is_function(tag)) { struct function *fself = tag__function(tag); if (fself->vtable_entry != -1) class__add_vtable_entry(type__class(class), fself); } continue; } } } while (dwarf_siblingof(die, die) == 0); return 0; } static int die__process_namespace(Dwarf_Die *die, struct namespace *namespace, struct cu *cu) { struct tag *tag; do { tag = die__process_tag(die, cu, 0); if (tag == NULL) goto out_enomem; uint32_t id; if (cu__table_add_tag(cu, tag, &id) < 0) goto out_delete_tag; struct dwarf_tag *dtag = tag->priv; dtag->small_id = id; namespace__add_tag(namespace, tag); cu__hash(cu, tag); } while (dwarf_siblingof(die, die) == 0); return 0; out_delete_tag: tag__delete(tag, cu); out_enomem: return -ENOMEM; } static int die__process_function(Dwarf_Die *die, struct ftype *ftype, struct lexblock *lexblock, struct cu *cu); static int die__create_new_lexblock(Dwarf_Die *die, struct cu *cu, struct lexblock *father) { struct lexblock *lexblock = lexblock__new(die, cu); if (lexblock != NULL) { if (die__process_function(die, NULL, lexblock, cu) != 0) goto out_delete; } if (father != NULL) lexblock__add_lexblock(father, lexblock); return 0; out_delete: lexblock__delete(lexblock, cu); return -ENOMEM; } static struct tag *die__create_new_inline_expansion(Dwarf_Die *die, struct lexblock *lexblock, struct cu *cu); static int die__process_inline_expansion(Dwarf_Die *die, struct lexblock *lexblock, struct cu *cu) { Dwarf_Die child; struct tag *tag; if (!dwarf_haschildren(die) || dwarf_child(die, &child) != 0) return 0; die = &child; do { uint32_t id; switch (dwarf_tag(die)) { case DW_TAG_GNU_call_site: case DW_TAG_GNU_call_site_parameter: /* * FIXME: read http://www.dwarfstd.org/ShowIssue.php?issue=100909.2&type=open * and write proper support. * * From a quick read there is not much we can use in * the existing dwarves tools, so just stop warning the user, * developers will find these notes if wanting to use in a * new tool. */ continue; case DW_TAG_lexical_block: if (die__create_new_lexblock(die, cu, lexblock) != 0) goto out_enomem; continue; case DW_TAG_formal_parameter: /* * FIXME: * So far DW_TAG_inline_routine had just an * abstract origin, but starting with * /usr/lib/openoffice.org/basis3.0/program/libdbalx.so * I realized it really has to be handled as a * DW_TAG_function... Lets just get the types * for 1.8, then fix this properly. 
* * cu__tag_not_handled(die); */ continue; case DW_TAG_inlined_subroutine: tag = die__create_new_inline_expansion(die, lexblock, cu); break; case DW_TAG_label: tag = die__create_new_label(die, lexblock, cu); break; default: tag = die__process_tag(die, cu, 0); if (tag == NULL) goto out_enomem; if (tag == &unsupported_tag) continue; if (cu__add_tag(cu, tag, &id) < 0) goto out_delete_tag; goto hash; } if (tag == NULL) goto out_enomem; if (cu__table_add_tag(cu, tag, &id) < 0) goto out_delete_tag; hash: cu__hash(cu, tag); struct dwarf_tag *dtag = tag->priv; dtag->small_id = id; } while (dwarf_siblingof(die, die) == 0); return 0; out_delete_tag: tag__delete(tag, cu); out_enomem: return -ENOMEM; } static struct tag *die__create_new_inline_expansion(Dwarf_Die *die, struct lexblock *lexblock, struct cu *cu) { struct inline_expansion *exp = inline_expansion__new(die, cu); if (exp == NULL) return NULL; if (die__process_inline_expansion(die, lexblock, cu) != 0) { obstack_free(&cu->obstack, exp); return NULL; } if (lexblock != NULL) lexblock__add_inline_expansion(lexblock, exp); return &exp->ip.tag; } static int die__process_function(Dwarf_Die *die, struct ftype *ftype, struct lexblock *lexblock, struct cu *cu) { Dwarf_Die child; struct tag *tag; if (!dwarf_haschildren(die) || dwarf_child(die, &child) != 0) return 0; die = &child; do { uint32_t id; switch (dwarf_tag(die)) { case DW_TAG_GNU_call_site: case DW_TAG_GNU_call_site_parameter: /* * XXX: read http://www.dwarfstd.org/ShowIssue.php?issue=100909.2&type=open * and write proper support. * * From a quick read there is not much we can use in * the existing dwarves tools, so just stop warning the user, * developers will find these notes if wanting to use in a * new tool. */ continue; case DW_TAG_dwarf_procedure: /* * Ignore it, just scope expressions, that we have no use for (so far). */ continue; #ifdef STB_GNU_UNIQUE case DW_TAG_GNU_formal_parameter_pack: case DW_TAG_GNU_template_parameter_pack: case DW_TAG_GNU_template_template_param: #endif case DW_TAG_template_type_parameter: case DW_TAG_template_value_parameter: /* FIXME: probably we'll have to attach this as a list of * template parameters to use at class__fprintf time... 
* See die__process_class */ tag__print_not_supported(dwarf_tag(die)); continue; case DW_TAG_formal_parameter: tag = die__create_new_parameter(die, ftype, lexblock, cu); break; case DW_TAG_variable: tag = die__create_new_variable(die, cu); if (tag == NULL) goto out_enomem; lexblock__add_variable(lexblock, tag__variable(tag)); break; case DW_TAG_unspecified_parameters: if (ftype != NULL) ftype->unspec_parms = 1; continue; case DW_TAG_label: tag = die__create_new_label(die, lexblock, cu); break; case DW_TAG_inlined_subroutine: tag = die__create_new_inline_expansion(die, lexblock, cu); break; case DW_TAG_lexical_block: if (die__create_new_lexblock(die, cu, lexblock) != 0) goto out_enomem; continue; default: tag = die__process_tag(die, cu, 0); if (tag == NULL) goto out_enomem; if (tag == &unsupported_tag) continue; if (cu__add_tag(cu, tag, &id) < 0) goto out_delete_tag; goto hash; } if (tag == NULL) goto out_enomem; if (cu__table_add_tag(cu, tag, &id) < 0) goto out_delete_tag; hash: cu__hash(cu, tag); struct dwarf_tag *dtag = tag->priv; dtag->small_id = id; } while (dwarf_siblingof(die, die) == 0); return 0; out_delete_tag: tag__delete(tag, cu); out_enomem: return -ENOMEM; } static struct tag *die__create_new_function(Dwarf_Die *die, struct cu *cu) { struct function *function = function__new(die, cu); if (function != NULL && die__process_function(die, &function->proto, &function->lexblock, cu) != 0) { function__delete(function, cu); function = NULL; } return function ? &function->proto.tag : NULL; } static struct tag *__die__process_tag(Dwarf_Die *die, struct cu *cu, int top_level, const char *fn) { struct tag *tag; switch (dwarf_tag(die)) { case DW_TAG_array_type: tag = die__create_new_array(die, cu); break; case DW_TAG_base_type: tag = die__create_new_base_type(die, cu); break; case DW_TAG_const_type: case DW_TAG_imported_declaration: case DW_TAG_imported_module: case DW_TAG_pointer_type: case DW_TAG_reference_type: case DW_TAG_restrict_type: case DW_TAG_unspecified_type: case DW_TAG_volatile_type: tag = die__create_new_tag(die, cu); break; case DW_TAG_ptr_to_member_type: tag = die__create_new_ptr_to_member_type(die, cu); break; case DW_TAG_enumeration_type: tag = die__create_new_enumeration(die, cu); break; case DW_TAG_namespace: tag = die__create_new_namespace(die, cu); break; case DW_TAG_class_type: case DW_TAG_interface_type: case DW_TAG_structure_type: tag = die__create_new_class(die, cu); break; case DW_TAG_subprogram: tag = die__create_new_function(die, cu); break; case DW_TAG_subroutine_type: tag = die__create_new_subroutine_type(die, cu); break; case DW_TAG_rvalue_reference_type: case DW_TAG_typedef: tag = die__create_new_typedef(die, cu); break; case DW_TAG_union_type: tag = die__create_new_union(die, cu); break; case DW_TAG_variable: tag = die__create_new_variable(die, cu); break; default: __cu__tag_not_handled(die, fn); /* fall thru */ case DW_TAG_dwarf_procedure: /* * Ignore it, just scope expressions, that we have no use for (so far). 
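 * (Per the DWARF spec, a DW_TAG_dwarf_procedure DIE holds just a
 * DW_AT_location expression that other location expressions invoke
 * via the DW_OP_call* operators, so it carries no type or data
 * layout information the dwarves tools could use.)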
*/ tag = &unsupported_tag; break; } if (tag != NULL) tag->top_level = top_level; return tag; } static int die__process_unit(Dwarf_Die *die, struct cu *cu) { do { struct tag *tag = die__process_tag(die, cu, 1); if (tag == NULL) return -ENOMEM; if (tag == &unsupported_tag) continue; uint32_t id; cu__add_tag(cu, tag, &id); cu__hash(cu, tag); struct dwarf_tag *dtag = tag->priv; dtag->small_id = id; } while (dwarf_siblingof(die, die) == 0); return 0; } static void __tag__print_type_not_found(struct tag *tag, const char *func) { struct dwarf_tag *dtag = tag->priv; fprintf(stderr, "%s: couldn't find %#llx type for %#llx (%s)!\n", func, (unsigned long long)dtag->type.off, (unsigned long long)dtag->id, dwarf_tag_name(tag->tag)); } #define tag__print_type_not_found(tag) \ __tag__print_type_not_found(tag, __func__) static void ftype__recode_dwarf_types(struct tag *tag, struct cu *cu); static int namespace__recode_dwarf_types(struct tag *tag, struct cu *cu) { struct tag *pos; struct dwarf_cu *dcu = cu->priv; struct namespace *ns = tag__namespace(tag); namespace__for_each_tag(ns, pos) { struct dwarf_tag *dtype; struct dwarf_tag *dpos = pos->priv; if (tag__has_namespace(pos)) { if (namespace__recode_dwarf_types(pos, cu)) return -1; continue; } switch (pos->tag) { case DW_TAG_member: { struct class_member *member = tag__class_member(pos); /* * We may need to recode the type, possibly creating a * suitably sized new base_type */ if (member->bitfield_size != 0 && !no_bitfield_type_recode) { if (class_member__dwarf_recode_bitfield(member, cu)) return -1; continue; } } break; case DW_TAG_subroutine_type: case DW_TAG_subprogram: ftype__recode_dwarf_types(pos, cu); break; case DW_TAG_imported_module: dtype = dwarf_cu__find_tag_by_ref(dcu, &dpos->type); goto check_type; /* Can be for both types and non types */ case DW_TAG_imported_declaration: dtype = dwarf_cu__find_tag_by_ref(dcu, &dpos->type); if (dtype != NULL) goto next; goto find_type; } if (dpos->type.off == 0) /* void */ continue; find_type: dtype = dwarf_cu__find_type_by_ref(dcu, &dpos->type); check_type: if (dtype == NULL) { tag__print_type_not_found(pos); continue; } next: pos->type = dtype->small_id; } return 0; } static void type__recode_dwarf_specification(struct tag *tag, struct cu *cu) { struct dwarf_tag *dtype; struct type *t = tag__type(tag); dwarf_off_ref specification = dwarf_tag__spec(tag->priv); if (t->namespace.name != 0 || specification.off == 0) return; dtype = dwarf_cu__find_type_by_ref(cu->priv, &specification); if (dtype != NULL) t->namespace.name = tag__namespace(dtype->tag)->name; else { struct dwarf_tag *dtag = tag->priv; fprintf(stderr, "%s: couldn't find name for " "class %#llx, specification=%#llx\n", __func__, (unsigned long long)dtag->id, (unsigned long long)specification.off); } } static void __tag__print_abstract_origin_not_found(struct tag *tag, const char *func) { struct dwarf_tag *dtag = tag->priv; fprintf(stderr, "%s: couldn't find %#llx abstract_origin for %#llx (%s)!\n", func, (unsigned long long)dtag->abstract_origin.off, (unsigned long long)dtag->id, dwarf_tag_name(tag->tag)); } #define tag__print_abstract_origin_not_found(tag ) \ __tag__print_abstract_origin_not_found(tag, __func__) static void ftype__recode_dwarf_types(struct tag *tag, struct cu *cu) { struct parameter *pos; struct dwarf_cu *dcu = cu->priv; struct ftype *type = tag__ftype(tag); ftype__for_each_parameter(type, pos) { struct dwarf_tag *dpos = pos->tag.priv; struct dwarf_tag *dtype; if (dpos->type.off == 0) { if (dpos->abstract_origin.off == 0) { /* 
Function without parameters */ pos->tag.type = 0; continue; } dtype = dwarf_cu__find_tag_by_ref(dcu, &dpos->abstract_origin); if (dtype == NULL) { tag__print_abstract_origin_not_found(&pos->tag); continue; } pos->name = tag__parameter(dtype->tag)->name; pos->tag.type = dtype->tag->type; continue; } dtype = dwarf_cu__find_type_by_ref(dcu, &dpos->type); if (dtype == NULL) { tag__print_type_not_found(&pos->tag); continue; } pos->tag.type = dtype->small_id; } } static void lexblock__recode_dwarf_types(struct lexblock *tag, struct cu *cu) { struct tag *pos; struct dwarf_cu *dcu = cu->priv; list_for_each_entry(pos, &tag->tags, node) { struct dwarf_tag *dpos = pos->priv; struct dwarf_tag *dtype; switch (pos->tag) { case DW_TAG_lexical_block: lexblock__recode_dwarf_types(tag__lexblock(pos), cu); continue; case DW_TAG_inlined_subroutine: dtype = dwarf_cu__find_tag_by_ref(dcu, &dpos->type); if (dtype == NULL) { tag__print_type_not_found(pos); continue; } ftype__recode_dwarf_types(dtype->tag, cu); continue; case DW_TAG_formal_parameter: if (dpos->type.off != 0) break; struct parameter *fp = tag__parameter(pos); dtype = dwarf_cu__find_tag_by_ref(dcu, &dpos->abstract_origin); if (dtype == NULL) { tag__print_abstract_origin_not_found(pos); continue; } fp->name = tag__parameter(dtype->tag)->name; pos->type = dtype->tag->type; continue; case DW_TAG_variable: if (dpos->type.off != 0) break; struct variable *var = tag__variable(pos); if (dpos->abstract_origin.off == 0) { /* * DW_TAG_variable completely empty was * found on libQtGui.so.4.3.4.debug * <3>: Abbrev Number: 164 (DW_TAG_variable) */ continue; } dtype = dwarf_cu__find_tag_by_ref(dcu, &dpos->abstract_origin); if (dtype == NULL) { tag__print_abstract_origin_not_found(pos); continue; } var->name = tag__variable(dtype->tag)->name; pos->type = dtype->tag->type; continue; case DW_TAG_label: { struct label *l = tag__label(pos); if (dpos->abstract_origin.off == 0) continue; dtype = dwarf_cu__find_tag_by_ref(dcu, &dpos->abstract_origin); if (dtype != NULL) l->name = tag__label(dtype->tag)->name; else tag__print_abstract_origin_not_found(pos); } continue; } dtype = dwarf_cu__find_type_by_ref(dcu, &dpos->type); if (dtype == NULL) { tag__print_type_not_found(pos); continue; } pos->type = dtype->small_id; } } static int tag__recode_dwarf_type(struct tag *tag, struct cu *cu) { struct dwarf_tag *dtag = tag->priv; struct dwarf_tag *dtype; /* Check if this is an already recoded bitfield */ if (dtag == NULL) return 0; if (tag__is_type(tag)) type__recode_dwarf_specification(tag, cu); if (tag__has_namespace(tag)) return namespace__recode_dwarf_types(tag, cu); switch (tag->tag) { case DW_TAG_subprogram: { struct function *fn = tag__function(tag); if (fn->name == 0) { dwarf_off_ref specification = dwarf_tag__spec(dtag); if (dtag->abstract_origin.off == 0 && specification.off == 0) { /* * Found on libQtGui.so.4.3.4.debug * <3><1423de>: Abbrev Number: 209 (DW_TAG_subprogram) * <1423e0> DW_AT_declaration : 1 */ return 0; } dtype = dwarf_cu__find_tag_by_ref(cu->priv, &dtag->abstract_origin); if (dtype == NULL) dtype = dwarf_cu__find_tag_by_ref(cu->priv, &specification); if (dtype != NULL) fn->name = tag__function(dtype->tag)->name; else { fprintf(stderr, "%s: couldn't find name for " "function %#llx, abstract_origin=%#llx," " specification=%#llx\n", __func__, (unsigned long long)dtag->id, (unsigned long long)dtag->abstract_origin.off, (unsigned long long)specification.off); } } lexblock__recode_dwarf_types(&fn->lexblock, cu); } /* Fall thru */ case DW_TAG_subroutine_type: 
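/*
 * A sketch of the overall recode scheme, as can be inferred from the
 * surrounding code: while loading, die__process() stores the raw
 * .debug_info offset of each referenced DIE in struct dwarf_tag; the
 * *__recode_dwarf_types() helpers then translate every offset into
 * the small_id index of the per-cu tables, so that from this point
 * on tag->type is always a table index, never a DWARF offset.
 */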
ftype__recode_dwarf_types(tag, cu); /* Fall thru, for the function return type */ break; case DW_TAG_lexical_block: lexblock__recode_dwarf_types(tag__lexblock(tag), cu); return 0; case DW_TAG_ptr_to_member_type: { struct ptr_to_member_type *pt = tag__ptr_to_member_type(tag); dtype = dwarf_cu__find_type_by_ref(cu->priv, &dtag->containing_type); if (dtype != NULL) pt->containing_type = dtype->small_id; else { fprintf(stderr, "%s: couldn't find type for " "containing_type %#llx, containing_type=%#llx\n", __func__, (unsigned long long)dtag->id, (unsigned long long)dtag->containing_type.off); } } break; case DW_TAG_namespace: return namespace__recode_dwarf_types(tag, cu); /* Damn, DW_TAG_inlined_subroutine is a special case as dwarf_tag->id is in fact an abstract origin, i.e. must be looked up in the tags_table, not in the types_table. The others also point to routines, so are in tags_table */ case DW_TAG_inlined_subroutine: case DW_TAG_imported_module: dtype = dwarf_cu__find_tag_by_ref(cu->priv, &dtag->type); goto check_type; /* Can be for both types and non types */ case DW_TAG_imported_declaration: dtype = dwarf_cu__find_tag_by_ref(cu->priv, &dtag->type); if (dtype != NULL) goto out; goto find_type; } if (dtag->type.off == 0) { tag->type = 0; /* void */ return 0; } find_type: dtype = dwarf_cu__find_type_by_ref(cu->priv, &dtag->type); check_type: if (dtype == NULL) { tag__print_type_not_found(tag); return 0; } out: tag->type = dtype->small_id; return 0; } static int cu__recode_dwarf_types_table(struct cu *cu, struct ptr_table *pt, uint32_t i) { for (; i < pt->nr_entries; ++i) { struct tag *tag = pt->entries[i]; if (tag != NULL) /* void, see cu__new */ if (tag__recode_dwarf_type(tag, cu)) return -1; } return 0; } static int cu__recode_dwarf_types(struct cu *cu) { if (cu__recode_dwarf_types_table(cu, &cu->types_table, 1) || cu__recode_dwarf_types_table(cu, &cu->tags_table, 0) || cu__recode_dwarf_types_table(cu, &cu->functions_table, 0)) return -1; return 0; } static const char *dwarf_tag__decl_file(const struct tag *tag, const struct cu *cu) { struct dwarf_tag *dtag = tag->priv; return cu->extra_dbg_info ? strings__ptr(strings, dtag->decl_file) : NULL; } static uint32_t dwarf_tag__decl_line(const struct tag *tag, const struct cu *cu) { struct dwarf_tag *dtag = tag->priv; return cu->extra_dbg_info ? dtag->decl_line : 0; } static unsigned long long dwarf_tag__orig_id(const struct tag *tag, const struct cu *cu) { struct dwarf_tag *dtag = tag->priv; return cu->extra_dbg_info ?
dtag->id : 0; } static const char *dwarf__strings_ptr(const struct cu *cu __unused, strings_t s) { return strings__ptr(strings, s); } struct debug_fmt_ops dwarf__ops; static int die__process(Dwarf_Die *die, struct cu *cu) { Dwarf_Die child; const uint16_t tag = dwarf_tag(die); if (tag != DW_TAG_compile_unit && tag != DW_TAG_type_unit && tag != DW_TAG_partial_unit) { fprintf(stderr, "%s: DW_TAG_compile_unit, DW_TAG_type_unit or DW_TAG_partial_unit expected, got %s!\n", __FUNCTION__, dwarf_tag_name(tag)); return -EINVAL; } cu->language = attr_numeric(die, DW_AT_language); if (dwarf_child(die, &child) == 0) { int err = die__process_unit(&child, cu); if (err) return err; } if (dwarf_siblingof(die, die) == 0) fprintf(stderr, "%s: got unexpected tag %s after " "DW_TAG_compile_unit!\n", __FUNCTION__, dwarf_tag_name(dwarf_tag(die))); return 0; } static int die__process_and_recode(Dwarf_Die *die, struct cu *cu) { int ret = die__process(die, cu); if (ret != 0) return ret; return cu__recode_dwarf_types(cu); } static int class_member__cache_byte_size(struct tag *tag, struct cu *cu, void *cookie) { struct class_member *member = tag__class_member(tag); struct conf_load *conf_load = cookie; if (tag__is_class_member(tag)) { if (member->is_static) return 0; } else if (tag->tag != DW_TAG_inheritance) { return 0; } if (member->bitfield_size == 0) { member->byte_size = tag__size(tag, cu); member->bit_size = member->byte_size * 8; return 0; } /* * Try to figure out byte size, if it's not directly provided in DWARF */ if (member->byte_size == 0) { struct tag *type = tag__strip_typedefs_and_modifiers(&member->tag, cu); member->byte_size = tag__size(type, cu); if (member->byte_size == 0) { int bit_size; if (tag__is_enumeration(type)) { bit_size = tag__type(type)->size; } else { struct base_type *bt = tag__base_type(type); bit_size = bt->bit_size ? bt->bit_size : base_type__name_to_size(bt, cu); } member->byte_size = (bit_size + 7) / 8 * 8; } } member->bit_size = member->byte_size * 8; /* * XXX: after all the attempts to determine byte size, we might still * be unsuccessful, because base_type__name_to_size doesn't know about * the base_type name, so one has to add there when such base_type * isn't found. pahole will put zero on the struct output so it should * be easy to spot the name when such an unlikely thing happens. */ if (member->byte_size == 0) { member->bitfield_offset = 0; return 0; } /* * For little-endian architectures, DWARF data emitted by gcc/clang * specifies bitfield offset as an offset from the highest-order bit * of an underlying integral type (e.g., int) to a highest-order bit * of a bitfield. E.g., for bitfield taking first 5 bits of int-backed * bitfield, bit offset will be 27 (sizeof(int) * 8 - 0 bit offset - 5 bit * size), which is very counter-intuitive and isn't a natural * extension of byte offset, which on little-endian points to * lowest-order byte. So here we re-adjust bitfield offset to be an * offset from lowest-order bit of underlying integral type to * a lowest-order bit of a bitfield. This makes bitfield offset * a natural extension of byte offset for bitfields and is uniform * with how big-endian bit offsets work.
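 * A worked example (made up for illustration): given
 *
 *     struct s { int a:5; };
 *
 * gcc on a little-endian target describes "a" with a DWARF bit
 * offset of 27 (32 - 0 - 5); the adjustment below computes
 * 32 - 27 - 5 = 0, i.e. "a" starts at bit 0 counting from the
 * lowest-order bit of the underlying int, the natural extension of
 * how byte offsets are counted.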
*/ if (cu->little_endian) { member->bitfield_offset = member->bit_size - member->bitfield_offset - member->bitfield_size; } member->bit_offset = member->byte_offset * 8 + member->bitfield_offset; /* make sure bitfield offset is non-negative */ if (member->bitfield_offset < 0) { member->bitfield_offset += member->bit_size; member->byte_offset -= member->byte_size; member->bit_offset = member->byte_offset * 8 + member->bitfield_offset; } /* align on underlying base type natural alignment boundary */ member->bitfield_offset += (member->byte_offset % member->byte_size) * 8; member->byte_offset = member->bit_offset / member->bit_size * member->bit_size / 8; if (member->bitfield_offset >= member->bit_size) { member->bitfield_offset -= member->bit_size; member->byte_offset += member->byte_size; } if (conf_load && conf_load->fixup_silly_bitfields && member->byte_size == 8 * member->bitfield_size) { member->bitfield_size = 0; member->bitfield_offset = 0; } return 0; } static int finalize_cu(struct cus *cus, struct cu *cu, struct dwarf_cu *dcu, struct conf_load *conf) { base_type_name_to_size_table__init(strings); cu__for_all_tags(cu, class_member__cache_byte_size, conf); if (conf && conf->steal) { return conf->steal(cu, conf); } return LSK__KEEPIT; } static int finalize_cu_immediately(struct cus *cus, struct cu *cu, struct dwarf_cu *dcu, struct conf_load *conf) { int lsk = finalize_cu(cus, cu, dcu, conf); switch (lsk) { case LSK__DELETE: cu__delete(cu); break; case LSK__STOP_LOADING: break; case LSK__KEEPIT: if (!cu->extra_dbg_info) obstack_free(&dcu->obstack, NULL); cus__add(cus, cu); break; } return lsk; } static int cus__load_debug_types(struct cus *cus, struct conf_load *conf, Dwfl_Module *mod, Dwarf *dw, Elf *elf, const char *filename, const unsigned char *build_id, int build_id_len, struct cu **cup, struct dwarf_cu *dcup) { Dwarf_Off off = 0, noff, type_off; size_t cuhl; uint8_t pointer_size, offset_size; uint64_t signature; *cup = NULL; while (dwarf_next_unit(dw, off, &noff, &cuhl, NULL, NULL, &pointer_size, &offset_size, &signature, &type_off) == 0) { if (*cup == NULL) { struct cu *cu; cu = cu__new("", pointer_size, build_id, build_id_len, filename); if (cu == NULL) { return DWARF_CB_ABORT; } cu->uses_global_strings = true; cu->elf = elf; cu->dwfl = mod; cu->extra_dbg_info = conf ? conf->extra_dbg_info : 0; cu->has_addr_info = conf ? conf->get_addr_info : 0; GElf_Ehdr ehdr; if (gelf_getehdr(elf, &ehdr) == NULL) { return DWARF_CB_ABORT; } cu->little_endian = ehdr.e_ident[EI_DATA] == ELFDATA2LSB; dwarf_cu__init(dcup); dcup->cu = cu; /* Funny hack. 
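 * (What the hack does: a DWARF4 .debug_types type unit resolves type
 * references within itself, so the type unit is registered as its
 * own type_unit; regular compile units loaded afterwards point their
 * dcu.type_unit here instead, see cus__load_module() below.)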
*/ dcup->type_unit = dcup; cu->priv = dcup; cu->dfops = &dwarf__ops; *cup = cu; } Dwarf_Die die_mem; Dwarf_Die *cu_die = dwarf_offdie_types(dw, off + cuhl, &die_mem); if (die__process(cu_die, *cup) != 0) return DWARF_CB_ABORT; off = noff; } if (*cup != NULL && cu__recode_dwarf_types(*cup) != 0) return DWARF_CB_ABORT; return 0; } static int cus__load_module(struct cus *cus, struct conf_load *conf, Dwfl_Module *mod, Dwarf *dw, Elf *elf, const char *filename) { Dwarf_Off off = 0, noff; size_t cuhl; GElf_Addr vaddr; const unsigned char *build_id = NULL; uint8_t pointer_size, offset_size; #ifdef HAVE_DWFL_MODULE_BUILD_ID int build_id_len = dwfl_module_build_id(mod, &build_id, &vaddr); #else int build_id_len = 0; #endif struct cu *type_cu; struct dwarf_cu type_dcu; int type_lsk = LSK__KEEPIT; int res = cus__load_debug_types(cus, conf, mod, dw, elf, filename, build_id, build_id_len, &type_cu, &type_dcu); if (res != 0) { return res; } if (type_cu != NULL) { type_lsk = finalize_cu(cus, type_cu, &type_dcu, conf); if (type_lsk == LSK__KEEPIT) { cus__add(cus, type_cu); } } while (dwarf_nextcu(dw, off, &noff, &cuhl, NULL, &pointer_size, &offset_size) == 0) { Dwarf_Die die_mem; Dwarf_Die *cu_die = dwarf_offdie(dw, off + cuhl, &die_mem); /* * DW_AT_name in DW_TAG_compile_unit can be NULL, first * seen in: * /usr/libexec/gcc/x86_64-redhat-linux/4.3.2/ecj1.debug */ const char *name = attr_string(cu_die, DW_AT_name); struct cu *cu = cu__new(name ?: "", pointer_size, build_id, build_id_len, filename); if (cu == NULL) return DWARF_CB_ABORT; cu->uses_global_strings = true; cu->elf = elf; cu->dwfl = mod; cu->extra_dbg_info = conf ? conf->extra_dbg_info : 0; cu->has_addr_info = conf ? conf->get_addr_info : 0; GElf_Ehdr ehdr; if (gelf_getehdr(elf, &ehdr) == NULL) { return DWARF_CB_ABORT; } cu->little_endian = ehdr.e_ident[EI_DATA] == ELFDATA2LSB; struct dwarf_cu dcu; dwarf_cu__init(&dcu); dcu.cu = cu; dcu.type_unit = type_cu ? &type_dcu : NULL; cu->priv = &dcu; cu->dfops = &dwarf__ops; if (die__process_and_recode(cu_die, cu) != 0) return DWARF_CB_ABORT; if (finalize_cu_immediately(cus, cu, &dcu, conf) == LSK__STOP_LOADING) return DWARF_CB_ABORT; off = noff; } if (type_lsk == LSK__DELETE) cu__delete(type_cu); return DWARF_CB_OK; } struct process_dwflmod_parms { struct cus *cus; struct conf_load *conf; const char *filename; uint32_t nr_dwarf_sections_found; }; static int cus__process_dwflmod(Dwfl_Module *dwflmod, void **userdata __unused, const char *name __unused, Dwarf_Addr base __unused, void *arg) { struct process_dwflmod_parms *parms = arg; struct cus *cus = parms->cus; GElf_Addr dwflbias; /* * Does the relocation and saves the elf for later processing * by the stealer, such as pahole_stealer, so that it doesn't * have to create another Elf instance just to do things like * reading this ELF file symtab to do CTF encoding of the * DW_TAG_subprogram tags (functions). */ Elf *elf = dwfl_module_getelf(dwflmod, &dwflbias); Dwarf_Addr dwbias; Dwarf *dw = dwfl_module_getdwarf(dwflmod, &dwbias); int err = DWARF_CB_OK; if (dw != NULL) { ++parms->nr_dwarf_sections_found; err = cus__load_module(cus, parms->conf, dwflmod, dw, elf, parms->filename); } /* * XXX We will fall back to try finding other debugging * formats (CTF), so no point in telling this to the user. * Use for debugging:
* else * fprintf(stderr, * "%s: can't get debug context descriptor: %s\n", * __func__, dwfl_errmsg(-1)); */ return err; } static int cus__process_file(struct cus *cus, struct conf_load *conf, int fd, const char *filename) { /* Duplicate an fd for dwfl_report_offline to swallow. */ int dwfl_fd = dup(fd); if (dwfl_fd < 0) return -1; /* * Use libdwfl in a trivial way to open the libdw handle for us. * This takes care of applying relocations to DWARF data in ET_REL * files. */ static const Dwfl_Callbacks callbacks = { .section_address = dwfl_offline_section_address, .find_debuginfo = dwfl_standard_find_debuginfo, /* We use this table for core files too. */ .find_elf = dwfl_build_id_find_elf, }; Dwfl *dwfl = dwfl_begin(&callbacks); if (dwfl_report_offline(dwfl, filename, filename, dwfl_fd) == NULL) return -1; dwfl_report_end(dwfl, NULL, NULL); struct process_dwflmod_parms parms = { .cus = cus, .conf = conf, .filename = filename, .nr_dwarf_sections_found = 0, }; /* Process the one or more modules gleaned from this file. */ dwfl_getmodules(dwfl, cus__process_dwflmod, &parms, 0); dwfl_end(dwfl); return parms.nr_dwarf_sections_found ? 0 : -1; } static int dwarf__load_file(struct cus *cus, struct conf_load *conf, const char *filename) { int fd, err; elf_version(EV_CURRENT); fd = open(filename, O_RDONLY); if (fd == -1) return -1; err = cus__process_file(cus, conf, fd, filename); close(fd); return err; } static int dwarf__init(void) { strings = strings__new(); return strings != NULL ? 0 : -ENOMEM; } static void dwarf__exit(void) { strings__delete(strings); strings = NULL; } struct debug_fmt_ops dwarf__ops = { .name = "dwarf", .init = dwarf__init, .exit = dwarf__exit, .load_file = dwarf__load_file, .strings__ptr = dwarf__strings_ptr, .tag__decl_file = dwarf_tag__decl_file, .tag__decl_line = dwarf_tag__decl_line, .tag__orig_id = dwarf_tag__orig_id, .has_alignment_info = true, }; dwarves-dfsg-1.15/dwarves.c000066400000000000000000001446701350511416500156660ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2006 Mandriva Conectiva S.A. Copyright (C) 2006 Arnaldo Carvalho de Melo <acme@mandriva.com> Copyright (C) 2007 Red Hat Inc. Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> */ #include <assert.h> #include <dirent.h> #include <dwarf.h> #include <errno.h> #include <fcntl.h> #include <fnmatch.h> #include <libelf.h> #include <limits.h> #include <search.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include "config.h" #include "list.h" #include "dwarves.h" #include "dutil.h" #include "strings.h" #include <obstack.h> #define obstack_chunk_alloc malloc #define obstack_chunk_free free #define min(x, y) ((x) < (y) ?
(x) : (y)) const char *cu__string(const struct cu *cu, strings_t s) { if (cu->dfops && cu->dfops->strings__ptr) return cu->dfops->strings__ptr(cu, s); return NULL; } static inline const char *s(const struct cu *cu, strings_t i) { return cu__string(cu, i); } int __tag__has_type_loop(const struct tag *tag, const struct tag *type, char *bf, size_t len, FILE *fp, const char *fn, int line) { char bbf[2048], *abf = bbf; if (type == NULL) return 0; if (tag->type == type->type) { int printed; if (bf != NULL) abf = bf; else len = sizeof(bbf); printed = snprintf(abf, len, "<ERROR(%s:%d): detected type loop: type=%d, tag=%s>", fn, line, tag->type, dwarf_tag_name(tag->tag)); if (bf == NULL) printed = fprintf(fp ?: stderr, "%s\n", abf); return printed; } return 0; } static void lexblock__delete_tags(struct tag *tag, struct cu *cu) { struct lexblock *block = tag__lexblock(tag); struct tag *pos, *n; list_for_each_entry_safe_reverse(pos, n, &block->tags, node) { list_del_init(&pos->node); tag__delete(pos, cu); } } void lexblock__delete(struct lexblock *block, struct cu *cu) { lexblock__delete_tags(&block->ip.tag, cu); obstack_free(&cu->obstack, block); } void tag__delete(struct tag *tag, struct cu *cu) { assert(list_empty(&tag->node)); switch (tag->tag) { case DW_TAG_union_type: type__delete(tag__type(tag), cu); break; case DW_TAG_class_type: case DW_TAG_structure_type: class__delete(tag__class(tag), cu); break; case DW_TAG_enumeration_type: enumeration__delete(tag__type(tag), cu); break; case DW_TAG_subroutine_type: ftype__delete(tag__ftype(tag), cu); break; case DW_TAG_subprogram: function__delete(tag__function(tag), cu); break; case DW_TAG_lexical_block: lexblock__delete(tag__lexblock(tag), cu); break; default: obstack_free(&cu->obstack, tag); } } void tag__not_found_die(const char *file, int line, const char *func) { fprintf(stderr, "%s::%s(%d): tag not found, please report to " "acme@kernel.org\n", file, func, line); exit(1); } struct tag *tag__follow_typedef(const struct tag *tag, const struct cu *cu) { struct tag *type = cu__type(cu, tag->type); if (type != NULL && tag__is_typedef(type)) return tag__follow_typedef(type, cu); return type; } struct tag *tag__strip_typedefs_and_modifiers(const struct tag *tag, const struct cu *cu) { struct tag *type = cu__type(cu, tag->type); while (type != NULL && (tag__is_typedef(type) || tag__is_modifier(type))) type = cu__type(cu, type->type); return type; } size_t __tag__id_not_found_fprintf(FILE *fp, type_id_t id, const char *fn, int line) { return fprintf(fp, "<ERROR(%s:%d): %d not found!>\n", fn, line, id); } static struct base_type_name_to_size { const char *name; strings_t sname; size_t size; } base_type_name_to_size_table[] = { { .name = "unsigned", .size = 32, }, { .name = "signed int", .size = 32, }, { .name = "unsigned int", .size = 32, }, { .name = "int", .size = 32, }, { .name = "short unsigned int", .size = 16, }, { .name = "signed short", .size = 16, }, { .name = "unsigned short", .size = 16, }, { .name = "short int", .size = 16, }, { .name = "short", .size = 16, }, { .name = "char", .size = 8, }, { .name = "signed char", .size = 8, }, { .name = "unsigned char", .size = 8, }, /* .size == 0 means target dependent: base_type__name_to_size() falls back to cu->addr_size * 8 for these */ { .name = "signed long", .size = 0, }, { .name = "long int", .size = 0, }, { .name = "long", .size = 0, }, { .name = "unsigned long", .size = 0, }, { .name = "long unsigned int", .size = 0, }, { .name = "bool", .size = 8, }, { .name = "_Bool", .size = 8, }, { .name = "long long unsigned int", .size = 64, }, { .name = "long long int", .size = 64, }, { .name = "long long", .size = 64, }, { .name = "signed long long",
.size = 64, }, { .name = "unsigned long long", .size = 64, }, { .name = "double", .size = 64, }, { .name = "double double", .size = 64, }, { .name = "single float", .size = 32, }, { .name = "float", .size = 32, }, { .name = "long double", .size = sizeof(long double) * 8, }, { .name = "long double long double", .size = sizeof(long double) * 8, }, { .name = "__int128", .size = 128, }, { .name = "unsigned __int128", .size = 128, }, { .name = "__int128 unsigned", .size = 128, }, { .name = "_Float128", .size = 128, }, { .name = NULL }, }; void base_type_name_to_size_table__init(struct strings *strings) { int i = 0; while (base_type_name_to_size_table[i].name != NULL) { if (base_type_name_to_size_table[i].sname == 0) base_type_name_to_size_table[i].sname = strings__find(strings, base_type_name_to_size_table[i].name); ++i; } } size_t base_type__name_to_size(struct base_type *bt, struct cu *cu) { int i = 0; char bf[64]; const char *name, *orig_name; if (bt->name_has_encoding) name = s(cu, bt->name); else name = base_type__name(bt, cu, bf, sizeof(bf)); orig_name = name; try_again: while (base_type_name_to_size_table[i].name != NULL) { if (bt->name_has_encoding) { if (base_type_name_to_size_table[i].sname == bt->name) { size_t size; found: size = base_type_name_to_size_table[i].size; return size ?: ((size_t)cu->addr_size * 8); } } else if (strcmp(base_type_name_to_size_table[i].name, name) == 0) goto found; ++i; } if (strstarts(name, "signed ")) { i = 0; name += sizeof("signed"); goto try_again; } fprintf(stderr, "%s: %s %s\n", __func__, dwarf_tag_name(bt->tag.tag), orig_name); return 0; } static const char *base_type_fp_type_str[] = { [BT_FP_SINGLE] = "single", [BT_FP_DOUBLE] = "double", [BT_FP_CMPLX] = "complex", [BT_FP_CMPLX_DBL] = "complex double", [BT_FP_CMPLX_LDBL] = "complex long double", [BT_FP_LDBL] = "long double", [BT_FP_INTVL] = "interval", [BT_FP_INTVL_DBL] = "interval double", [BT_FP_INTVL_LDBL] = "interval long double", [BT_FP_IMGRY] = "imaginary", [BT_FP_IMGRY_DBL] = "imaginary double", [BT_FP_IMGRY_LDBL] = "imaginary long double", }; const char *base_type__name(const struct base_type *bt, const struct cu *cu, char *bf, size_t len) { if (bt->name_has_encoding) return s(cu, bt->name); if (bt->float_type) snprintf(bf, len, "%s %s", base_type_fp_type_str[bt->float_type], s(cu, bt->name)); else snprintf(bf, len, "%s%s%s", bt->is_bool ? "bool " : "", bt->is_varargs ? "... 
" : "", s(cu, bt->name)); return bf; } void namespace__delete(struct namespace *space, struct cu *cu) { struct tag *pos, *n; namespace__for_each_tag_safe_reverse(space, pos, n) { list_del_init(&pos->node); /* Look for nested namespaces */ if (tag__has_namespace(pos)) namespace__delete(tag__namespace(pos), cu); tag__delete(pos, cu); } tag__delete(&space->tag, cu); } struct class_member * type__find_first_biggest_size_base_type_member(struct type *type, const struct cu *cu) { struct class_member *pos, *result = NULL; size_t result_size = 0; type__for_each_data_member(type, pos) { if (pos->is_static) continue; struct tag *type = cu__type(cu, pos->tag.type); size_t member_size = 0, power2; struct class_member *inner = NULL; if (type == NULL) { tag__id_not_found_fprintf(stderr, pos->tag.type); continue; } reevaluate: switch (type->tag) { case DW_TAG_base_type: member_size = base_type__size(type); break; case DW_TAG_pointer_type: case DW_TAG_reference_type: member_size = cu->addr_size; break; case DW_TAG_class_type: case DW_TAG_union_type: case DW_TAG_structure_type: if (tag__type(type)->nr_members == 0) continue; inner = type__find_first_biggest_size_base_type_member(tag__type(type), cu); member_size = inner->byte_size; break; case DW_TAG_array_type: case DW_TAG_const_type: case DW_TAG_typedef: case DW_TAG_rvalue_reference_type: case DW_TAG_volatile_type: { struct tag *tag = cu__type(cu, type->type); if (type == NULL) { tag__id_not_found_fprintf(stderr, type->type); continue; } type = tag; } goto reevaluate; case DW_TAG_enumeration_type: member_size = tag__type(type)->size / 8; break; } /* long long */ if (member_size > cu->addr_size) return pos; for (power2 = cu->addr_size; power2 > result_size; power2 /= 2) if (member_size >= power2) { if (power2 == cu->addr_size) return inner ?: pos; result_size = power2; result = inner ?: pos; } } return result; } static void cu__find_class_holes(struct cu *cu) { uint32_t id; struct class *pos; cu__for_each_struct(cu, id, pos) class__find_holes(pos); } void cus__add(struct cus *cus, struct cu *cu) { cus->nr_entries++; list_add_tail(&cu->node, &cus->cus); cu__find_class_holes(cu); } static void ptr_table__init(struct ptr_table *pt) { pt->entries = NULL; pt->nr_entries = pt->allocated_entries = 0; } static void ptr_table__exit(struct ptr_table *pt) { free(pt->entries); pt->entries = NULL; } static int ptr_table__add(struct ptr_table *pt, void *ptr, uint32_t *idxp) { const uint32_t nr_entries = pt->nr_entries + 1; const uint32_t rc = pt->nr_entries; if (nr_entries > pt->allocated_entries) { uint32_t allocated_entries = pt->allocated_entries + 256; void *entries = realloc(pt->entries, sizeof(void *) * allocated_entries); if (entries == NULL) return -ENOMEM; pt->allocated_entries = allocated_entries; pt->entries = entries; } pt->entries[rc] = ptr; pt->nr_entries = nr_entries; *idxp = rc; return 0; } static int ptr_table__add_with_id(struct ptr_table *pt, void *ptr, uint32_t id) { /* Assume we won't be fed with the same id more than once */ if (id >= pt->allocated_entries) { uint32_t allocated_entries = roundup(id + 1, 256); void *entries = realloc(pt->entries, sizeof(void *) * allocated_entries); if (entries == NULL) return -ENOMEM; pt->allocated_entries = allocated_entries; pt->entries = entries; } pt->entries[id] = ptr; ++pt->nr_entries; return 0; } static void *ptr_table__entry(const struct ptr_table *pt, uint32_t id) { return id >= pt->nr_entries ? 
NULL : pt->entries[id]; } static void cu__insert_function(struct cu *cu, struct tag *tag) { struct function *function = tag__function(tag); struct rb_node **p = &cu->functions.rb_node; struct rb_node *parent = NULL; struct function *f; while (*p != NULL) { parent = *p; f = rb_entry(parent, struct function, rb_node); if (function->lexblock.ip.addr < f->lexblock.ip.addr) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&function->rb_node, parent, p); rb_insert_color(&function->rb_node, &cu->functions); } int cu__table_add_tag(struct cu *cu, struct tag *tag, uint32_t *type_id) { struct ptr_table *pt = &cu->tags_table; if (tag__is_tag_type(tag)) pt = &cu->types_table; else if (tag__is_function(tag)) { pt = &cu->functions_table; cu__insert_function(cu, tag); } return ptr_table__add(pt, tag, type_id) ? -ENOMEM : 0; } int cu__table_nullify_type_entry(struct cu *cu, uint32_t id) { return ptr_table__add_with_id(&cu->types_table, NULL, id); } int cu__add_tag(struct cu *cu, struct tag *tag, uint32_t *id) { int err = cu__table_add_tag(cu, tag, id); if (err == 0) list_add_tail(&tag->node, &cu->tags); return err; } int cu__table_add_tag_with_id(struct cu *cu, struct tag *tag, uint32_t id) { struct ptr_table *pt = &cu->tags_table; if (tag__is_tag_type(tag)) { pt = &cu->types_table; } else if (tag__is_function(tag)) { pt = &cu->functions_table; cu__insert_function(cu, tag); } return ptr_table__add_with_id(pt, tag, id); } int cu__add_tag_with_id(struct cu *cu, struct tag *tag, uint32_t id) { int err = cu__table_add_tag_with_id(cu, tag, id); if (err == 0) list_add_tail(&tag->node, &cu->tags); return err; } struct cu *cu__new(const char *name, uint8_t addr_size, const unsigned char *build_id, int build_id_len, const char *filename) { struct cu *cu = malloc(sizeof(*cu) + build_id_len); if (cu != NULL) { uint32_t void_id; cu->name = strdup(name); cu->filename = strdup(filename); if (cu->name == NULL || cu->filename == NULL) goto out_free; obstack_init(&cu->obstack); ptr_table__init(&cu->tags_table); ptr_table__init(&cu->types_table); ptr_table__init(&cu->functions_table); /* * the first entry is historically associated with void, * so make sure we don't use it */ if (ptr_table__add(&cu->types_table, NULL, &void_id) < 0) goto out_free_name; cu->functions = RB_ROOT; cu->dfops = NULL; INIT_LIST_HEAD(&cu->tags); INIT_LIST_HEAD(&cu->tool_list); cu->addr_size = addr_size; cu->extra_dbg_info = 0; cu->nr_inline_expansions = 0; cu->size_inline_expansions = 0; cu->nr_structures_changed = 0; cu->nr_functions_changed = 0; cu->max_len_changed_item = 0; cu->function_bytes_added = 0; cu->function_bytes_removed = 0; cu->build_id_len = build_id_len; if (build_id_len > 0) memcpy(cu->build_id, build_id, build_id_len); } out: return cu; out_free_name: free(cu->name); free(cu->filename); out_free: free(cu); cu = NULL; goto out; } void cu__delete(struct cu *cu) { ptr_table__exit(&cu->tags_table); ptr_table__exit(&cu->types_table); ptr_table__exit(&cu->functions_table); if (cu->dfops && cu->dfops->cu__delete) cu->dfops->cu__delete(cu); obstack_free(&cu->obstack, NULL); free(cu->filename); free(cu->name); free(cu); } bool cu__same_build_id(const struct cu *cu, const struct cu *other) { return cu->build_id_len != 0 && cu->build_id_len == other->build_id_len && memcmp(cu->build_id, other->build_id, cu->build_id_len) == 0; } struct tag *cu__function(const struct cu *cu, const uint32_t id) { return cu ? 
ptr_table__entry(&cu->functions_table, id) : NULL; } struct tag *cu__tag(const struct cu *cu, const uint32_t id) { return cu ? ptr_table__entry(&cu->tags_table, id) : NULL; } struct tag *cu__type(const struct cu *cu, const type_id_t id) { return cu ? ptr_table__entry(&cu->types_table, id) : NULL; } struct tag *cu__find_first_typedef_of_type(const struct cu *cu, const type_id_t type) { uint32_t id; struct tag *pos; if (cu == NULL || type == 0) return NULL; cu__for_each_type(cu, id, pos) if (tag__is_typedef(pos) && pos->type == type) return pos; return NULL; } struct tag *cu__find_base_type_by_name(const struct cu *cu, const char *name, type_id_t *idp) { uint32_t id; struct tag *pos; if (cu == NULL || name == NULL) return NULL; cu__for_each_type(cu, id, pos) { if (pos->tag != DW_TAG_base_type) continue; const struct base_type *bt = tag__base_type(pos); char bf[64]; const char *bname = base_type__name(bt, cu, bf, sizeof(bf)); if (!bname || strcmp(bname, name) != 0) continue; if (idp != NULL) *idp = id; return pos; } return NULL; } struct tag *cu__find_base_type_by_sname_and_size(const struct cu *cu, strings_t sname, uint16_t bit_size, type_id_t *idp) { uint32_t id; struct tag *pos; if (sname == 0) return NULL; cu__for_each_type(cu, id, pos) { if (pos->tag == DW_TAG_base_type) { const struct base_type *bt = tag__base_type(pos); if (bt->bit_size == bit_size && bt->name == sname) { if (idp != NULL) *idp = id; return pos; } } } return NULL; } struct tag *cu__find_enumeration_by_sname_and_size(const struct cu *cu, strings_t sname, uint16_t bit_size, type_id_t *idp) { uint32_t id; struct tag *pos; if (sname == 0) return NULL; cu__for_each_type(cu, id, pos) { if (pos->tag == DW_TAG_enumeration_type) { const struct type *t = tag__type(pos); if (t->size == bit_size && t->namespace.name == sname) { if (idp != NULL) *idp = id; return pos; } } } return NULL; } struct tag *cu__find_struct_by_sname(const struct cu *cu, strings_t sname, const int include_decls, type_id_t *idp) { uint32_t id; struct tag *pos; if (sname == 0) return NULL; cu__for_each_type(cu, id, pos) { struct type *type; if (!tag__is_struct(pos)) continue; type = tag__type(pos); if (type->namespace.name == sname) { if (!type->declaration) goto found; if (include_decls) goto found; } } return NULL; found: if (idp != NULL) *idp = id; return pos; } static struct tag *__cu__find_struct_by_name(const struct cu *cu, const char *name, const int include_decls, bool unions, type_id_t *idp) { if (cu == NULL || name == NULL) return NULL; uint32_t id; struct tag *pos; cu__for_each_type(cu, id, pos) { struct type *type; if (!(tag__is_struct(pos) || (unions && tag__is_union(pos)))) continue; type = tag__type(pos); const char *tname = type__name(type, cu); if (tname && strcmp(tname, name) == 0) { if (!type->declaration) goto found; if (include_decls) goto found; } } return NULL; found: if (idp != NULL) *idp = id; return pos; } struct tag *cu__find_struct_by_name(const struct cu *cu, const char *name, const int include_decls, type_id_t *idp) { return __cu__find_struct_by_name(cu, name, include_decls, false, idp); } struct tag *cu__find_struct_or_union_by_name(const struct cu *cu, const char *name, const int include_decls, type_id_t *idp) { return __cu__find_struct_by_name(cu, name, include_decls, true, idp); } static struct tag *__cus__find_struct_by_name(const struct cus *cus, struct cu **cu, const char *name, const int include_decls, bool unions, type_id_t *id) { struct cu *pos; list_for_each_entry(pos, &cus->cus, node) { struct tag *tag = 
__cu__find_struct_by_name(pos, name, include_decls, unions, id); if (tag != NULL) { if (cu != NULL) *cu = pos; return tag; } } return NULL; } struct tag *cus__find_struct_by_name(const struct cus *cus, struct cu **cu, const char *name, const int include_decls, type_id_t *idp) { return __cus__find_struct_by_name(cus, cu, name, include_decls, false, idp); } struct tag *cus__find_struct_or_union_by_name(const struct cus *cus, struct cu **cu, const char *name, const int include_decls, type_id_t *idp) { return __cus__find_struct_by_name(cus, cu, name, include_decls, true, idp); } struct function *cu__find_function_at_addr(const struct cu *cu, uint64_t addr) { struct rb_node *n; if (cu == NULL) return NULL; n = cu->functions.rb_node; while (n) { struct function *f = rb_entry(n, struct function, rb_node); if (addr < f->lexblock.ip.addr) n = n->rb_left; else if (addr >= f->lexblock.ip.addr + f->lexblock.size) n = n->rb_right; else return f; } return NULL; } struct function *cus__find_function_at_addr(const struct cus *cus, uint64_t addr, struct cu **cu) { struct cu *pos; list_for_each_entry(pos, &cus->cus, node) { struct function *f = cu__find_function_at_addr(pos, addr); if (f != NULL) { if (cu != NULL) *cu = pos; return f; } } return NULL; } struct cu *cus__find_cu_by_name(const struct cus *cus, const char *name) { struct cu *pos; list_for_each_entry(pos, &cus->cus, node) if (pos->name && strcmp(pos->name, name) == 0) return pos; return NULL; } struct tag *cu__find_function_by_name(const struct cu *cu, const char *name) { if (cu == NULL || name == NULL) return NULL; uint32_t id; struct function *pos; cu__for_each_function(cu, id, pos) { const char *fname = function__name(pos, cu); if (fname && strcmp(fname, name) == 0) return function__tag(pos); } return NULL; } static size_t array_type__nr_entries(const struct array_type *at) { int i; size_t nr_entries = 1; for (i = 0; i < at->dimensions; ++i) nr_entries *= at->nr_entries[i]; return nr_entries; } size_t tag__size(const struct tag *tag, const struct cu *cu) { size_t size; switch (tag->tag) { case DW_TAG_member: { struct class_member *member = tag__class_member(tag); if (member->is_static) return 0; /* Is it cached already? */ size = member->byte_size; if (size != 0) return size; break; } case DW_TAG_pointer_type: case DW_TAG_reference_type: return cu->addr_size; case DW_TAG_base_type: return base_type__size(tag); case DW_TAG_enumeration_type: return tag__type(tag)->size / 8; } if (tag->type == 0) { /* struct class: unions, structs */ struct type *type = tag__type(tag); /* empty base optimization trick */ if (type->size == 1 && type->nr_members == 0) size = 0; else size = tag__type(tag)->size; } else { const struct tag *type = cu__type(cu, tag->type); if (type == NULL) { tag__id_not_found_fprintf(stderr, tag->type); return -1; } else if (tag__has_type_loop(tag, type, NULL, 0, NULL)) return -1; size = tag__size(type, cu); } if (tag->tag == DW_TAG_array_type) return size * array_type__nr_entries(tag__array_type(tag)); return size; } const char *variable__name(const struct variable *var, const struct cu *cu) { if (cu->dfops && cu->dfops->variable__name) return cu->dfops->variable__name(var, cu); return s(cu, var->name); } const char *variable__type_name(const struct variable *var, const struct cu *cu, char *bf, size_t len) { const struct tag *tag = cu__type(cu, var->ip.tag.type); return tag != NULL ? 
tag__name(tag, cu, bf, len, NULL) : NULL; } void class_member__delete(struct class_member *member, struct cu *cu) { obstack_free(&cu->obstack, member); } static struct class_member *class_member__clone(const struct class_member *from, struct cu *cu) { struct class_member *member = obstack_alloc(&cu->obstack, sizeof(*member)); if (member != NULL) memcpy(member, from, sizeof(*member)); return member; } static void type__delete_class_members(struct type *type, struct cu *cu) { struct class_member *pos, *next; type__for_each_tag_safe_reverse(type, pos, next) { list_del_init(&pos->tag.node); class_member__delete(pos, cu); } } void class__delete(struct class *class, struct cu *cu) { if (class->type.namespace.sname != NULL) free(class->type.namespace.sname); type__delete_class_members(&class->type, cu); obstack_free(&cu->obstack, class); } void type__delete(struct type *type, struct cu *cu) { type__delete_class_members(type, cu); obstack_free(&cu->obstack, type); } static void enumerator__delete(struct enumerator *enumerator, struct cu *cu) { obstack_free(&cu->obstack, enumerator); } void enumeration__delete(struct type *type, struct cu *cu) { struct enumerator *pos, *n; type__for_each_enumerator_safe_reverse(type, pos, n) { list_del_init(&pos->tag.node); enumerator__delete(pos, cu); } } void class__add_vtable_entry(struct class *class, struct function *vtable_entry) { ++class->nr_vtable_entries; list_add_tail(&vtable_entry->vtable_node, &class->vtable); } void namespace__add_tag(struct namespace *space, struct tag *tag) { ++space->nr_tags; list_add_tail(&tag->node, &space->tags); } void type__add_member(struct type *type, struct class_member *member) { if (member->is_static) ++type->nr_static_members; else ++type->nr_members; namespace__add_tag(&type->namespace, &member->tag); } struct class_member *type__last_member(struct type *type) { struct class_member *pos; list_for_each_entry_reverse(pos, &type->namespace.tags, tag.node) if (pos->tag.tag == DW_TAG_member) return pos; return NULL; } static int type__clone_members(struct type *type, const struct type *from, struct cu *cu) { struct class_member *pos; type->nr_members = type->nr_static_members = 0; INIT_LIST_HEAD(&type->namespace.tags); type__for_each_member(from, pos) { struct class_member *clone = class_member__clone(pos, cu); if (clone == NULL) return -1; type__add_member(type, clone); } return 0; } struct class *class__clone(const struct class *from, const char *new_class_name, struct cu *cu) { struct class *class = obstack_alloc(&cu->obstack, sizeof(*class)); if (class != NULL) { memcpy(class, from, sizeof(*class)); if (new_class_name != NULL) { class->type.namespace.name = 0; class->type.namespace.sname = strdup(new_class_name); if (class->type.namespace.sname == NULL) { free(class); return NULL; } } if (type__clone_members(&class->type, &from->type, cu) != 0) { class__delete(class, cu); class = NULL; } } return class; } void enumeration__add(struct type *type, struct enumerator *enumerator) { ++type->nr_members; namespace__add_tag(&type->namespace, &enumerator->tag); } void lexblock__add_lexblock(struct lexblock *block, struct lexblock *child) { ++block->nr_lexblocks; list_add_tail(&child->ip.tag.node, &block->tags); } const char *function__name(struct function *func, const struct cu *cu) { if (cu->dfops && cu->dfops->function__name) return cu->dfops->function__name(func, cu); return s(cu, func->name); } static void parameter__delete(struct parameter *parm, struct cu *cu) { obstack_free(&cu->obstack, parm); } void ftype__delete(struct 
ftype *type, struct cu *cu) { struct parameter *pos, *n; if (type == NULL) return; ftype__for_each_parameter_safe_reverse(type, pos, n) { list_del_init(&pos->tag.node); parameter__delete(pos, cu); } obstack_free(&cu->obstack, type); } void function__delete(struct function *func, struct cu *cu) { lexblock__delete_tags(&func->lexblock.ip.tag, cu); ftype__delete(&func->proto, cu); } int ftype__has_parm_of_type(const struct ftype *ftype, const type_id_t target, const struct cu *cu) { struct parameter *pos; ftype__for_each_parameter(ftype, pos) { struct tag *type = cu__type(cu, pos->tag.type); if (type != NULL && tag__is_pointer(type)) { if (type->type == target) return 1; } } return 0; } void ftype__add_parameter(struct ftype *ftype, struct parameter *parm) { ++ftype->nr_parms; list_add_tail(&parm->tag.node, &ftype->parms); } void lexblock__add_tag(struct lexblock *block, struct tag *tag) { list_add_tail(&tag->node, &block->tags); } void lexblock__add_inline_expansion(struct lexblock *block, struct inline_expansion *exp) { ++block->nr_inline_expansions; block->size_inline_expansions += exp->size; lexblock__add_tag(block, &exp->ip.tag); } void lexblock__add_variable(struct lexblock *block, struct variable *var) { ++block->nr_variables; lexblock__add_tag(block, &var->ip.tag); } void lexblock__add_label(struct lexblock *block, struct label *label) { ++block->nr_labels; lexblock__add_tag(block, &label->ip.tag); } const struct class_member *class__find_bit_hole(const struct class *class, const struct class_member *trailer, const uint16_t bit_hole_size) { struct class_member *pos; const size_t byte_hole_size = bit_hole_size / 8; type__for_each_data_member(&class->type, pos) if (pos == trailer) break; else if (pos->hole >= byte_hole_size || pos->bit_hole >= bit_hole_size) return pos; return NULL; } void class__find_holes(struct class *class) { const struct type *ctype = &class->type; struct class_member *pos, *last = NULL; int cur_bitfield_end = ctype->size * 8, cur_bitfield_size = 0; int bit_holes = 0, byte_holes = 0; int bit_start, bit_end; int last_seen_bit = 0; bool in_bitfield = false; if (!tag__is_struct(class__tag(class))) return; if (class->holes_searched) return; class->nr_holes = 0; class->nr_bit_holes = 0; type__for_each_member(ctype, pos) { /* XXX for now just skip these */ if (pos->tag.tag == DW_TAG_inheritance && pos->virtuality == DW_VIRTUALITY_virtual) continue; if (pos->is_static) continue; pos->bit_hole = 0; pos->hole = 0; bit_start = pos->bit_offset; if (pos->bitfield_size) { bit_end = bit_start + pos->bitfield_size; } else { bit_end = bit_start + pos->byte_size * 8; } bit_holes = 0; byte_holes = 0; if (in_bitfield) { /* check if we have some trailing bitfield bits left */ int bitfield_end = min(bit_start, cur_bitfield_end); bit_holes = bitfield_end - last_seen_bit; last_seen_bit = bitfield_end; } if (pos->bitfield_size) { int aligned_start = pos->byte_offset * 8; /* we can have some alignment byte padding left, * but we need to be careful about bitfield spanning * multiple aligned boundaries */ if (last_seen_bit < aligned_start && aligned_start <= bit_start) { byte_holes = pos->byte_offset - last_seen_bit / 8; last_seen_bit = aligned_start; } bit_holes += bit_start - last_seen_bit; } else { byte_holes = bit_start/8 - last_seen_bit/8; } last_seen_bit = bit_end; if (pos->bitfield_size) { in_bitfield = true; /* if it's a new bitfield set or same, but with * bigger-sized type, readjust size and end bit */ if (bit_end > cur_bitfield_end || pos->bit_size > cur_bitfield_size) { 
cur_bitfield_size = pos->bit_size; cur_bitfield_end = pos->byte_offset * 8 + cur_bitfield_size; /* * if current bitfield "borrowed" bits from * previous bitfield, it will have byte_offset * of previous bitfield's backing integral * type, but its end bit will be in a new * bitfield "area", so we need to adjust * bitfield end appropriately */ if (bit_end > cur_bitfield_end) { cur_bitfield_end += cur_bitfield_size; } } } else { in_bitfield = false; cur_bitfield_size = 0; cur_bitfield_end = bit_end; } if (last) { last->hole = byte_holes; last->bit_hole = bit_holes; } else { class->pre_hole = byte_holes; class->pre_bit_hole = bit_holes; } if (bit_holes) class->nr_bit_holes++; if (byte_holes) class->nr_holes++; last = pos; } if (in_bitfield) { int bitfield_end = min(ctype->size * 8, cur_bitfield_end); class->bit_padding = bitfield_end - last_seen_bit; last_seen_bit = bitfield_end; } else { class->bit_padding = 0; } class->padding = ctype->size - last_seen_bit / 8; class->holes_searched = true; } static size_t type__natural_alignment(struct type *type, const struct cu *cu); static size_t tag__natural_alignment(struct tag *tag, const struct cu *cu) { size_t natural_alignment = 1; if (tag__is_pointer(tag)) { natural_alignment = cu->addr_size; } else if (tag->tag == DW_TAG_base_type) { natural_alignment = base_type__size(tag); } else if (tag__is_enumeration(tag)) { natural_alignment = tag__type(tag)->size / 8; } else if (tag__is_struct(tag) || tag__is_union(tag)) { natural_alignment = type__natural_alignment(tag__type(tag), cu); } else if (tag->tag == DW_TAG_array_type) { tag = tag__strip_typedefs_and_modifiers(tag, cu); natural_alignment = tag__natural_alignment(tag, cu); } /* * Cope with zero sized types, like: * * struct u64_stats_sync { * #if BITS_PER_LONG==32 && defined(CONFIG_SMP) * seqcount_t seq; * #endif * }; * */ return natural_alignment ?: 1; } static size_t type__natural_alignment(struct type *type, const struct cu *cu) { struct class_member *member; if (type->natural_alignment != 0) return type->natural_alignment; type__for_each_member(type, member) { /* XXX for now just skip these */ if (member->tag.tag == DW_TAG_inheritance && member->virtuality == DW_VIRTUALITY_virtual) continue; struct tag *member_type = tag__strip_typedefs_and_modifiers(&member->tag, cu); size_t member_natural_alignment = tag__natural_alignment(member_type, cu); if (type->natural_alignment < member_natural_alignment) type->natural_alignment = member_natural_alignment; } return type->natural_alignment; } /* * Sometimes the only indication that a struct is __packed__ is for it to * appear embedded in another and at an offset that is not natural for it, * so, in !__packed__ marked structs, check for that and mark the types of * members at unnatural alignments.
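 * For example (made up for illustration):
 *
 *     struct inner { uint32_t i; };      natural alignment: 4
 *     struct outer {
 *             char         c;
 *             struct inner in;           byte_offset == 1
 *     } __attribute__((packed));
 *
 * "in" sits at offset 1 and 1 % 4 != 0, a layout that is only
 * possible if struct inner itself is packed, so its type gets marked
 * below even though no explicit attribute survives in the DWARF data.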
*/ void type__check_structs_at_unnatural_alignments(struct type *type, const struct cu *cu) { struct class_member *member; type__for_each_member(type, member) { struct tag *member_type = tag__strip_typedefs_and_modifiers(&member->tag, cu); if (!tag__is_struct(member_type)) continue; size_t natural_alignment = tag__natural_alignment(member_type, cu); /* Would this break the natural alignment */ if ((member->byte_offset % natural_alignment) != 0) { struct class *cls = tag__class(member_type); cls->is_packed = true; cls->type.packed_attributes_inferred = true; } } } bool class__infer_packed_attributes(struct class *cls, const struct cu *cu) { struct type *ctype = &cls->type; struct class_member *pos, *last = NULL; uint16_t max_natural_alignment = 1; if (!tag__is_struct(class__tag(cls))) return false; if (ctype->packed_attributes_inferred) return cls->is_packed; class__find_holes(cls); if (cls->padding != 0 || cls->nr_holes != 0) { type__check_structs_at_unnatural_alignments(ctype, cu); cls->is_packed = false; goto out; } type__for_each_member(ctype, pos) { /* XXX for now just skip these */ if (pos->tag.tag == DW_TAG_inheritance && pos->virtuality == DW_VIRTUALITY_virtual) continue; if (pos->is_static) continue; struct tag *member_type = tag__strip_typedefs_and_modifiers(&pos->tag, cu); size_t natural_alignment = tag__natural_alignment(member_type, cu); /* Always aligned: */ if (natural_alignment == sizeof(char)) continue; if (max_natural_alignment < natural_alignment) max_natural_alignment = natural_alignment; if ((pos->byte_offset % natural_alignment) == 0) continue; cls->is_packed = true; goto out; } if ((max_natural_alignment != 1 && ctype->alignment == 1) || (class__size(cls) % max_natural_alignment) != 0) cls->is_packed = true; out: ctype->packed_attributes_inferred = true; return cls->is_packed; } /* * If structs embedded in unions, nameless or not, have a size which isn't * a multiple of the union size, then they must be packed, even if * they have no holes nor padding, as an array of such unions would have the * natural alignments of non-multiple structs inside it broken.
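 * For example (made up for illustration): with
 *
 *     union u { struct s { uint32_t i; char c; } s; };
 *
 * where struct s is packed, sizeof(union u) == 5, and 5 is not a
 * multiple of the uint32_t member's natural alignment (4); in
 * "union u arr[2]" the "i" of arr[1] would then sit on a misaligned
 * address, which is only valid if struct s is packed, so it gets
 * marked as such below.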
*/ void union__infer_packed_attributes(struct type *type, const struct cu *cu) { const uint32_t union_size = type->size; struct class_member *member; if (type->packed_attributes_inferred) return; type__for_each_member(type, member) { struct tag *member_type = tag__strip_typedefs_and_modifiers(&member->tag, cu); if (!tag__is_struct(member_type)) continue; size_t natural_alignment = tag__natural_alignment(member_type, cu); /* Would this break the natural alignment */ if ((union_size % natural_alignment) != 0) { struct class *cls = tag__class(member_type); cls->is_packed = true; cls->type.packed_attributes_inferred = true; } } type->packed_attributes_inferred = true; } /** class__has_hole_ge - check if class has a hole greater or equal to @size * @class - class instance * @size - hole size to check */ int class__has_hole_ge(const struct class *class, const uint16_t size) { struct class_member *pos; if (class->nr_holes == 0) return 0; type__for_each_data_member(&class->type, pos) if (pos->hole >= size) return 1; return 0; } struct class_member *type__find_member_by_name(const struct type *type, const struct cu *cu, const char *name) { if (name == NULL) return NULL; struct class_member *pos; type__for_each_data_member(type, pos) { const char *curr_name = class_member__name(pos, cu); if (curr_name && strcmp(curr_name, name) == 0) return pos; } return NULL; } uint32_t type__nr_members_of_type(const struct type *type, const type_id_t type_id) { struct class_member *pos; uint32_t nr_members_of_type = 0; type__for_each_member(type, pos) if (pos->tag.type == type_id) ++nr_members_of_type; return nr_members_of_type; } static void lexblock__account_inline_expansions(struct lexblock *block, const struct cu *cu) { struct tag *pos, *type; if (block->nr_inline_expansions == 0) return; list_for_each_entry(pos, &block->tags, node) { if (pos->tag == DW_TAG_lexical_block) { lexblock__account_inline_expansions(tag__lexblock(pos), cu); continue; } else if (pos->tag != DW_TAG_inlined_subroutine) continue; type = cu__function(cu, pos->type); if (type != NULL) { struct function *ftype = tag__function(type); ftype->cu_total_nr_inline_expansions++; ftype->cu_total_size_inline_expansions += tag__inline_expansion(pos)->size; } } } void cu__account_inline_expansions(struct cu *cu) { struct tag *pos; struct function *fpos; list_for_each_entry(pos, &cu->tags, node) { if (!tag__is_function(pos)) continue; fpos = tag__function(pos); lexblock__account_inline_expansions(&fpos->lexblock, cu); cu->nr_inline_expansions += fpos->lexblock.nr_inline_expansions; cu->size_inline_expansions += fpos->lexblock.size_inline_expansions; } } static int list__for_all_tags(struct list_head *list, struct cu *cu, int (*iterator)(struct tag *tag, struct cu *cu, void *cookie), void *cookie) { struct tag *pos, *n; list_for_each_entry_safe_reverse(pos, n, list, node) { if (tag__has_namespace(pos)) { struct namespace *space = tag__namespace(pos); /* * See comment in type__for_each_enumerator, the * enumerators (enum entries) are shared, but the * enumeration tag must be deleted. 
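 * (i.e. when space->shared_tags is set the tags list is borrowed
 * from another namespace rather than owned, so recursing into it
 * here would visit, and eventually delete, the same entries twice.)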
*/ if (!space->shared_tags && list__for_all_tags(&space->tags, cu, iterator, cookie)) return 1; /* * vtable functions are already in the class tags list */ } else if (tag__is_function(pos)) { if (list__for_all_tags(&tag__ftype(pos)->parms, cu, iterator, cookie)) return 1; if (list__for_all_tags(&tag__function(pos)->lexblock.tags, cu, iterator, cookie)) return 1; } else if (pos->tag == DW_TAG_subroutine_type) { if (list__for_all_tags(&tag__ftype(pos)->parms, cu, iterator, cookie)) return 1; } else if (pos->tag == DW_TAG_lexical_block) { if (list__for_all_tags(&tag__lexblock(pos)->tags, cu, iterator, cookie)) return 1; } if (iterator(pos, cu, cookie)) return 1; } return 0; } int cu__for_all_tags(struct cu *cu, int (*iterator)(struct tag *tag, struct cu *cu, void *cookie), void *cookie) { return list__for_all_tags(&cu->tags, cu, iterator, cookie); } void cus__for_each_cu(struct cus *cus, int (*iterator)(struct cu *cu, void *cookie), void *cookie, struct cu *(*filter)(struct cu *cu)) { struct cu *pos; list_for_each_entry(pos, &cus->cus, node) { struct cu *cu = pos; if (filter != NULL) { cu = filter(pos); if (cu == NULL) continue; } if (iterator(cu, cookie)) break; } } int cus__load_dir(struct cus *cus, struct conf_load *conf, const char *dirname, const char *filename_mask, const int recursive) { struct dirent *entry; int err = -1; DIR *dir = opendir(dirname); if (dir == NULL) goto out; err = 0; while ((entry = readdir(dir)) != NULL) { char pathname[PATH_MAX]; struct stat st; if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) continue; snprintf(pathname, sizeof(pathname), "%s/%s", dirname, entry->d_name); err = lstat(pathname, &st); if (err != 0) break; if (S_ISDIR(st.st_mode)) { if (!recursive) continue; err = cus__load_dir(cus, conf, pathname, filename_mask, recursive); if (err != 0) break; } else if (fnmatch(filename_mask, entry->d_name, 0) == 0) { err = cus__load_file(cus, conf, pathname); if (err != 0) break; } } if (err == -1) puts(dirname); closedir(dir); out: return err; } /* * This should really do demand loading of DSOs, STABS anyone? 
8-) */ extern struct debug_fmt_ops dwarf__ops, ctf__ops, btf_elf__ops; static struct debug_fmt_ops *debug_fmt_table[] = { &dwarf__ops, &ctf__ops, &btf_elf__ops, NULL, }; static int debugging_formats__loader(const char *name) { int i = 0; while (debug_fmt_table[i] != NULL) { if (strcmp(debug_fmt_table[i]->name, name) == 0) return i; ++i; } return -1; } int cus__load_file(struct cus *cus, struct conf_load *conf, const char *filename) { int i = 0, err = 0; int loader; if (conf && conf->format_path != NULL) { char *fpath = strdup(conf->format_path); if (fpath == NULL) return -ENOMEM; char *fp = fpath; while (1) { char *sep = strchr(fp, ','); if (sep != NULL) *sep = '\0'; err = -ENOTSUP; loader = debugging_formats__loader(fp); if (loader == -1) break; if (conf->conf_fprintf) conf->conf_fprintf->has_alignment_info = debug_fmt_table[loader]->has_alignment_info; err = 0; if (debug_fmt_table[loader]->load_file(cus, conf, filename) == 0) break; err = -EINVAL; if (sep == NULL) break; fp = sep + 1; } free(fpath); return err; } while (debug_fmt_table[i] != NULL) { /* conf may be NULL here, as the guard above shows, so check it before dereferencing */ if (conf && conf->conf_fprintf) conf->conf_fprintf->has_alignment_info = debug_fmt_table[i]->has_alignment_info; if (debug_fmt_table[i]->load_file(cus, conf, filename) == 0) return 0; ++i; } return -EINVAL; } #define BUILD_ID_SIZE 20 #define SBUILD_ID_SIZE (BUILD_ID_SIZE * 2 + 1) #define NOTE_ALIGN(sz) (((sz) + 3) & ~3) #define NT_GNU_BUILD_ID 3 #ifndef min #define min(x, y) ({ \ typeof(x) _min1 = (x); \ typeof(y) _min2 = (y); \ (void) (&_min1 == &_min2); \ _min1 < _min2 ? _min1 : _min2; }) #endif /* Force a compilation error if condition is true, but also produce a result (of value 0 and type size_t), so the expression can be used e.g. in a structure initializer (or wherever else comma expressions aren't permitted). */ #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) /* Are two types/vars the same type (ignoring qualifiers)?
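 *
 * A sketch of how the three macros below cooperate (illustrative only,
 * not part of the original sources):
 *
 *	int a[4], *p = a;
 *
 *	ARRAY_SIZE(a);	// 4: a and &a[0] differ (array vs pointer), so
 *			// __must_be_array() contributes a harmless 0
 *	ARRAY_SIZE(p);	// compile error: p and &p[0] are the same type,
 *			// so BUILD_BUG_ON_ZERO(1) declares an int:-1 bitfield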
*/ #ifndef __same_type # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) #endif /* &a[0] degrades to a pointer: a different type from an array */ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) static int sysfs__read_build_id(const char *filename, void *build_id, size_t size) { int fd, err = -1; if (size < BUILD_ID_SIZE) goto out; fd = open(filename, O_RDONLY); if (fd < 0) goto out; while (1) { char bf[BUFSIZ]; GElf_Nhdr nhdr; size_t namesz, descsz; if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) break; namesz = NOTE_ALIGN(nhdr.n_namesz); descsz = NOTE_ALIGN(nhdr.n_descsz); if (nhdr.n_type == NT_GNU_BUILD_ID && nhdr.n_namesz == sizeof("GNU")) { if (read(fd, bf, namesz) != (ssize_t)namesz) break; if (memcmp(bf, "GNU", sizeof("GNU")) == 0) { size_t sz = min(descsz, size); if (read(fd, build_id, sz) == (ssize_t)sz) { memset(build_id + sz, 0, size - sz); err = 0; break; } } else if (read(fd, bf, descsz) != (ssize_t)descsz) break; } else { int n = namesz + descsz; if (n > (int)sizeof(bf)) { n = sizeof(bf); fprintf(stderr, "%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n", __func__, filename, nhdr.n_namesz, nhdr.n_descsz); } if (read(fd, bf, n) != n) break; } } close(fd); out: return err; } static int elf_read_build_id(Elf *elf, void *bf, size_t size) { int err = -1; GElf_Ehdr ehdr; GElf_Shdr shdr; Elf_Data *data; Elf_Scn *sec; Elf_Kind ek; void *ptr; if (size < BUILD_ID_SIZE) goto out; ek = elf_kind(elf); if (ek != ELF_K_ELF) goto out; if (gelf_getehdr(elf, &ehdr) == NULL) { fprintf(stderr, "%s: cannot get elf header.\n", __func__); goto out; } /* * Check following sections for notes: * '.note.gnu.build-id' * '.notes' * '.note' (VDSO specific) */ do { sec = elf_section_by_name(elf, &ehdr, &shdr, ".note.gnu.build-id", NULL); if (sec) break; sec = elf_section_by_name(elf, &ehdr, &shdr, ".notes", NULL); if (sec) break; sec = elf_section_by_name(elf, &ehdr, &shdr, ".note", NULL); if (sec) break; return err; } while (0); data = elf_getdata(sec, NULL); if (data == NULL) goto out; ptr = data->d_buf; while (ptr < (data->d_buf + data->d_size)) { GElf_Nhdr *nhdr = ptr; size_t namesz = NOTE_ALIGN(nhdr->n_namesz), descsz = NOTE_ALIGN(nhdr->n_descsz); const char *name; ptr += sizeof(*nhdr); name = ptr; ptr += namesz; if (nhdr->n_type == NT_GNU_BUILD_ID && nhdr->n_namesz == sizeof("GNU")) { if (memcmp(name, "GNU", sizeof("GNU")) == 0) { size_t sz = min(size, descsz); memcpy(bf, ptr, sz); memset(bf + sz, 0, size - sz); err = descsz; break; } } ptr += descsz; } out: return err; } static int filename__read_build_id(const char *filename, void *bf, size_t size) { int fd, err = -1; Elf *elf; if (size < BUILD_ID_SIZE) goto out; fd = open(filename, O_RDONLY); if (fd < 0) goto out; elf = elf_begin(fd, ELF_C_READ, NULL); if (elf == NULL) { fprintf(stderr, "%s: cannot read %s ELF file.\n", __func__, filename); goto out_close; } err = elf_read_build_id(elf, bf, size); elf_end(elf); out_close: close(fd); out: return err; } static int build_id__sprintf(const unsigned char *build_id, int len, char *bf) { char *bid = bf; const unsigned char *raw = build_id; int i; for (i = 0; i < len; ++i) { sprintf(bid, "%02x", *raw); ++raw; bid += 2; } return (bid - bf) + 1; } static int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id) { char notes[PATH_MAX]; unsigned char build_id[BUILD_ID_SIZE]; int ret; if (!root_dir) root_dir = ""; snprintf(notes, 
sizeof(notes), "%s/sys/kernel/notes", root_dir); ret = sysfs__read_build_id(notes, build_id, sizeof(build_id)); if (ret < 0) return ret; return build_id__sprintf(build_id, sizeof(build_id), sbuild_id); } static int filename__sprintf_build_id(const char *pathname, char *sbuild_id) { unsigned char build_id[BUILD_ID_SIZE]; int ret; ret = filename__read_build_id(pathname, build_id, sizeof(build_id)); if (ret < 0) return ret; else if (ret != sizeof(build_id)) return -EINVAL; return build_id__sprintf(build_id, sizeof(build_id), sbuild_id); } /* asnprintf consolidates asprintf and snprintf */ static int asnprintf(char **strp, size_t size, const char *fmt, ...) { va_list ap; int ret; if (!strp) return -EINVAL; va_start(ap, fmt); if (*strp) ret = vsnprintf(*strp, size, fmt, ap); else ret = vasprintf(strp, fmt, ap); va_end(ap); return ret; } #define zfree(ptr) ({ free(*ptr); *ptr = NULL; }) static int vmlinux_path__nr_entries; static char **vmlinux_path; static void vmlinux_path__exit(void) { while (--vmlinux_path__nr_entries >= 0) zfree(&vmlinux_path[vmlinux_path__nr_entries]); vmlinux_path__nr_entries = 0; zfree(&vmlinux_path); } static const char * const vmlinux_paths[] = { "vmlinux", "/boot/vmlinux" }; static const char * const vmlinux_paths_upd[] = { "/boot/vmlinux-%s", "/usr/lib/debug/boot/vmlinux-%s", "/lib/modules/%s/build/vmlinux", "/usr/lib/debug/lib/modules/%s/vmlinux", "/usr/lib/debug/boot/vmlinux-%s.debug" }; static int vmlinux_path__add(const char *new_entry) { vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry); if (vmlinux_path[vmlinux_path__nr_entries] == NULL) return -1; ++vmlinux_path__nr_entries; return 0; } static int vmlinux_path__init(void) { struct utsname uts; char bf[PATH_MAX]; char *kernel_version; unsigned int i; vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) + ARRAY_SIZE(vmlinux_paths_upd))); if (vmlinux_path == NULL) return -1; for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++) if (vmlinux_path__add(vmlinux_paths[i]) < 0) goto out_fail; if (uname(&uts) < 0) goto out_fail; kernel_version = uts.release; for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) { snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version); if (vmlinux_path__add(bf) < 0) goto out_fail; } return 0; out_fail: vmlinux_path__exit(); return -1; } static int cus__load_running_kernel(struct cus *cus, struct conf_load *conf) { int i, err = 0; char running_sbuild_id[SBUILD_ID_SIZE]; elf_version(EV_CURRENT); vmlinux_path__init(); sysfs__sprintf_build_id(NULL, running_sbuild_id); for (i = 0; i < vmlinux_path__nr_entries; ++i) { char sbuild_id[SBUILD_ID_SIZE]; if (filename__sprintf_build_id(vmlinux_path[i], sbuild_id) > 0 && strcmp(sbuild_id, running_sbuild_id) == 0) { err = cus__load_file(cus, conf, vmlinux_path[i]); break; } } vmlinux_path__exit(); return err; } int cus__load_files(struct cus *cus, struct conf_load *conf, char *filenames[]) { int i = 0; while (filenames[i] != NULL) { if (cus__load_file(cus, conf, filenames[i])) return -++i; ++i; } return i ? 0 : cus__load_running_kernel(cus, conf); } int cus__fprintf_load_files_err(struct cus *cus, const char *tool, char *argv[], int err, FILE *output) { /* errno is not properly preserved in some cases, sigh */ return fprintf(output, "%s: %s: %s\n", tool, argv[-err - 1], errno ? 
strerror(errno) : "No debugging information found"); } struct cus *cus__new(void) { struct cus *cus = malloc(sizeof(*cus)); if (cus != NULL) { cus->nr_entries = 0; INIT_LIST_HEAD(&cus->cus); } return cus; } void cus__delete(struct cus *cus) { struct cu *pos, *n; if (cus == NULL) return; list_for_each_entry_safe(pos, n, &cus->cus, node) { list_del_init(&pos->node); cu__delete(pos); } free(cus); } void dwarves__fprintf_init(uint16_t user_cacheline_size); int dwarves__init(uint16_t user_cacheline_size) { dwarves__fprintf_init(user_cacheline_size); int i = 0; int err = 0; while (debug_fmt_table[i] != NULL) { if (debug_fmt_table[i]->init) { err = debug_fmt_table[i]->init(); if (err) goto out_fail; } ++i; } return 0; out_fail: while (i-- != 0) if (debug_fmt_table[i]->exit) debug_fmt_table[i]->exit(); return err; } void dwarves__exit(void) { int i = 0; while (debug_fmt_table[i] != NULL) { if (debug_fmt_table[i]->exit) debug_fmt_table[i]->exit(); ++i; } } struct argp_state; void dwarves_print_version(FILE *fp, struct argp_state *state __unused) { fprintf(fp, "%s\n", DWARVES_VERSION); } dwarves-dfsg-1.15/dwarves.h000066400000000000000000001116411350511416500156650ustar00rootroot00000000000000#ifndef _DWARVES_H_ #define _DWARVES_H_ 1 /* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2006 Mandriva Conectiva S.A. Copyright (C) 2006..2019 Arnaldo Carvalho de Melo */ #include #include #include #include #include #include "dutil.h" #include "list.h" #include "rbtree.h" #include "strings.h" struct cu; enum load_steal_kind { LSK__KEEPIT, LSK__DELETE, LSK__STOP_LOADING, }; /* * BTF combines all the types into one big CU using btf_dedup(), so for something * like a allyesconfig vmlinux kernel we can get over 65535 types. */ typedef uint32_t type_id_t; struct conf_fprintf; /** struct conf_load - load configuration * @extra_dbg_info - keep original debugging format extra info * (e.g. DWARF's decl_{line,file}, id, etc) * @fixup_silly_bitfields - Fixup silly things such as "int foo:32;" * @get_addr_info - wheter to load DW_AT_location and other addr info */ struct conf_load { enum load_steal_kind (*steal)(struct cu *cu, struct conf_load *conf); void *cookie; char *format_path; bool extra_dbg_info; bool fixup_silly_bitfields; bool get_addr_info; struct conf_fprintf *conf_fprintf; }; /** struct conf_fprintf - hints to the __fprintf routines * * @flat_arrays - a->foo[10][2] becomes a->foo[20] * @classes_as_structs - class f becomes struct f, CTF doesn't have a "class" * @cachelinep - pointer to current cacheline, so that when expanding types we keep track of it, * needs to be "global", i.e. not set at each recursion. * @suppress_force_paddings: This makes sense only if the debugging format has struct alignment information, * So allow for it to be disabled and disable it automatically for things like BTF, * that don't have such info. 
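 *			(How that is wired up, inferred from cus__load_file() in
 *			dwarves.c earlier in this archive: when a loader is picked,
 *			conf_fprintf->has_alignment_info is copied from the chosen
 *			debug_fmt_ops, presumably false for BTF and CTF, so tools
 *			can key suppress_force_paddings and friends off that bit.)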
*/ struct conf_fprintf { const char *prefix; const char *suffix; int32_t type_spacing; int32_t name_spacing; uint32_t base_offset; uint32_t *cachelinep; uint8_t indent; uint8_t expand_types:1; uint8_t expand_pointers:1; uint8_t rel_offset:1; uint8_t emit_stats:1; uint8_t suppress_comments:1; uint8_t has_alignment_info:1; uint8_t suppress_aligned_attribute:1; uint8_t suppress_offset_comment:1; uint8_t suppress_force_paddings:1; uint8_t suppress_packed:1; uint8_t show_decl_info:1; uint8_t show_only_data_members:1; uint8_t no_semicolon:1; uint8_t show_first_biggest_size_base_type_member:1; uint8_t flat_arrays:1; uint8_t first_member:1; uint8_t last_member:1; uint8_t union_member:1; uint8_t no_parm_names:1; uint8_t classes_as_structs:1; uint8_t hex_fmt:1; uint8_t strip_inline:1; }; struct cus { uint32_t nr_entries; struct list_head cus; }; struct cus *cus__new(void); void cus__delete(struct cus *cus); int cus__load_file(struct cus *cus, struct conf_load *conf, const char *filename); int cus__load_files(struct cus *cus, struct conf_load *conf, char *filenames[]); int cus__fprintf_load_files_err(struct cus *cus, const char *tool, char *argv[], int err, FILE *output); int cus__load_dir(struct cus *cus, struct conf_load *conf, const char *dirname, const char *filename_mask, const int recursive); void cus__add(struct cus *cus, struct cu *cu); void cus__print_error_msg(const char *progname, const struct cus *cus, const char *filename, const int err); struct cu *cus__find_cu_by_name(const struct cus *cus, const char *name); struct tag *cus__find_struct_by_name(const struct cus *cus, struct cu **cu, const char *name, const int include_decls, type_id_t *id); struct tag *cus__find_struct_or_union_by_name(const struct cus *cus, struct cu **cu, const char *name, const int include_decls, type_id_t *id); struct function *cus__find_function_at_addr(const struct cus *cus, uint64_t addr, struct cu **cu); void cus__for_each_cu(struct cus *cus, int (*iterator)(struct cu *cu, void *cookie), void *cookie, struct cu *(*filter)(struct cu *cu)); struct ptr_table { void **entries; uint32_t nr_entries; uint32_t allocated_entries; }; struct function; struct tag; struct cu; struct variable; /* Same as DW_LANG, so that we don't have to include dwarf.h in CTF */ enum dwarf_languages { LANG_C89 = 0x01, /* ISO C:1989 */ LANG_C = 0x02, /* C */ LANG_Ada83 = 0x03, /* ISO Ada:1983 */ LANG_C_plus_plus = 0x04, /* ISO C++:1998 */ LANG_Cobol74 = 0x05, /* ISO Cobol:1974 */ LANG_Cobol85 = 0x06, /* ISO Cobol:1985 */ LANG_Fortran77 = 0x07, /* ISO FORTRAN 77 */ LANG_Fortran90 = 0x08, /* ISO Fortran 90 */ LANG_Pascal83 = 0x09, /* ISO Pascal:1983 */ LANG_Modula2 = 0x0a, /* ISO Modula-2:1996 */ LANG_Java = 0x0b, /* Java */ LANG_C99 = 0x0c, /* ISO C:1999 */ LANG_Ada95 = 0x0d, /* ISO Ada:1995 */ LANG_Fortran95 = 0x0e, /* ISO Fortran 95 */ LANG_PL1 = 0x0f, /* ISO PL/1:1976 */ LANG_Objc = 0x10, /* Objective-C */ LANG_ObjC_plus_plus = 0x11, /* Objective-C++ */ LANG_UPC = 0x12, /* Unified Parallel C */ LANG_D = 0x13, /* D */ }; /** struct debug_fmt_ops - specific to the underlying debug file format * * @function__name - will be called by function__name(), giving a chance to * formats such as CTF to get this from some other place * than the global strings table. CTF does this by storing * GElf_Sym->st_name in function->name, and by using * function->name as an index into the .strtab ELF section. 
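 *	A sketch of the expected dispatch, mirroring the tag__decl_file()
 *	pattern further down in this header (the real body lives in
 *	dwarves.c and is not shown in this excerpt):
 *
 *		const char *function__name(struct function *f, const struct cu *cu)
 *		{
 *			if (cu->dfops && cu->dfops->function__name)
 *				return cu->dfops->function__name(f, cu);
 *			// presumably the global strings table otherwise
 *			return cu__string(cu, f->name);
 *		}
 *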
* @variable__name - will be called by variable__name(), see @function_name * cu__delete - called at cu__delete(), to give a chance to formats such as * CTF to keep the .strstab ELF section available till the cu is * deleted. See @function__name */ struct debug_fmt_ops { const char *name; int (*init)(void); void (*exit)(void); int (*load_file)(struct cus *cus, struct conf_load *conf, const char *filename); const char *(*tag__decl_file)(const struct tag *tag, const struct cu *cu); uint32_t (*tag__decl_line)(const struct tag *tag, const struct cu *cu); unsigned long long (*tag__orig_id)(const struct tag *tag, const struct cu *cu); void (*tag__free_orig_info)(struct tag *tag, struct cu *cu); const char *(*function__name)(struct function *tag, const struct cu *cu); const char *(*variable__name)(const struct variable *var, const struct cu *cu); const char *(*strings__ptr)(const struct cu *cu, strings_t s); void (*cu__delete)(struct cu *cu); bool has_alignment_info; }; struct cu { struct list_head node; struct list_head tags; struct list_head tool_list; /* To be used by tools such as ctracer */ struct ptr_table types_table; struct ptr_table functions_table; struct ptr_table tags_table; struct rb_root functions; char *name; char *filename; void *priv; struct obstack obstack; struct debug_fmt_ops *dfops; Elf *elf; Dwfl_Module *dwfl; uint32_t cached_symtab_nr_entries; uint8_t addr_size; uint8_t extra_dbg_info:1; uint8_t has_addr_info:1; uint8_t uses_global_strings:1; uint8_t little_endian:1; uint16_t language; unsigned long nr_inline_expansions; size_t size_inline_expansions; uint32_t nr_functions_changed; uint32_t nr_structures_changed; size_t max_len_changed_item; size_t function_bytes_added; size_t function_bytes_removed; int build_id_len; unsigned char build_id[0]; }; struct cu *cu__new(const char *name, uint8_t addr_size, const unsigned char *build_id, int build_id_len, const char *filename); void cu__delete(struct cu *cu); const char *cu__string(const struct cu *cu, strings_t s); static inline int cu__cache_symtab(struct cu *cu) { int err = dwfl_module_getsymtab(cu->dwfl); if (err > 0) cu->cached_symtab_nr_entries = dwfl_module_getsymtab(cu->dwfl); return err; } static inline __pure bool cu__is_c_plus_plus(const struct cu *cu) { return cu->language == LANG_C_plus_plus; } /** * cu__for_each_cached_symtab_entry - iterate thru the cached symtab entries * @cu: struct cu instance * @id: uint32_t tag id * @pos: struct GElf_Sym iterator * @name: char pointer where the symbol_name will be stored */ #define cu__for_each_cached_symtab_entry(cu, id, pos, name) \ for (id = 1, \ name = dwfl_module_getsym(cu->dwfl, id, &sym, NULL); \ id < cu->cached_symtab_nr_entries; \ ++id, name = dwfl_module_getsym(cu->dwfl, id, &sym, NULL)) /** * cu__for_each_type - iterate thru all the type tags * @cu: struct cu instance to iterate * @id: type_id_t id * @pos: struct tag iterator * * See cu__table_nullify_type_entry and users for the reason for * the NULL test (hint: CTF Unknown types) */ #define cu__for_each_type(cu, id, pos) \ for (id = 1; id < cu->types_table.nr_entries; ++id) \ if (!(pos = cu->types_table.entries[id])) \ continue; \ else /** * cu__for_each_struct - iterate thru all the struct tags * @cu: struct cu instance to iterate * @pos: struct class iterator * @id: type_id_t id */ #define cu__for_each_struct(cu, id, pos) \ for (id = 1; id < cu->types_table.nr_entries; ++id) \ if (!(pos = tag__class(cu->types_table.entries[id])) || \ !tag__is_struct(class__tag(pos))) \ continue; \ else /** * 
cu__for_each_struct_or_union - iterate thru all the struct and union tags * @cu: struct cu instance to iterate * @pos: struct class iterator * @id: type_id_t tag id */ #define cu__for_each_struct_or_union(cu, id, pos) \ for (id = 1; id < cu->types_table.nr_entries; ++id) \ if (!(pos = tag__class(cu->types_table.entries[id])) || \ !(tag__is_struct(class__tag(pos)) || \ tag__is_union(class__tag(pos)))) \ continue; \ else /** * cu__for_each_function - iterate thru all the function tags * @cu: struct cu instance to iterate * @pos: struct function iterator * @id: uint32_t tag id */ #define cu__for_each_function(cu, id, pos) \ for (id = 0; id < cu->functions_table.nr_entries; ++id) \ if (!(pos = tag__function(cu->functions_table.entries[id]))) \ continue; \ else /** * cu__for_each_variable - iterate thru all the global variable tags * @cu: struct cu instance to iterate * @pos: struct tag iterator * @id: uint32_t tag id */ #define cu__for_each_variable(cu, id, pos) \ for (id = 0; id < cu->tags_table.nr_entries; ++id) \ if (!(pos = cu->tags_table.entries[id]) || \ !tag__is_variable(pos)) \ continue; \ else int cu__add_tag(struct cu *cu, struct tag *tag, uint32_t *id); int cu__add_tag_with_id(struct cu *cu, struct tag *tag, uint32_t id); int cu__table_add_tag(struct cu *cu, struct tag *tag, uint32_t *id); int cu__table_add_tag_with_id(struct cu *cu, struct tag *tag, uint32_t id); int cu__table_nullify_type_entry(struct cu *cu, uint32_t id); struct tag *cu__find_base_type_by_name(const struct cu *cu, const char *name, type_id_t *id); struct tag *cu__find_base_type_by_sname_and_size(const struct cu *cu, strings_t name, uint16_t bit_size, type_id_t *idp); struct tag *cu__find_enumeration_by_sname_and_size(const struct cu *cu, strings_t sname, uint16_t bit_size, type_id_t *idp); struct tag *cu__find_first_typedef_of_type(const struct cu *cu, const type_id_t type); struct tag *cu__find_function_by_name(const struct cu *cu, const char *name); struct tag *cu__find_struct_by_sname(const struct cu *cu, strings_t sname, const int include_decls, type_id_t *idp); struct function *cu__find_function_at_addr(const struct cu *cu, uint64_t addr); struct tag *cu__function(const struct cu *cu, const uint32_t id); struct tag *cu__tag(const struct cu *cu, const uint32_t id); struct tag *cu__type(const struct cu *cu, const type_id_t id); struct tag *cu__find_struct_by_name(const struct cu *cu, const char *name, const int include_decls, type_id_t *id); struct tag *cu__find_struct_or_union_by_name(const struct cu *cu, const char *name, const int include_decls, type_id_t *id); bool cu__same_build_id(const struct cu *cu, const struct cu *other); void cu__account_inline_expansions(struct cu *cu); int cu__for_all_tags(struct cu *cu, int (*iterator)(struct tag *tag, struct cu *cu, void *cookie), void *cookie); /** struct tag - basic representation of a debug info element * @priv - extra data, for instance, DWARF offset, id, decl_{file,line} * @top_level - */ struct tag { struct list_head node; type_id_t type; uint16_t tag; bool visited; bool top_level; uint16_t recursivity_level; void *priv; }; void tag__delete(struct tag *tag, struct cu *cu); static inline int tag__is_enumeration(const struct tag *tag) { return tag->tag == DW_TAG_enumeration_type; } static inline int tag__is_namespace(const struct tag *tag) { return tag->tag == DW_TAG_namespace; } static inline int tag__is_struct(const struct tag *tag) { return tag->tag == DW_TAG_structure_type || tag->tag == DW_TAG_interface_type || tag->tag == DW_TAG_class_type; } static 
inline int tag__is_typedef(const struct tag *tag) { return tag->tag == DW_TAG_typedef; } static inline int tag__is_rvalue_reference_type(const struct tag *tag) { return tag->tag == DW_TAG_rvalue_reference_type; } static inline int tag__is_union(const struct tag *tag) { return tag->tag == DW_TAG_union_type; } static inline int tag__is_const(const struct tag *tag) { return tag->tag == DW_TAG_const_type; } static inline int tag__is_pointer(const struct tag *tag) { return tag->tag == DW_TAG_pointer_type; } static inline int tag__is_pointer_to(const struct tag *tag, type_id_t type) { return tag__is_pointer(tag) && tag->type == type; } static inline bool tag__is_variable(const struct tag *tag) { return tag->tag == DW_TAG_variable; } static inline bool tag__is_volatile(const struct tag *tag) { return tag->tag == DW_TAG_volatile_type; } static inline bool tag__is_restrict(const struct tag *tag) { return tag->tag == DW_TAG_restrict_type; } static inline int tag__is_modifier(const struct tag *tag) { return tag__is_const(tag) || tag__is_volatile(tag) || tag__is_restrict(tag); } static inline bool tag__has_namespace(const struct tag *tag) { return tag__is_struct(tag) || tag__is_union(tag) || tag__is_namespace(tag) || tag__is_enumeration(tag); } /** * tag__is_tag_type - is this tag derived from the 'type' class? * @tag - tag queried */ static inline int tag__is_type(const struct tag *tag) { return tag__is_union(tag) || tag__is_struct(tag) || tag__is_typedef(tag) || tag__is_rvalue_reference_type(tag) || tag__is_enumeration(tag); } /** * tag__is_tag_type - is this one of the possible types for a tag? * @tag - tag queried */ static inline int tag__is_tag_type(const struct tag *tag) { return tag__is_type(tag) || tag->tag == DW_TAG_array_type || tag->tag == DW_TAG_base_type || tag->tag == DW_TAG_const_type || tag->tag == DW_TAG_pointer_type || tag->tag == DW_TAG_rvalue_reference_type || tag->tag == DW_TAG_ptr_to_member_type || tag->tag == DW_TAG_reference_type || tag->tag == DW_TAG_restrict_type || tag->tag == DW_TAG_subroutine_type || tag->tag == DW_TAG_unspecified_type || tag->tag == DW_TAG_volatile_type; } static inline const char *tag__decl_file(const struct tag *tag, const struct cu *cu) { if (cu->dfops && cu->dfops->tag__decl_file) return cu->dfops->tag__decl_file(tag, cu); return NULL; } static inline uint32_t tag__decl_line(const struct tag *tag, const struct cu *cu) { if (cu->dfops && cu->dfops->tag__decl_line) return cu->dfops->tag__decl_line(tag, cu); return 0; } static inline unsigned long long tag__orig_id(const struct tag *tag, const struct cu *cu) { if (cu->dfops && cu->dfops->tag__orig_id) return cu->dfops->tag__orig_id(tag, cu); return 0; } static inline void tag__free_orig_info(struct tag *tag, struct cu *cu) { if (cu->dfops && cu->dfops->tag__free_orig_info) cu->dfops->tag__free_orig_info(tag, cu); } size_t tag__fprintf_decl_info(const struct tag *tag, const struct cu *cu, FILE *fp); size_t tag__fprintf(struct tag *tag, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp); const char *tag__name(const struct tag *tag, const struct cu *cu, char *bf, size_t len, const struct conf_fprintf *conf); void tag__not_found_die(const char *file, int line, const char *func); #define tag__assert_search_result(tag) \ do { if (!tag) tag__not_found_die(__FILE__,\ __LINE__, __func__); } while (0) size_t tag__size(const struct tag *tag, const struct cu *cu); size_t tag__nr_cachelines(const struct tag *tag, const struct cu *cu); struct tag *tag__follow_typedef(const struct tag *tag, const struct 
cu *cu); struct tag *tag__strip_typedefs_and_modifiers(const struct tag *tag, const struct cu *cu); size_t __tag__id_not_found_fprintf(FILE *fp, type_id_t id, const char *fn, int line); #define tag__id_not_found_fprintf(fp, id) \ __tag__id_not_found_fprintf(fp, id, __func__, __LINE__) int __tag__has_type_loop(const struct tag *tag, const struct tag *type, char *bf, size_t len, FILE *fp, const char *fn, int line); #define tag__has_type_loop(tag, type, bf, len, fp) \ __tag__has_type_loop(tag, type, bf, len, fp, __func__, __LINE__) struct ptr_to_member_type { struct tag tag; type_id_t containing_type; }; static inline struct ptr_to_member_type * tag__ptr_to_member_type(const struct tag *tag) { return (struct ptr_to_member_type *)tag; } /** struct namespace - base class for enums, structs, unions, typedefs, etc * * @sname - for clones, for instance, where we can't always add a new string * @tags - class_member, enumerators, etc * @shared_tags: if this bit is set, don't free the entries in @tags */ struct namespace { struct tag tag; strings_t name; uint16_t nr_tags; uint8_t shared_tags; char * sname; struct list_head tags; }; static inline struct namespace *tag__namespace(const struct tag *tag) { return (struct namespace *)tag; } void namespace__delete(struct namespace *nspace, struct cu *cu); /** * namespace__for_each_tag - iterate thru all the tags * @nspace: struct namespace instance to iterate * @pos: struct tag iterator */ #define namespace__for_each_tag(nspace, pos) \ list_for_each_entry(pos, &(nspace)->tags, node) /** * namespace__for_each_tag_safe_reverse - safely iterate thru all the tags, in reverse order * @nspace: struct namespace instance to iterate * @pos: struct tag iterator * @n: struct class_member temp iterator */ #define namespace__for_each_tag_safe_reverse(nspace, pos, n) \ list_for_each_entry_safe_reverse(pos, n, &(nspace)->tags, node) void namespace__add_tag(struct namespace *nspace, struct tag *tag); struct ip_tag { struct tag tag; uint64_t addr; }; struct inline_expansion { struct ip_tag ip; size_t size; uint64_t high_pc; }; static inline struct inline_expansion * tag__inline_expansion(const struct tag *tag) { return (struct inline_expansion *)tag; } struct label { struct ip_tag ip; strings_t name; }; static inline struct label *tag__label(const struct tag *tag) { return (struct label *)tag; } static inline const char *label__name(const struct label *label, const struct cu *cu) { return cu__string(cu, label->name); } enum vscope { VSCOPE_UNKNOWN, VSCOPE_LOCAL, VSCOPE_GLOBAL, VSCOPE_REGISTER, VSCOPE_OPTIMIZED } __attribute__((packed)); struct location { Dwarf_Op *expr; size_t exprlen; }; struct variable { struct ip_tag ip; strings_t name; uint8_t external:1; uint8_t declaration:1; enum vscope scope; struct location location; struct hlist_node tool_hnode; }; static inline struct variable *tag__variable(const struct tag *tag) { return (struct variable *)tag; } enum vscope variable__scope(const struct variable *var); const char *variable__scope_str(const struct variable *var); const char *variable__name(const struct variable *var, const struct cu *cu); const char *variable__type_name(const struct variable *var, const struct cu *cu, char *bf, size_t len); struct lexblock { struct ip_tag ip; struct list_head tags; uint32_t size; uint16_t nr_inline_expansions; uint16_t nr_labels; uint16_t nr_variables; uint16_t nr_lexblocks; uint32_t size_inline_expansions; }; static inline struct lexblock *tag__lexblock(const struct tag *tag) { return (struct lexblock *)tag; } void 
lexblock__delete(struct lexblock *lexblock, struct cu *cu); struct function; void lexblock__add_inline_expansion(struct lexblock *lexblock, struct inline_expansion *exp); void lexblock__add_label(struct lexblock *lexblock, struct label *label); void lexblock__add_lexblock(struct lexblock *lexblock, struct lexblock *child); void lexblock__add_tag(struct lexblock *lexblock, struct tag *tag); void lexblock__add_variable(struct lexblock *lexblock, struct variable *var); size_t lexblock__fprintf(const struct lexblock *lexblock, const struct cu *cu, struct function *function, uint16_t indent, const struct conf_fprintf *conf, FILE *fp); struct parameter { struct tag tag; strings_t name; }; static inline struct parameter *tag__parameter(const struct tag *tag) { return (struct parameter *)tag; } static inline const char *parameter__name(const struct parameter *parm, const struct cu *cu) { return cu__string(cu, parm->name); } /* * tag.tag can be DW_TAG_subprogram_type or DW_TAG_subroutine_type. */ struct ftype { struct tag tag; struct list_head parms; uint16_t nr_parms; uint8_t unspec_parms; /* just one bit is needed */ }; static inline struct ftype *tag__ftype(const struct tag *tag) { return (struct ftype *)tag; } void ftype__delete(struct ftype *ftype, struct cu *cu); /** * ftype__for_each_parameter - iterate thru all the parameters * @ftype: struct ftype instance to iterate * @pos: struct parameter iterator */ #define ftype__for_each_parameter(ftype, pos) \ list_for_each_entry(pos, &(ftype)->parms, tag.node) /** * ftype__for_each_parameter_safe - safely iterate thru all the parameters * @ftype: struct ftype instance to iterate * @pos: struct parameter iterator * @n: struct parameter temp iterator */ #define ftype__for_each_parameter_safe(ftype, pos, n) \ list_for_each_entry_safe(pos, n, &(ftype)->parms, tag.node) /** * ftype__for_each_parameter_safe_reverse - safely iterate thru all the parameters, in reverse order * @ftype: struct ftype instance to iterate * @pos: struct parameter iterator * @n: struct parameter temp iterator */ #define ftype__for_each_parameter_safe_reverse(ftype, pos, n) \ list_for_each_entry_safe_reverse(pos, n, &(ftype)->parms, tag.node) void ftype__add_parameter(struct ftype *ftype, struct parameter *parm); size_t ftype__fprintf(const struct ftype *ftype, const struct cu *cu, const char *name, const int inlined, const int is_pointer, const int type_spacing, const struct conf_fprintf *conf, FILE *fp); size_t ftype__fprintf_parms(const struct ftype *ftype, const struct cu *cu, int indent, const struct conf_fprintf *conf, FILE *fp); int ftype__has_parm_of_type(const struct ftype *ftype, const type_id_t target, const struct cu *cu); struct function { struct ftype proto; struct lexblock lexblock; struct rb_node rb_node; strings_t name; strings_t linkage_name; uint32_t cu_total_size_inline_expansions; uint16_t cu_total_nr_inline_expansions; uint8_t inlined:2; uint8_t abstract_origin:1; uint8_t external:1; uint8_t accessibility:2; /* DW_ACCESS_{public,protected,private} */ uint8_t virtuality:2; /* DW_VIRTUALITY_{none,virtual,pure_virtual} */ int32_t vtable_entry; struct list_head vtable_node; /* fields used by tools */ union { struct list_head tool_node; struct hlist_node tool_hnode; }; void *priv; }; static inline struct function *tag__function(const struct tag *tag) { return (struct function *)tag; } static inline struct tag *function__tag(const struct function *func) { return (struct tag *)func; } void function__delete(struct function *func, struct cu *cu); static __pure inline 
int tag__is_function(const struct tag *tag) { return tag->tag == DW_TAG_subprogram; } /** * function__for_each_parameter - iterate thru all the parameters * @func: struct function instance to iterate * @pos: struct parameter iterator */ #define function__for_each_parameter(func, pos) \ ftype__for_each_parameter(&func->proto, pos) const char *function__name(struct function *func, const struct cu *cu); static inline const char *function__linkage_name(const struct function *func, const struct cu *cu) { return cu__string(cu, func->linkage_name); } size_t function__fprintf_stats(const struct tag *tag_func, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp); const char *function__prototype(const struct function *func, const struct cu *cu, char *bf, size_t len); static __pure inline uint64_t function__addr(const struct function *func) { return func->lexblock.ip.addr; } static __pure inline uint32_t function__size(const struct function *func) { return func->lexblock.size; } static inline int function__declared_inline(const struct function *func) { return (func->inlined == DW_INL_declared_inlined || func->inlined == DW_INL_declared_not_inlined); } static inline int function__inlined(const struct function *func) { return (func->inlined == DW_INL_inlined || func->inlined == DW_INL_declared_inlined); } /* struct class_member - struct, union, class member * * @bit_offset - offset in bits from the start of the struct * @bit_size - cached bit size, can be smaller than the integral type if in a bitfield * @byte_offset - offset in bytes from the start of the struct * @byte_size - cached byte size, integral type byte size for bitfields * @bitfield_offset - offset in the current bitfield * @bitfield_size - size in the current bitfield * @bit_hole - If there is a bit hole before the next one (or the end of the struct) * @bitfield_end - Is this the last entry in a bitfield? 
* @alignment - DW_AT_alignment, zero if not present, gcc emits since circa 7.3.1 * @accessibility - DW_ACCESS_{public,protected,private} * @virtuality - DW_VIRTUALITY_{none,virtual,pure_virtual} * @hole - If there is a hole before the next one (or the end of the struct) */ struct class_member { struct tag tag; strings_t name; uint32_t bit_offset; uint32_t bit_size; uint32_t byte_offset; size_t byte_size; int8_t bitfield_offset; uint8_t bitfield_size; uint8_t bit_hole; uint8_t bitfield_end:1; uint64_t const_value; uint32_t alignment; uint8_t visited:1; uint8_t is_static:1; uint8_t accessibility:2; uint8_t virtuality:2; uint16_t hole; }; void class_member__delete(struct class_member *member, struct cu *cu); static inline struct class_member *tag__class_member(const struct tag *tag) { return (struct class_member *)tag; } static inline const char *class_member__name(const struct class_member *member, const struct cu *cu) { return cu__string(cu, member->name); } static __pure inline int tag__is_class_member(const struct tag *tag) { return tag->tag == DW_TAG_member; } /** * struct type - base type for enumerations, structs and unions * * @nr_members: number of non-static DW_TAG_member entries * @nr_static_members: number of static DW_TAG_member entries * @nr_tags: number of tags * @alignment: DW_AT_alignment, zero if not present, gcc emits since circa 7.3.1 * @natural_alignment: For inferring __packed__, normally the widest scalar in it, recursively */ struct type { struct namespace namespace; struct list_head node; uint32_t size; int32_t size_diff; uint16_t nr_static_members; uint16_t nr_members; uint32_t alignment; uint16_t natural_alignment; bool packed_attributes_inferred; uint8_t declaration; /* only one bit used */ uint8_t definition_emitted:1; uint8_t fwd_decl_emitted:1; uint8_t resized:1; }; static inline struct class *type__class(const struct type *type) { return (struct class *)type; } static inline struct tag *type__tag(const struct type *type) { return (struct tag *)type; } void type__delete(struct type *type, struct cu *cu); /** * type__for_each_tag - iterate thru all the tags * @type: struct type instance to iterate * @pos: struct tag iterator */ #define type__for_each_tag(type, pos) \ list_for_each_entry(pos, &(type)->namespace.tags, node) /** * type__for_each_enumerator - iterate thru the enumerator entries * @type: struct type instance to iterate * @pos: struct enumerator iterator */ #define type__for_each_enumerator(type, pos) \ struct list_head *__type__for_each_enumerator_head = \ (type)->namespace.shared_tags ?
\ (type)->namespace.tags.next : \ &(type)->namespace.tags; \ list_for_each_entry(pos, __type__for_each_enumerator_head, tag.node) /** * type__for_each_enumerator_safe_reverse - safely iterate thru the enumerator entries, in reverse order * @type: struct type instance to iterate * @pos: struct enumerator iterator * @n: struct enumerator temp iterator */ #define type__for_each_enumerator_safe_reverse(type, pos, n) \ if ((type)->namespace.shared_tags) /* Do nothing */ ; else \ list_for_each_entry_safe_reverse(pos, n, &(type)->namespace.tags, tag.node) /** * type__for_each_member - iterate thru the entries that use space * (data members and inheritance entries) * @type: struct type instance to iterate * @pos: struct class_member iterator */ #define type__for_each_member(type, pos) \ list_for_each_entry(pos, &(type)->namespace.tags, tag.node) \ if (!(pos->tag.tag == DW_TAG_member || \ pos->tag.tag == DW_TAG_inheritance)) \ continue; \ else /** * type__for_each_data_member - iterate thru the data member entries * @type: struct type instance to iterate * @pos: struct class_member iterator */ #define type__for_each_data_member(type, pos) \ list_for_each_entry(pos, &(type)->namespace.tags, tag.node) \ if (pos->tag.tag != DW_TAG_member) \ continue; \ else /** * type__for_each_member_safe - safely iterate thru the entries that use space * (data members and inheritance entries) * @type: struct type instance to iterate * @pos: struct class_member iterator * @n: struct class_member temp iterator */ #define type__for_each_member_safe(type, pos, n) \ list_for_each_entry_safe(pos, n, &(type)->namespace.tags, tag.node) \ if (pos->tag.tag != DW_TAG_member) \ continue; \ else /** * type__for_each_data_member_safe - safely iterate thru the data member entries * @type: struct type instance to iterate * @pos: struct class_member iterator * @n: struct class_member temp iterator */ #define type__for_each_data_member_safe(type, pos, n) \ list_for_each_entry_safe(pos, n, &(type)->namespace.tags, tag.node) \ if (pos->tag.tag != DW_TAG_member) \ continue; \ else /** * type__for_each_tag_safe_reverse - safely iterate thru all tags in a type, in reverse order * @type: struct type instance to iterate * @pos: struct class_member iterator * @n: struct class_member temp iterator */ #define type__for_each_tag_safe_reverse(type, pos, n) \ list_for_each_entry_safe_reverse(pos, n, &(type)->namespace.tags, tag.node) void type__add_member(struct type *type, struct class_member *member); struct class_member * type__find_first_biggest_size_base_type_member(struct type *type, const struct cu *cu); struct class_member *type__find_member_by_name(const struct type *type, const struct cu *cu, const char *name); uint32_t type__nr_members_of_type(const struct type *type, const type_id_t oftype); struct class_member *type__last_member(struct type *type); size_t typedef__fprintf(const struct tag *tag_type, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp); static inline struct type *tag__type(const struct tag *tag) { return (struct type *)tag; } struct class { struct type type; struct list_head vtable; uint16_t nr_vtable_entries; uint8_t nr_holes; uint8_t nr_bit_holes; uint16_t pre_hole; uint16_t padding; uint8_t pre_bit_hole; uint8_t bit_padding; bool holes_searched; bool is_packed; void *priv; }; static inline struct class *tag__class(const struct tag *tag) { return (struct class *)tag; } static inline struct tag *class__tag(const struct class *cls) { return (struct tag *)cls; } struct class *class__clone(const struct class 
*from, const char *new_class_name, struct cu *cu); void class__delete(struct class *cls, struct cu *cu); static inline struct list_head *class__tags(struct class *cls) { return &cls->type.namespace.tags; } static __pure inline const char *namespace__name(const struct namespace *nspace, const struct cu *cu) { return nspace->sname ?: cu__string(cu, nspace->name); } static __pure inline const char *type__name(const struct type *type, const struct cu *cu) { return namespace__name(&type->namespace, cu); } static __pure inline const char *class__name(struct class *cls, const struct cu *cu) { return type__name(&cls->type, cu); } static inline int class__is_struct(const struct class *cls) { return tag__is_struct(&cls->type.namespace.tag); } void class__find_holes(struct class *cls); int class__has_hole_ge(const struct class *cls, const uint16_t size); bool class__infer_packed_attributes(struct class *cls, const struct cu *cu); void union__infer_packed_attributes(struct type *type, const struct cu *cu); void type__check_structs_at_unnatural_alignments(struct type *type, const struct cu *cu); size_t class__fprintf(struct class *cls, const struct cu *cu, FILE *fp); void class__add_vtable_entry(struct class *cls, struct function *vtable_entry); static inline struct class_member * class__find_member_by_name(const struct class *cls, const struct cu *cu, const char *name) { return type__find_member_by_name(&cls->type, cu, name); } static inline uint16_t class__nr_members(const struct class *cls) { return cls->type.nr_members; } static inline uint32_t class__size(const struct class *cls) { return cls->type.size; } static inline int class__is_declaration(const struct class *cls) { return cls->type.declaration; } const struct class_member *class__find_bit_hole(const struct class *cls, const struct class_member *trailer, const uint16_t bit_hole_size); #define class__for_each_member_from(cls, from, pos) \ pos = list_prepare_entry(from, class__tags(cls), tag.node); \ list_for_each_entry_from(pos, class__tags(cls), tag.node) \ if (!tag__is_class_member(&pos->tag)) \ continue; \ else #define class__for_each_member_safe_from(cls, from, pos, tmp) \ pos = list_prepare_entry(from, class__tags(cls), tag.node); \ list_for_each_entry_safe_from(pos, tmp, class__tags(cls), tag.node) \ if (!tag__is_class_member(&pos->tag)) \ continue; \ else #define class__for_each_member_continue(cls, from, pos) \ pos = list_prepare_entry(from, class__tags(cls), tag.node); \ list_for_each_entry_continue(pos, class__tags(cls), tag.node) \ if (!tag__is_class_member(&pos->tag)) \ continue; \ else #define class__for_each_member_reverse(cls, member) \ list_for_each_entry_reverse(member, class__tags(cls), tag.node) \ if (member->tag.tag != DW_TAG_member) \ continue; \ else enum base_type_float_type { BT_FP_SINGLE = 1, BT_FP_DOUBLE, BT_FP_CMPLX, BT_FP_CMPLX_DBL, BT_FP_CMPLX_LDBL, BT_FP_LDBL, BT_FP_INTVL, BT_FP_INTVL_DBL, BT_FP_INTVL_LDBL, BT_FP_IMGRY, BT_FP_IMGRY_DBL, BT_FP_IMGRY_LDBL }; struct base_type { struct tag tag; strings_t name; uint16_t bit_size; uint8_t name_has_encoding:1; uint8_t is_signed:1; uint8_t is_bool:1; uint8_t is_varargs:1; uint8_t float_type:4; }; static inline struct base_type *tag__base_type(const struct tag *tag) { return (struct base_type *)tag; } static inline uint16_t base_type__size(const struct tag *tag) { return tag__base_type(tag)->bit_size / 8; } const char *base_type__name(const struct base_type *btype, const struct cu *cu, char *bf, size_t len); void base_type_name_to_size_table__init(struct strings 
*strings); size_t base_type__name_to_size(struct base_type *btype, struct cu *cu); struct array_type { struct tag tag; uint32_t *nr_entries; uint8_t dimensions; bool is_vector; }; static inline struct array_type *tag__array_type(const struct tag *tag) { return (struct array_type *)tag; } struct enumerator { struct tag tag; strings_t name; uint32_t value; }; static inline const char *enumerator__name(const struct enumerator *enumerator, const struct cu *cu) { return cu__string(cu, enumerator->name); } void enumeration__delete(struct type *type, struct cu *cu); void enumeration__add(struct type *type, struct enumerator *enumerator); size_t enumeration__fprintf(const struct tag *tag_enum, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp); int dwarves__init(uint16_t user_cacheline_size); void dwarves__exit(void); const char *dwarf_tag_name(const uint32_t tag); struct argp_state; void dwarves_print_version(FILE *fp, struct argp_state *state); extern bool no_bitfield_type_recode; #endif /* _DWARVES_H_ */ dwarves-dfsg-1.15/dwarves_emit.c000066400000000000000000000213351350511416500166760ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2006 Mandriva Conectiva S.A. Copyright (C) 2006 Arnaldo Carvalho de Melo Copyright (C) 2007 Red Hat Inc. Copyright (C) 2007 Arnaldo Carvalho de Melo */ #include #include "list.h" #include "dwarves_emit.h" #include "dwarves.h" void type_emissions__init(struct type_emissions *emissions) { INIT_LIST_HEAD(&emissions->definitions); INIT_LIST_HEAD(&emissions->fwd_decls); } static void type_emissions__add_definition(struct type_emissions *emissions, struct type *type) { type->definition_emitted = 1; if (!list_empty(&type->node)) list_del(&type->node); list_add_tail(&type->node, &emissions->definitions); } static void type_emissions__add_fwd_decl(struct type_emissions *emissions, struct type *type) { type->fwd_decl_emitted = 1; if (list_empty(&type->node)) list_add_tail(&type->node, &emissions->fwd_decls); } struct type *type_emissions__find_definition(const struct type_emissions *emissions, const struct cu *cu, const char *name) { struct type *pos; if (name == NULL) return NULL; list_for_each_entry(pos, &emissions->definitions, node) if (type__name(pos, cu) != NULL && strcmp(type__name(pos, cu), name) == 0) return pos; return NULL; } static struct type *type_emissions__find_fwd_decl(const struct type_emissions *emissions, const struct cu *cu, const char *name) { struct type *pos; if (name == NULL) return NULL; list_for_each_entry(pos, &emissions->fwd_decls, node) { const char *curr_name = type__name(pos, cu); if (curr_name && strcmp(curr_name, name) == 0) return pos; } return NULL; } static int enumeration__emit_definitions(struct tag *tag, struct cu *cu, struct type_emissions *emissions, const struct conf_fprintf *conf, FILE *fp) { struct type *etype = tag__type(tag); /* Have we already emitted this in this CU? */ if (etype->definition_emitted) return 0; /* Ok, lets look at the previous CUs: */ if (type_emissions__find_definition(emissions, cu, type__name(etype, cu)) != NULL) { /* * Yes, so lets mark it visited on this CU too, * to speed up the lookup. 
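 * (An enum defined in a header pulled in by hundreds of CUs would
 * otherwise be searched for in the emissions list once per CU; caching
 * the verdict in the type itself makes every later check on this CU a
 * single bit test.)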
*/ etype->definition_emitted = 1; return 0; } enumeration__fprintf(tag, cu, conf, fp); fputs(";\n", fp); type_emissions__add_definition(emissions, etype); return 1; } static int tag__emit_definitions(struct tag *tag, struct cu *cu, struct type_emissions *emissions, FILE *fp); static int typedef__emit_definitions(struct tag *tdef, struct cu *cu, struct type_emissions *emissions, FILE *fp) { struct type *def = tag__type(tdef); struct tag *type, *ptr_type; int is_pointer = 0; /* Have we already emitted this in this CU? */ if (def->definition_emitted) return 0; /* Ok, lets look at the previous CUs: */ if (type_emissions__find_definition(emissions, cu, type__name(def, cu)) != NULL) { /* * Yes, so lets mark it visited on this CU too, * to speed up the lookup. */ def->definition_emitted = 1; return 0; } type = cu__type(cu, tdef->type); tag__assert_search_result(type); switch (type->tag) { case DW_TAG_array_type: tag__emit_definitions(type, cu, emissions, fp); break; case DW_TAG_typedef: typedef__emit_definitions(type, cu, emissions, fp); break; case DW_TAG_pointer_type: ptr_type = cu__type(cu, type->type); /* void ** can make ptr_type be NULL */ if (ptr_type == NULL) break; if (ptr_type->tag == DW_TAG_typedef) { typedef__emit_definitions(ptr_type, cu, emissions, fp); break; } else if (ptr_type->tag != DW_TAG_subroutine_type) break; type = ptr_type; is_pointer = 1; /* Fall thru */ case DW_TAG_subroutine_type: ftype__emit_definitions(tag__ftype(type), cu, emissions, fp); break; case DW_TAG_enumeration_type: { struct type *ctype = tag__type(type); struct conf_fprintf conf = { .suffix = NULL, }; if (type__name(ctype, cu) == NULL) { fputs("typedef ", fp); conf.suffix = type__name(def, cu); enumeration__emit_definitions(type, cu, emissions, &conf, fp); goto out; } else enumeration__emit_definitions(type, cu, emissions, &conf, fp); } break; case DW_TAG_structure_type: case DW_TAG_union_type: { struct type *ctype = tag__type(type); if (type__name(ctype, cu) == NULL) { if (type__emit_definitions(type, cu, emissions, fp)) type__emit(type, cu, "typedef", type__name(def, cu), fp); goto out; } else if (type__emit_definitions(type, cu, emissions, fp)) type__emit(type, cu, NULL, NULL, fp); } } /* * Recheck if the typedef was emitted, as there are cases, like * wait_queue_t in the Linux kernel, that is against struct * __wait_queue, that has a wait_queue_func_t member, a function * typedef that has as one of its parameters a... wait_queue_t, that * will thus be emitted before the function typedef, making a no go to * redefine the typedef after struct __wait_queue. */ if (!def->definition_emitted) { typedef__fprintf(tdef, cu, NULL, fp); fputs(";\n", fp); } out: type_emissions__add_definition(emissions, def); return 1; } int type__emit_fwd_decl(struct type *ctype, const struct cu *cu, struct type_emissions *emissions, FILE *fp) { /* Have we already emitted this in this CU? */ if (ctype->fwd_decl_emitted) return 0; const char *name = type__name(ctype, cu); if (name == NULL) return 0; /* Ok, lets look at the previous CUs: */ if (type_emissions__find_fwd_decl(emissions, cu, name) != NULL) { /* * Yes, so lets mark it visited on this CU too, * to speed up the lookup. */ ctype->fwd_decl_emitted = 1; return 0; } fprintf(fp, "%s %s;\n", tag__is_union(&ctype->namespace.tag) ? 
"union" : "struct", type__name(ctype, cu)); type_emissions__add_fwd_decl(emissions, ctype); return 1; } static int tag__emit_definitions(struct tag *tag, struct cu *cu, struct type_emissions *emissions, FILE *fp) { struct tag *type = cu__type(cu, tag->type); int pointer = 0; if (type == NULL) return 0; next_indirection: switch (type->tag) { case DW_TAG_pointer_type: case DW_TAG_reference_type: pointer = 1; /* Fall thru */ case DW_TAG_array_type: case DW_TAG_const_type: case DW_TAG_volatile_type: type = cu__type(cu, type->type); if (type == NULL) return 0; goto next_indirection; case DW_TAG_typedef: return typedef__emit_definitions(type, cu, emissions, fp); case DW_TAG_enumeration_type: if (type__name(tag__type(type), cu) != NULL) { struct conf_fprintf conf = { .suffix = NULL, }; return enumeration__emit_definitions(type, cu, emissions, &conf, fp); } break; case DW_TAG_structure_type: case DW_TAG_union_type: if (pointer) { /* * Struct defined inline, no name, need to have its * members types emitted. */ if (type__name(tag__type(type), cu) == NULL) type__emit_definitions(type, cu, emissions, fp); return type__emit_fwd_decl(tag__type(type), cu, emissions, fp); } if (type__emit_definitions(type, cu, emissions, fp)) type__emit(type, cu, NULL, NULL, fp); return 1; case DW_TAG_subroutine_type: return ftype__emit_definitions(tag__ftype(type), cu, emissions, fp); } return 0; } int ftype__emit_definitions(struct ftype *ftype, struct cu *cu, struct type_emissions *emissions, FILE *fp) { struct parameter *pos; /* First check the function return type */ int printed = tag__emit_definitions(&ftype->tag, cu, emissions, fp); /* Then its parameters */ list_for_each_entry(pos, &ftype->parms, tag.node) if (tag__emit_definitions(&pos->tag, cu, emissions, fp)) printed = 1; if (printed) fputc('\n', fp); return printed; } int type__emit_definitions(struct tag *tag, struct cu *cu, struct type_emissions *emissions, FILE *fp) { struct type *ctype = tag__type(tag); struct class_member *pos; if (ctype->definition_emitted) return 0; /* Ok, lets look at the previous CUs: */ if (type_emissions__find_definition(emissions, cu, type__name(ctype, cu)) != NULL) { ctype->definition_emitted = 1; return 0; } if (tag__is_typedef(tag)) return typedef__emit_definitions(tag, cu, emissions, fp); type_emissions__add_definition(emissions, ctype); type__check_structs_at_unnatural_alignments(ctype, cu); type__for_each_member(ctype, pos) if (tag__emit_definitions(&pos->tag, cu, emissions, fp)) fputc('\n', fp); return 1; } void type__emit(struct tag *tag, struct cu *cu, const char *prefix, const char *suffix, FILE *fp) { struct type *ctype = tag__type(tag); if (type__name(ctype, cu) != NULL || suffix != NULL || prefix != NULL) { struct conf_fprintf conf = { .prefix = prefix, .suffix = suffix, .emit_stats = 1, }; tag__fprintf(tag, cu, &conf, fp); fputc('\n', fp); } } dwarves-dfsg-1.15/dwarves_emit.h000066400000000000000000000022601350511416500166770ustar00rootroot00000000000000#ifndef _DWARVES_EMIT_H_ #define _DWARVES_EMIT_H_ 1 /* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2006 Mandriva Conectiva S.A. 
Copyright (C) 2006 Arnaldo Carvalho de Melo Copyright (C) 2007 Arnaldo Carvalho de Melo */ #include #include "list.h" struct cu; struct ftype; struct tag; struct type; struct type_emissions { struct list_head definitions; /* struct type entries */ struct list_head fwd_decls; /* struct class entries */ }; void type_emissions__init(struct type_emissions *temissions); int ftype__emit_definitions(struct ftype *ftype, struct cu *cu, struct type_emissions *emissions, FILE *fp); int type__emit_definitions(struct tag *tag, struct cu *cu, struct type_emissions *emissions, FILE *fp); int type__emit_fwd_decl(struct type *ctype, const struct cu *cu, struct type_emissions *emissions, FILE *fp); void type__emit(struct tag *tag_type, struct cu *cu, const char *prefix, const char *suffix, FILE *fp); struct type *type_emissions__find_definition(const struct type_emissions *temissions, const struct cu *cu, const char *name); #endif /* _DWARVES_EMIT_H_ */ dwarves-dfsg-1.15/dwarves_fprintf.c000066400000000000000000001530451350511416500174140ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2006 Mandriva Conectiva S.A. Copyright (C) 2006 Arnaldo Carvalho de Melo Copyright (C) 2007..2009 Red Hat Inc. Copyright (C) 2007..2009 Arnaldo Carvalho de Melo */ #include #include #include #include #include #include #include #include "config.h" #include "dwarves.h" static const char *dwarf_tag_names[] = { [DW_TAG_array_type] = "array_type", [DW_TAG_class_type] = "class_type", [DW_TAG_entry_point] = "entry_point", [DW_TAG_enumeration_type] = "enumeration_type", [DW_TAG_formal_parameter] = "formal_parameter", [DW_TAG_imported_declaration] = "imported_declaration", [DW_TAG_label] = "label", [DW_TAG_lexical_block] = "lexical_block", [DW_TAG_member] = "member", [DW_TAG_pointer_type] = "pointer_type", [DW_TAG_reference_type] = "reference_type", [DW_TAG_compile_unit] = "compile_unit", [DW_TAG_string_type] = "string_type", [DW_TAG_structure_type] = "structure_type", [DW_TAG_subroutine_type] = "subroutine_type", [DW_TAG_typedef] = "typedef", [DW_TAG_union_type] = "union_type", [DW_TAG_unspecified_parameters] = "unspecified_parameters", [DW_TAG_variant] = "variant", [DW_TAG_common_block] = "common_block", [DW_TAG_common_inclusion] = "common_inclusion", [DW_TAG_inheritance] = "inheritance", [DW_TAG_inlined_subroutine] = "inlined_subroutine", [DW_TAG_module] = "module", [DW_TAG_ptr_to_member_type] = "ptr_to_member_type", [DW_TAG_set_type] = "set_type", [DW_TAG_subrange_type] = "subrange_type", [DW_TAG_with_stmt] = "with_stmt", [DW_TAG_access_declaration] = "access_declaration", [DW_TAG_base_type] = "base_type", [DW_TAG_catch_block] = "catch_block", [DW_TAG_const_type] = "const_type", [DW_TAG_constant] = "constant", [DW_TAG_enumerator] = "enumerator", [DW_TAG_file_type] = "file_type", [DW_TAG_friend] = "friend", [DW_TAG_namelist] = "namelist", [DW_TAG_namelist_item] = "namelist_item", [DW_TAG_packed_type] = "packed_type", [DW_TAG_subprogram] = "subprogram", [DW_TAG_template_type_parameter] = "template_type_parameter", [DW_TAG_template_value_parameter] = "template_value_parameter", [DW_TAG_thrown_type] = "thrown_type", [DW_TAG_try_block] = "try_block", [DW_TAG_variant_part] = "variant_part", [DW_TAG_variable] = "variable", [DW_TAG_volatile_type] = "volatile_type", [DW_TAG_dwarf_procedure] = "dwarf_procedure", [DW_TAG_restrict_type] = "restrict_type", [DW_TAG_interface_type] = "interface_type", [DW_TAG_namespace] = "namespace", [DW_TAG_imported_module] = "imported_module", [DW_TAG_unspecified_type] = 
"unspecified_type", [DW_TAG_partial_unit] = "partial_unit", [DW_TAG_imported_unit] = "imported_unit", [DW_TAG_condition] = "condition", [DW_TAG_shared_type] = "shared_type", #ifdef STB_GNU_UNIQUE [DW_TAG_type_unit] = "type_unit", [DW_TAG_rvalue_reference_type] = "rvalue_reference_type", #endif }; static const char *dwarf_gnu_tag_names[] = { [DW_TAG_MIPS_loop - DW_TAG_MIPS_loop] = "MIPS_loop", [DW_TAG_format_label - DW_TAG_MIPS_loop] = "format_label", [DW_TAG_function_template - DW_TAG_MIPS_loop] = "function_template", [DW_TAG_class_template - DW_TAG_MIPS_loop] = "class_template", #ifdef STB_GNU_UNIQUE [DW_TAG_GNU_BINCL - DW_TAG_MIPS_loop] = "GNU_BINCL", [DW_TAG_GNU_EINCL - DW_TAG_MIPS_loop] = "GNU_EINCL", [DW_TAG_GNU_template_template_param - DW_TAG_MIPS_loop] = "GNU_template_template_param", [DW_TAG_GNU_template_parameter_pack - DW_TAG_MIPS_loop] = "GNU_template_parameter_pack", [DW_TAG_GNU_formal_parameter_pack - DW_TAG_MIPS_loop] = "GNU_formal_parameter_pack", #endif #if _ELFUTILS_PREREQ(0, 153) [DW_TAG_GNU_call_site - DW_TAG_MIPS_loop] = "GNU_call_site", [DW_TAG_GNU_call_site_parameter - DW_TAG_MIPS_loop] = "GNU_call_site_parameter", #endif }; const char *dwarf_tag_name(const uint32_t tag) { if (tag >= DW_TAG_array_type && tag <= #ifdef STB_GNU_UNIQUE DW_TAG_rvalue_reference_type #else DW_TAG_shared_type #endif ) return dwarf_tag_names[tag]; else if (tag >= DW_TAG_MIPS_loop && tag <= #if _ELFUTILS_PREREQ(0, 153) DW_TAG_GNU_call_site_parameter #elif STB_GNU_UNIQUE DW_TAG_GNU_formal_parameter_pack #else DW_TAG_class_template #endif ) return dwarf_gnu_tag_names[tag - DW_TAG_MIPS_loop]; return "INVALID"; } static const struct conf_fprintf conf_fprintf__defaults = { .name_spacing = 23, .type_spacing = 26, .emit_stats = 1, }; static const char tabs[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"; static size_t cacheline_size; size_t tag__nr_cachelines(const struct tag *tag, const struct cu *cu) { return (tag__size(tag, cu) + cacheline_size - 1) / cacheline_size; } static const char *tag__accessibility(const struct tag *tag) { int a; switch (tag->tag) { case DW_TAG_inheritance: case DW_TAG_member: a = tag__class_member(tag)->accessibility; break; case DW_TAG_subprogram: a = tag__function(tag)->accessibility; break; default: return NULL; } switch (a) { case DW_ACCESS_public: return "public"; case DW_ACCESS_private: return "private"; case DW_ACCESS_protected: return "protected"; } return NULL; } static size_t __tag__id_not_found_snprintf(char *bf, size_t len, uint32_t id, const char *fn, int line) { return snprintf(bf, len, "", fn, line, (unsigned long long)id); } #define tag__id_not_found_snprintf(bf, len, id) \ __tag__id_not_found_snprintf(bf, len, id, __func__, __LINE__) size_t tag__fprintf_decl_info(const struct tag *tag, const struct cu *cu, FILE *fp) { return fprintf(fp, "/* <%llx> %s:%u */\n", tag__orig_id(tag, cu), tag__decl_file(tag, cu), tag__decl_line(tag, cu)); return 0; } static size_t type__fprintf(struct tag *type, const struct cu *cu, const char *name, const struct conf_fprintf *conf, FILE *fp); static size_t array_type__fprintf(const struct tag *tag, const struct cu *cu, const char *name, const struct conf_fprintf *conf, FILE *fp) { struct array_type *at = tag__array_type(tag); struct tag *type = cu__type(cu, tag->type); size_t printed; unsigned long long flat_dimensions = 0; int i; if (type == NULL) return tag__id_not_found_fprintf(fp, tag->type); /* Zero sized arrays? 
*/ if (at->dimensions >= 1 && at->nr_entries[0] == 0 && tag__is_const(type)) type = cu__type(cu, type->type); printed = type__fprintf(type, cu, name, conf, fp); for (i = 0; i < at->dimensions; ++i) { if (conf->flat_arrays || at->is_vector) { /* * Seen on the Linux kernel on tun_filter: * * __u8 addr[0][ETH_ALEN]; */ if (at->nr_entries[i] == 0 && i == 0) break; if (!flat_dimensions) flat_dimensions = at->nr_entries[i]; else flat_dimensions *= at->nr_entries[i]; } else { bool single_member = conf->last_member && conf->first_member; if (at->nr_entries[i] != 0 || !conf->last_member || single_member || conf->union_member) printed += fprintf(fp, "[%u]", at->nr_entries[i]); else printed += fprintf(fp, "[]"); } } if (at->is_vector) { type = tag__follow_typedef(tag, cu); if (flat_dimensions == 0) flat_dimensions = 1; printed += fprintf(fp, " __attribute__ ((__vector_size__ (%llu)))", flat_dimensions * tag__size(type, cu)); } else if (conf->flat_arrays) { bool single_member = conf->last_member && conf->first_member; if (flat_dimensions != 0 || !conf->last_member || single_member || conf->union_member) printed += fprintf(fp, "[%llu]", flat_dimensions); else printed += fprintf(fp, "[]"); } return printed; } size_t typedef__fprintf(const struct tag *tag, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp) { struct type *type = tag__type(tag); const struct conf_fprintf *pconf = conf ?: &conf_fprintf__defaults; const struct tag *tag_type; const struct tag *ptr_type; char bf[512]; int is_pointer = 0; size_t printed; /* * Check for void (humm, perhaps we should have a fake void tag instance * to avoid all these checks? */ if (tag->type == 0) return fprintf(fp, "typedef void %s", type__name(type, cu)); tag_type = cu__type(cu, tag->type); if (tag_type == NULL) { printed = fprintf(fp, "typedef "); printed += tag__id_not_found_fprintf(fp, tag->type); return printed + fprintf(fp, " %s", type__name(type, cu)); } switch (tag_type->tag) { case DW_TAG_array_type: printed = fprintf(fp, "typedef "); return printed + array_type__fprintf(tag_type, cu, type__name(type, cu), pconf, fp); case DW_TAG_pointer_type: if (tag_type->type == 0) /* void pointer */ break; ptr_type = cu__type(cu, tag_type->type); if (ptr_type == NULL) { printed = fprintf(fp, "typedef "); printed += tag__id_not_found_fprintf(fp, tag_type->type); return printed + fprintf(fp, " *%s", type__name(type, cu)); } if (ptr_type->tag != DW_TAG_subroutine_type) break; tag_type = ptr_type; is_pointer = 1; /* Fall thru */ case DW_TAG_subroutine_type: printed = fprintf(fp, "typedef "); return printed + ftype__fprintf(tag__ftype(tag_type), cu, type__name(type, cu), 0, is_pointer, 0, pconf, fp); case DW_TAG_class_type: case DW_TAG_structure_type: { struct type *ctype = tag__type(tag_type); if (type__name(ctype, cu) != NULL) return fprintf(fp, "typedef struct %s %s", type__name(ctype, cu), type__name(type, cu)); } } return fprintf(fp, "typedef %s %s", tag__name(tag_type, cu, bf, sizeof(bf), pconf), type__name(type, cu)); } static size_t imported_declaration__fprintf(const struct tag *tag, const struct cu *cu, FILE *fp) { char bf[BUFSIZ]; size_t printed = fprintf(fp, "using ::"); const struct tag *decl = cu__function(cu, tag->type); if (decl == NULL) { decl = cu__tag(cu, tag->type); if (decl == NULL) return printed + tag__id_not_found_fprintf(fp, tag->type); } return printed + fprintf(fp, "%s", tag__name(decl, cu, bf, sizeof(bf), NULL)); } static size_t imported_module__fprintf(const struct tag *tag, const struct cu *cu, FILE *fp) { const struct tag 
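/* Sketch: for a C++ compile unit containing "using namespace std;", the loader points this tag's type at the std namespace, so the function below prints "using namespace std". */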
*module = cu__tag(cu, tag->type); const char *name = ""; if (tag__is_namespace(module)) name = namespace__name(tag__namespace(module), cu); return fprintf(fp, "using namespace %s", name); } size_t enumeration__fprintf(const struct tag *tag, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp) { struct type *type = tag__type(tag); struct enumerator *pos; size_t printed = fprintf(fp, "enum%s%s {\n", type__name(type, cu) ? " " : "", type__name(type, cu) ?: ""); int indent = conf->indent; if (indent >= (int)sizeof(tabs)) indent = sizeof(tabs) - 1; type__for_each_enumerator(type, pos) printed += fprintf(fp, "%.*s\t%s = %u,\n", indent, tabs, enumerator__name(pos, cu), pos->value); printed += fprintf(fp, "%.*s}", indent, tabs); /* * XXX: find out how to precisely determine the max size for an * enumeration, use sizeof(int) for now. */ if (type->size / 8 != sizeof(int)) printed += fprintf(fp, " __attribute__((__packed__))"); if (conf->suffix) printed += fprintf(fp, " %s", conf->suffix); return printed; } static const char *tag__prefix(const struct cu *cu, const uint32_t tag, const struct conf_fprintf *conf) { switch (tag) { case DW_TAG_enumeration_type: return "enum "; case DW_TAG_structure_type: return (!conf->classes_as_structs && cu->language == DW_LANG_C_plus_plus) ? "class " : "struct "; case DW_TAG_class_type: return conf->classes_as_structs ? "struct " : "class "; case DW_TAG_union_type: return "union "; case DW_TAG_pointer_type: return " *"; case DW_TAG_reference_type: return " &"; } return ""; } static const char *__tag__name(const struct tag *tag, const struct cu *cu, char *bf, size_t len, const struct conf_fprintf *conf); static const char *tag__ptr_name(const struct tag *tag, const struct cu *cu, char *bf, size_t len, const char *ptr_suffix) { if (tag->type == 0) /* No type == void */ snprintf(bf, len, "void %s", ptr_suffix); else { const struct tag *type = cu__type(cu, tag->type); if (type == NULL) { size_t l = tag__id_not_found_snprintf(bf, len, tag->type); snprintf(bf + l, len - l, " %s", ptr_suffix); } else if (!tag__has_type_loop(tag, type, bf, len, NULL)) { char tmpbf[1024]; const char *const_pointer = ""; if (tag__is_const(type)) { struct tag *next_type = cu__type(cu, type->type); if (next_type && tag__is_pointer(next_type)) { const_pointer = "const "; type = next_type; } } snprintf(bf, len, "%s %s%s", __tag__name(type, cu, tmpbf, sizeof(tmpbf), NULL), const_pointer, ptr_suffix); } } return bf; } static const char *__tag__name(const struct tag *tag, const struct cu *cu, char *bf, size_t len, const struct conf_fprintf *conf) { struct tag *type; const struct conf_fprintf *pconf = conf ?: &conf_fprintf__defaults; if (tag == NULL) strncpy(bf, "void", len); else switch (tag->tag) { case DW_TAG_base_type: { const struct base_type *bt = tag__base_type(tag); const char *name = "nameless base type!"; char bf2[64]; if (bt->name) name = base_type__name(tag__base_type(tag), cu, bf2, sizeof(bf2)); strncpy(bf, name, len); } break; case DW_TAG_subprogram: strncpy(bf, function__name(tag__function(tag), cu), len); break; case DW_TAG_pointer_type: return tag__ptr_name(tag, cu, bf, len, "*"); case DW_TAG_reference_type: return tag__ptr_name(tag, cu, bf, len, "&"); case DW_TAG_ptr_to_member_type: { char suffix[512]; type_id_t id = tag__ptr_to_member_type(tag)->containing_type; type = cu__type(cu, id); if (type != NULL) snprintf(suffix, sizeof(suffix), "%s::*", class__name(tag__class(type), cu)); else { size_t l = tag__id_not_found_snprintf(suffix, sizeof(suffix), id);
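/* Illustration: for a pointer to member such as "int Foo::*p" the suffix built in the branch above is "Foo::*"; on this error path the containing type id could not be resolved, so the id-not-found marker just written gets "::*" appended. */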
snprintf(suffix + l, sizeof(suffix) - l, "::*"); } return tag__ptr_name(tag, cu, bf, len, suffix); } case DW_TAG_volatile_type: case DW_TAG_const_type: case DW_TAG_restrict_type: case DW_TAG_unspecified_type: type = cu__type(cu, tag->type); if (type == NULL && tag->type != 0) tag__id_not_found_snprintf(bf, len, tag->type); else if (!tag__has_type_loop(tag, type, bf, len, NULL)) { char tmpbf[128]; const char *prefix = "", *suffix = "", *type_str = __tag__name(type, cu, tmpbf, sizeof(tmpbf), pconf); switch (tag->tag) { case DW_TAG_volatile_type: prefix = "volatile "; break; case DW_TAG_const_type: prefix = "const "; break; case DW_TAG_restrict_type: suffix = " restrict"; break; } snprintf(bf, len, "%s%s%s ", prefix, type_str, suffix); } break; case DW_TAG_array_type: type = cu__type(cu, tag->type); if (type == NULL) tag__id_not_found_snprintf(bf, len, tag->type); else if (!tag__has_type_loop(tag, type, bf, len, NULL)) return __tag__name(type, cu, bf, len, pconf); break; case DW_TAG_subroutine_type: { FILE *bfp = fmemopen(bf, len, "w"); if (bfp != NULL) { ftype__fprintf(tag__ftype(tag), cu, NULL, 0, 0, 0, pconf, bfp); fclose(bfp); } else snprintf(bf, len, "", __func__); } break; case DW_TAG_member: snprintf(bf, len, "%s", class_member__name(tag__class_member(tag), cu)); break; case DW_TAG_variable: snprintf(bf, len, "%s", variable__name(tag__variable(tag), cu)); break; default: snprintf(bf, len, "%s%s", tag__prefix(cu, tag->tag, pconf), type__name(tag__type(tag), cu) ?: ""); break; } return bf; } const char *tag__name(const struct tag *tag, const struct cu *cu, char *bf, size_t len, const struct conf_fprintf *conf) { int printed = 0; if (tag == NULL) { strncpy(bf, "void", len); return bf; } __tag__name(tag, cu, bf + printed, len - printed, conf); return bf; } static const char *variable__prefix(const struct variable *var) { switch (variable__scope(var)) { case VSCOPE_REGISTER: return "register "; case VSCOPE_UNKNOWN: if (var->external && var->declaration) return "extern "; break; case VSCOPE_GLOBAL: if (!var->external) return "static "; break; case VSCOPE_LOCAL: case VSCOPE_OPTIMIZED: break; } return NULL; } static size_t type__fprintf_stats(struct type *type, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp) { size_t printed = fprintf(fp, "\n%.*s/* size: %d, cachelines: %zd, members: %u", conf->indent, tabs, type->size, tag__nr_cachelines(type__tag(type), cu), type->nr_members); if (type->nr_static_members != 0) printed += fprintf(fp, ", static members: %u */\n", type->nr_static_members); else printed += fprintf(fp, " */\n"); return printed; } static size_t union__fprintf(struct type *type, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp); static size_t __class__fprintf(struct class *class, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp); static size_t type__fprintf(struct tag *type, const struct cu *cu, const char *name, const struct conf_fprintf *conf, FILE *fp) { char tbf[128]; char namebf[256]; char namebfptr[258]; struct type *ctype; struct conf_fprintf tconf; size_t printed = 0; int expand_types = conf->expand_types; int suppress_offset_comment = conf->suppress_offset_comment; if (type == NULL) goto out_type_not_found; if (conf->expand_pointers) { int nr_indirections = 0; while (tag__is_pointer(type) && type->type != 0) { struct tag *ttype = cu__type(cu, type->type); if (ttype == NULL) goto out_type_not_found; else { printed = tag__has_type_loop(type, ttype, NULL, 0, fp); if (printed) return printed; } type = ttype; ++nr_indirections; } if 
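/* Illustration: when expand_pointers walks a member like "struct page **p", two DW_TAG_pointer_type levels are stripped, nr_indirections ends up as 2, and the block below rewrites the displayed name to "**p" in namebf. */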
(nr_indirections > 0) { const size_t len = strlen(name); if (len + nr_indirections >= sizeof(namebf)) goto out_type_not_found; memset(namebf, '*', nr_indirections); memcpy(namebf + nr_indirections, name, len); namebf[len + nr_indirections] = '\0'; name = namebf; } expand_types = nr_indirections; if (!suppress_offset_comment) suppress_offset_comment = !!nr_indirections; /* Avoid loops */ if (type->recursivity_level != 0) expand_types = 0; ++type->recursivity_level; } if (expand_types) { int typedef_expanded = 0; while (tag__is_typedef(type)) { struct tag *type_type; int n; ctype = tag__type(type); if (typedef_expanded) printed += fprintf(fp, " -> %s", type__name(ctype, cu)); else { printed += fprintf(fp, "/* typedef %s", type__name(ctype, cu)); typedef_expanded = 1; } type_type = cu__type(cu, type->type); if (type_type == NULL) goto out_type_not_found; n = tag__has_type_loop(type, type_type, NULL, 0, fp); if (n) return printed + n; type = type_type; } if (typedef_expanded) printed += fprintf(fp, " */ "); } tconf = *conf; if (tag__is_struct(type) || tag__is_union(type) || tag__is_enumeration(type)) { inner_struct: tconf.type_spacing -= 8; tconf.prefix = NULL; tconf.suffix = name; tconf.emit_stats = 0; tconf.suppress_offset_comment = suppress_offset_comment; } next_type: switch (type->tag) { case DW_TAG_pointer_type: if (type->type != 0) { int n; struct tag *ptype = cu__type(cu, type->type); if (ptype == NULL) goto out_type_not_found; n = tag__has_type_loop(type, ptype, NULL, 0, fp); if (n) return printed + n; if (ptype->tag == DW_TAG_subroutine_type) { printed += ftype__fprintf(tag__ftype(ptype), cu, name, 0, 1, tconf.type_spacing, &tconf, fp); break; } if ((tag__is_struct(ptype) || tag__is_union(ptype) || tag__is_enumeration(ptype)) && type__name(tag__type(ptype), cu) == NULL) { snprintf(namebfptr, sizeof(namebfptr), "* %s", name); tconf.rel_offset = 1; name = namebfptr; type = ptype; goto inner_struct; } } /* Fall Thru */ default: print_default: printed += fprintf(fp, "%-*s %s", tconf.type_spacing, tag__name(type, cu, tbf, sizeof(tbf), &tconf), name); break; case DW_TAG_subroutine_type: printed += ftype__fprintf(tag__ftype(type), cu, name, 0, 0, tconf.type_spacing, &tconf, fp); break; case DW_TAG_const_type: { size_t const_printed = fprintf(fp, "%s ", "const"); tconf.type_spacing -= const_printed; printed += const_printed; } type = cu__type(cu, type->type); if (type) goto next_type; goto print_default; case DW_TAG_array_type: printed += array_type__fprintf(type, cu, name, &tconf, fp); break; case DW_TAG_class_type: case DW_TAG_structure_type: ctype = tag__type(type); if (type__name(ctype, cu) != NULL && !expand_types) { printed += fprintf(fp, "%s %-*s %s", (type->tag == DW_TAG_class_type && !tconf.classes_as_structs) ? 
"class" : "struct", tconf.type_spacing - 7, type__name(ctype, cu), name); } else { struct class *cclass = tag__class(type); if (!tconf.suppress_comments) class__find_holes(cclass); printed += __class__fprintf(cclass, cu, &tconf, fp); } break; case DW_TAG_union_type: ctype = tag__type(type); if (type__name(ctype, cu) != NULL && !expand_types) printed += fprintf(fp, "union %-*s %s", tconf.type_spacing - 6, type__name(ctype, cu), name); else printed += union__fprintf(ctype, cu, &tconf, fp); break; case DW_TAG_enumeration_type: ctype = tag__type(type); if (type__name(ctype, cu) != NULL) printed += fprintf(fp, "enum %-*s %s", tconf.type_spacing - 5, type__name(ctype, cu), name); else printed += enumeration__fprintf(type, cu, &tconf, fp); break; } out: if (tconf.expand_types) --type->recursivity_level; return printed; out_type_not_found: printed = fprintf(fp, "%-*s %s", tconf.type_spacing, "", name); goto out; } static size_t class__fprintf_cacheline_boundary(struct conf_fprintf *conf, uint32_t offset, FILE *fp); static size_t class_member__fprintf(struct class_member *member, bool union_member, struct tag *type, const struct cu *cu, struct conf_fprintf *conf, FILE *fp) { const int size = member->byte_size; struct conf_fprintf sconf = *conf; uint32_t offset = member->byte_offset; size_t printed = 0, printed_cacheline = 0; const char *cm_name = class_member__name(member, cu), *name = cm_name; if (!sconf.rel_offset) { offset += sconf.base_offset; if (!union_member) sconf.base_offset = offset; } if (member->bitfield_offset < 0) offset += member->byte_size; if (!conf->suppress_comments) printed_cacheline = class__fprintf_cacheline_boundary(conf, offset, fp); if (member->tag.tag == DW_TAG_inheritance) { name = ""; printed += fprintf(fp, "/* "); } if (member->is_static) printed += fprintf(fp, "static "); printed += type__fprintf(type, cu, name, &sconf, fp); if (member->is_static) { if (member->const_value != 0) printed += fprintf(fp, " = %" PRIu64, member->const_value); } else if (member->bitfield_size != 0) { printed += fprintf(fp, ":%u", member->bitfield_size); } if (!sconf.suppress_aligned_attribute && member->alignment != 0) printed += fprintf(fp, " __attribute__((__aligned__(%u)))", member->alignment); fputc(';', fp); ++printed; if ((tag__is_union(type) || tag__is_struct(type) || tag__is_enumeration(type)) && /* Look if is a type defined inline */ type__name(tag__type(type), cu) == NULL) { if (!sconf.suppress_offset_comment) { /* Check if this is a anonymous union */ int slen = cm_name ? (int)strlen(cm_name) : -1; int size_spacing = 5; if (tag__is_struct(type) && tag__class(type)->is_packed && !conf->suppress_packed) { int packed_len = sizeof("__attribute__((__packed__))"); slen += packed_len; } printed += fprintf(fp, sconf.hex_fmt ? "%*s/* %#5x" : "%*s/* %5u", (sconf.type_spacing + sconf.name_spacing - slen - 3), " ", offset); if (member->bitfield_size != 0) { unsigned int bitfield_offset = member->bitfield_offset; if (member->bitfield_offset < 0) bitfield_offset = member->byte_size * 8 + member->bitfield_offset; printed += fprintf(fp, sconf.hex_fmt ? ":%#2x" : ":%2u", bitfield_offset); size_spacing -= 3; } printed += fprintf(fp, sconf.hex_fmt ? " %#*x */" : " %*u */", size_spacing, size); } } else { int spacing = sconf.type_spacing + sconf.name_spacing - printed; if (member->tag.tag == DW_TAG_inheritance) { const size_t p = fprintf(fp, " */"); printed += p; spacing -= p; } if (!sconf.suppress_offset_comment) { int size_spacing = 5; printed += fprintf(fp, sconf.hex_fmt ? 
"%*s/* %#5x" : "%*s/* %5u", spacing > 0 ? spacing : 0, " ", offset); if (member->bitfield_size != 0) { unsigned int bitfield_offset = member->bitfield_offset; if (member->bitfield_offset < 0) bitfield_offset = member->byte_size * 8 + member->bitfield_offset; printed += fprintf(fp, sconf.hex_fmt ? ":%#2x" : ":%2u", bitfield_offset); size_spacing -= 3; } printed += fprintf(fp, sconf.hex_fmt ? " %#*x */" : " %*u */", size_spacing, size); } } return printed + printed_cacheline; } static size_t struct_member__fprintf(struct class_member *member, struct tag *type, const struct cu *cu, struct conf_fprintf *conf, FILE *fp) { return class_member__fprintf(member, false, type, cu, conf, fp); } static size_t union_member__fprintf(struct class_member *member, struct tag *type, const struct cu *cu, struct conf_fprintf *conf, FILE *fp) { return class_member__fprintf(member, true, type, cu, conf, fp); } static size_t union__fprintf(struct type *type, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp) { struct class_member *pos; size_t printed = 0; int indent = conf->indent; struct conf_fprintf uconf; uint32_t initial_union_cacheline; int cacheline = 0; /* This will only be used if this is the outermost union */ if (indent >= (int)sizeof(tabs)) indent = sizeof(tabs) - 1; if (conf->prefix != NULL) printed += fprintf(fp, "%s ", conf->prefix); printed += fprintf(fp, "union%s%s {\n", type__name(type, cu) ? " " : "", type__name(type, cu) ?: ""); uconf = *conf; uconf.indent = indent + 1; /* * If structs embedded in unions, nameless or not, have a size which isn't * isn't a multiple of the union size, then it must be packed, even if * it has no holes nor padding, as an array of such unions would have the * natural alignments of non-multiple structs inside it broken. */ union__infer_packed_attributes(type, cu); /* * We may be called directly or from tag__fprintf, so keep sure * we keep track of the cacheline we're in. * * If we're being called from an outer structure, i.e. union within * struct, class or another union, then this will already have a * value and we'll continue to use it. */ if (uconf.cachelinep == NULL) uconf.cachelinep = &cacheline; /* * Save the cacheline we're in, then, after each union member, get * back to it. Else we'll end up showing cacheline boundaries in * just the first of a multi struct union, for instance. */ initial_union_cacheline = *uconf.cachelinep; type__for_each_member(type, pos) { struct tag *pos_type = cu__type(cu, pos->tag.type); if (pos_type == NULL) { printed += fprintf(fp, "%.*s", uconf.indent, tabs); printed += tag__id_not_found_fprintf(fp, pos->tag.type); continue; } uconf.union_member = 1; printed += fprintf(fp, "%.*s", uconf.indent, tabs); printed += union_member__fprintf(pos, pos_type, cu, &uconf, fp); fputc('\n', fp); ++printed; *uconf.cachelinep = initial_union_cacheline; } return printed + fprintf(fp, "%.*s}%s%s", indent, tabs, conf->suffix ? 
" " : "", conf->suffix ?: ""); } const char *function__prototype(const struct function *func, const struct cu *cu, char *bf, size_t len) { FILE *bfp = fmemopen(bf, len, "w"); if (bfp != NULL) { ftype__fprintf(&func->proto, cu, NULL, 0, 0, 0, &conf_fprintf__defaults, bfp); fclose(bfp); } else snprintf(bf, len, "", __func__); return bf; } size_t ftype__fprintf_parms(const struct ftype *ftype, const struct cu *cu, int indent, const struct conf_fprintf *conf, FILE *fp) { struct parameter *pos; int first_parm = 1; char sbf[128]; struct tag *type; const char *name, *stype; size_t printed = fprintf(fp, "("); ftype__for_each_parameter(ftype, pos) { if (!first_parm) { if (indent == 0) printed += fprintf(fp, ", "); else printed += fprintf(fp, ",\n%.*s", indent, tabs); } else first_parm = 0; name = conf->no_parm_names ? NULL : parameter__name(pos, cu); type = cu__type(cu, pos->tag.type); if (type == NULL) { snprintf(sbf, sizeof(sbf), "", pos->tag.type); stype = sbf; goto print_it; } if (tag__is_pointer(type)) { if (type->type != 0) { int n; struct tag *ptype = cu__type(cu, type->type); if (ptype == NULL) { printed += tag__id_not_found_fprintf(fp, type->type); continue; } n = tag__has_type_loop(type, ptype, NULL, 0, fp); if (n) return printed + n; if (ptype->tag == DW_TAG_subroutine_type) { printed += ftype__fprintf(tag__ftype(ptype), cu, name, 0, 1, 0, conf, fp); continue; } } } else if (type->tag == DW_TAG_subroutine_type) { printed += ftype__fprintf(tag__ftype(type), cu, name, 0, 0, 0, conf, fp); continue; } stype = tag__name(type, cu, sbf, sizeof(sbf), conf); print_it: printed += fprintf(fp, "%s%s%s", stype, name ? " " : "", name ?: ""); } /* No parameters? */ if (first_parm) printed += fprintf(fp, "void)"); else if (ftype->unspec_parms) printed += fprintf(fp, ", ...)"); else printed += fprintf(fp, ")"); return printed; } static size_t function__tag_fprintf(const struct tag *tag, const struct cu *cu, struct function *function, uint16_t indent, const struct conf_fprintf *conf, FILE *fp) { char bf[512]; size_t printed = 0, n; const void *vtag = tag; int c; if (indent >= sizeof(tabs)) indent = sizeof(tabs) - 1; c = indent * 8; switch (tag->tag) { case DW_TAG_inlined_subroutine: { const struct inline_expansion *exp = vtag; const struct tag *talias = cu__function(cu, exp->ip.tag.type); struct function *alias = tag__function(talias); const char *name; if (alias == NULL) { printed += tag__id_not_found_fprintf(fp, exp->ip.tag.type); break; } printed = fprintf(fp, "%.*s", indent, tabs); name = function__name(alias, cu); n = fprintf(fp, "%s", name); size_t namelen = 0; if (name != NULL) namelen = strlen(name); n += ftype__fprintf_parms(&alias->proto, cu, indent + (namelen + 7) / 8, conf, fp); n += fprintf(fp, "; /* size=%zd, low_pc=%#llx */", exp->size, (unsigned long long)exp->ip.addr); #if 0 n = fprintf(fp, "%s(); /* size=%zd, low_pc=%#llx */", function__name(alias, cu), exp->size, (unsigned long long)exp->ip.addr); #endif c = 69; printed += n; } break; case DW_TAG_variable: printed = fprintf(fp, "%.*s", indent, tabs); n = fprintf(fp, "%s %s; /* scope: %s */", variable__type_name(vtag, cu, bf, sizeof(bf)), variable__name(vtag, cu), variable__scope_str(vtag)); c += n; printed += n; break; case DW_TAG_label: { const struct label *label = vtag; printed = fprintf(fp, "%.*s", indent, tabs); fputc('\n', fp); ++printed; c = fprintf(fp, "%s:", label__name(label, cu)); printed += c; } break; case DW_TAG_lexical_block: printed = lexblock__fprintf(vtag, cu, function, indent, conf, fp); fputc('\n', fp); return 
printed + 1; default: printed = fprintf(fp, "%.*s", indent, tabs); n = fprintf(fp, "%s <%llx>", dwarf_tag_name(tag->tag), tag__orig_id(tag, cu)); c += n; printed += n; break; } return printed + fprintf(fp, "%-*.*s// %5u\n", 70 - c, 70 - c, " ", tag__decl_line(tag, cu)); } size_t lexblock__fprintf(const struct lexblock *block, const struct cu *cu, struct function *function, uint16_t indent, const struct conf_fprintf *conf, FILE *fp) { struct tag *pos; size_t printed; if (indent >= sizeof(tabs)) indent = sizeof(tabs) - 1; printed = fprintf(fp, "%.*s{", indent, tabs); if (block->ip.addr != 0) { uint64_t offset = block->ip.addr - function->lexblock.ip.addr; if (offset == 0) printed += fprintf(fp, " /* low_pc=%#llx */", (unsigned long long)block->ip.addr); else printed += fprintf(fp, " /* %s+%#llx */", function__name(function, cu), (unsigned long long)offset); } printed += fprintf(fp, "\n"); list_for_each_entry(pos, &block->tags, node) printed += function__tag_fprintf(pos, cu, function, indent + 1, conf, fp); printed += fprintf(fp, "%.*s}", indent, tabs); if (function->lexblock.ip.addr != block->ip.addr) printed += fprintf(fp, " /* lexblock size=%d */", block->size); return printed; } size_t ftype__fprintf(const struct ftype *ftype, const struct cu *cu, const char *name, const int inlined, const int is_pointer, int type_spacing, const struct conf_fprintf *conf, FILE *fp) { struct tag *type = cu__type(cu, ftype->tag.type); char sbf[128]; const char *stype = tag__name(type, cu, sbf, sizeof(sbf), conf); size_t printed = fprintf(fp, "%s%-*s %s%s%s%s", inlined ? "inline " : "", type_spacing, stype, ftype->tag.tag == DW_TAG_subroutine_type ? "(" : "", is_pointer ? "*" : "", name ?: "", ftype->tag.tag == DW_TAG_subroutine_type ? ")" : ""); return printed + ftype__fprintf_parms(ftype, cu, 0, conf, fp); } static size_t function__fprintf(const struct tag *tag, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp) { struct function *func = tag__function(tag); size_t printed = 0; bool inlined = !conf->strip_inline && function__declared_inline(func); if (func->virtuality == DW_VIRTUALITY_virtual || func->virtuality == DW_VIRTUALITY_pure_virtual) printed += fprintf(fp, "virtual "); printed += ftype__fprintf(&func->proto, cu, function__name(func, cu), inlined, 0, 0, conf, fp); if (func->virtuality == DW_VIRTUALITY_pure_virtual) printed += fprintf(fp, " = 0"); return printed; } size_t function__fprintf_stats(const struct tag *tag, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp) { struct function *func = tag__function(tag); size_t printed = lexblock__fprintf(&func->lexblock, cu, func, 0, conf, fp); printed += fprintf(fp, "/* size: %d", function__size(func)); if (func->lexblock.nr_variables > 0) printed += fprintf(fp, ", variables: %u", func->lexblock.nr_variables); if (func->lexblock.nr_labels > 0) printed += fprintf(fp, ", goto labels: %u", func->lexblock.nr_labels); if (func->lexblock.nr_inline_expansions > 0) printed += fprintf(fp, ", inline expansions: %u (%d bytes)", func->lexblock.nr_inline_expansions, func->lexblock.size_inline_expansions); return printed + fprintf(fp, " */\n"); } static size_t class__fprintf_cacheline_boundary(struct conf_fprintf *conf, uint32_t offset, FILE *fp) { int indent = conf->indent; uint32_t cacheline = offset / cacheline_size; size_t printed = 0; if (cacheline > *conf->cachelinep) { const uint32_t cacheline_pos = offset % cacheline_size; const uint32_t cacheline_in_bytes = offset - cacheline_pos; if (cacheline_pos == 0) printed += fprintf(fp, "/* --- 
cacheline %u boundary " "(%u bytes) --- */\n", cacheline, cacheline_in_bytes); else printed += fprintf(fp, "/* --- cacheline %u boundary " "(%u bytes) was %u bytes ago --- " "*/\n", cacheline, cacheline_in_bytes, cacheline_pos); printed += fprintf(fp, "%.*s", indent, tabs); *conf->cachelinep = cacheline; } return printed; } static size_t class__vtable_fprintf(struct class *class, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp) { struct function *pos; size_t printed = 0; if (class->nr_vtable_entries == 0) goto out; printed += fprintf(fp, "%.*s/* vtable has %u entries: {\n", conf->indent, tabs, class->nr_vtable_entries); list_for_each_entry(pos, &class->vtable, vtable_node) { printed += fprintf(fp, "%.*s [%d] = %s(%s), \n", conf->indent, tabs, pos->vtable_entry, function__name(pos, cu), function__linkage_name(pos, cu)); } printed += fprintf(fp, "%.*s} */", conf->indent, tabs); out: return printed; } static size_t __class__fprintf(struct class *class, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp) { struct type *type = &class->type; size_t last_size = 0, size; uint8_t newline = 0; uint16_t nr_paddings = 0; uint16_t nr_forced_alignments = 0, nr_forced_alignment_holes = 0; uint32_t sum_forced_alignment_holes = 0; uint32_t sum_bytes = 0, sum_bits = 0; uint32_t sum_holes = 0; uint32_t sum_paddings = 0; uint32_t sum_bit_holes = 0; uint32_t cacheline = 0; int size_diff = 0; int first = 1; struct class_member *pos, *last = NULL; struct tag *tag_pos; const char *current_accessibility = NULL; struct conf_fprintf cconf = conf ? *conf : conf_fprintf__defaults; const uint16_t t = type->namespace.tag.tag; size_t printed = fprintf(fp, "%s%s%s%s%s", cconf.prefix ?: "", cconf.prefix ? " " : "", ((cconf.classes_as_structs || t == DW_TAG_structure_type) ? "struct" : t == DW_TAG_class_type ? "class" : "interface"), type__name(type, cu) ? " " : "", type__name(type, cu) ?: ""); int indent = cconf.indent; if (indent >= (int)sizeof(tabs)) indent = sizeof(tabs) - 1; if (cconf.cachelinep == NULL) cconf.cachelinep = &cacheline; cconf.indent = indent + 1; cconf.no_semicolon = 0; class__infer_packed_attributes(class, cu); /* First look if we have DW_TAG_inheritance */ type__for_each_tag(type, tag_pos) { const char *accessibility; if (tag_pos->tag != DW_TAG_inheritance) continue; if (first) { printed += fprintf(fp, " :"); first = 0; } else printed += fprintf(fp, ","); pos = tag__class_member(tag_pos); if (pos->virtuality == DW_VIRTUALITY_virtual) printed += fprintf(fp, " virtual"); accessibility = tag__accessibility(tag_pos); if (accessibility != NULL) printed += fprintf(fp, " %s", accessibility); struct tag *pos_type = cu__type(cu, tag_pos->type); if (pos_type != NULL) printed += fprintf(fp, " %s", type__name(tag__type(pos_type), cu)); else printed += tag__id_not_found_fprintf(fp, tag_pos->type); } printed += fprintf(fp, " {\n"); if (class->pre_bit_hole > 0 && !cconf.suppress_comments) { if (!newline++) { fputc('\n', fp); ++printed; } printed += fprintf(fp, "%.*s/* XXX %d bit%s hole, " "try to pack */\n", cconf.indent, tabs, class->pre_bit_hole, class->pre_bit_hole != 1 ? "s" : ""); sum_bit_holes += class->pre_bit_hole; } if (class->pre_hole > 0 && !cconf.suppress_comments) { if (!newline++) { fputc('\n', fp); ++printed; } printed += fprintf(fp, "%.*s/* XXX %d byte%s hole, " "try to pack */\n", cconf.indent, tabs, class->pre_hole, class->pre_hole != 1 ? 
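/* e.g. a 3 byte hole before the first member prints: / * XXX 3 bytes hole, try to pack * / */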
"s" : ""); sum_holes += class->pre_hole; } type__for_each_tag(type, tag_pos) { const char *accessibility = tag__accessibility(tag_pos); if (accessibility != NULL && accessibility != current_accessibility) { current_accessibility = accessibility; printed += fprintf(fp, "%.*s%s:\n\n", cconf.indent - 1, tabs, accessibility); } if (tag_pos->tag != DW_TAG_member && tag_pos->tag != DW_TAG_inheritance) { if (!cconf.show_only_data_members) { printed += tag__fprintf(tag_pos, cu, &cconf, fp); printed += fprintf(fp, "\n\n"); } continue; } pos = tag__class_member(tag_pos); if (!cconf.suppress_aligned_attribute && pos->alignment != 0) { uint32_t forced_alignment_hole = last ? last->hole : class->pre_hole; if (forced_alignment_hole != 0) { ++nr_forced_alignment_holes; sum_forced_alignment_holes += forced_alignment_hole; } ++nr_forced_alignments; } /* * These paranoid checks doesn't make much sense on * DW_TAG_inheritance, have to understand why virtual public * ancestors make the offset go backwards... */ if (last != NULL && tag_pos->tag == DW_TAG_member && /* * kmemcheck bitfield tricks use zero sized arrays as markers * all over the place. */ last_size != 0) { if (last->bit_hole != 0 && pos->bitfield_size) { uint8_t bitfield_size = last->bit_hole; struct tag *pos_type = cu__type(cu, pos->tag.type); if (pos_type == NULL) { printed += fprintf(fp, "%.*s", cconf.indent, tabs); printed += tag__id_not_found_fprintf(fp, pos->tag.type); continue; } /* * Now check if this isn't something like 'unsigned :N' with N > 0, * i.e. _explicitely_ adding a bit hole. */ if (last->byte_offset != pos->byte_offset) { printed += fprintf(fp, "\n%.*s/* Force alignment to the next boundary: */\n", cconf.indent, tabs); bitfield_size = 0; } printed += fprintf(fp, "%.*s", cconf.indent, tabs); printed += type__fprintf(pos_type, cu, "", &cconf, fp); printed += fprintf(fp, ":%u;\n", bitfield_size); } if (pos->byte_offset < last->byte_offset || (pos->byte_offset == last->byte_offset && last->bitfield_size == 0 && /* * This is just when transitioning from a non-bitfield to * a bitfield, think about zero sized arrays in the middle * of a struct. */ pos->bitfield_size != 0)) { if (!cconf.suppress_comments) { if (!newline++) { fputc('\n', fp); ++printed; } printed += fprintf(fp, "%.*s/* Bitfield combined" " with previous fields */\n", cconf.indent, tabs); } } else { const ssize_t cc_last_size = ((ssize_t)pos->byte_offset - (ssize_t)last->byte_offset); if (cc_last_size > 0 && (size_t)cc_last_size < last_size) { if (!cconf.suppress_comments) { if (!newline++) { fputc('\n', fp); ++printed; } printed += fprintf(fp, "%.*s/* Bitfield combined" " with next fields */\n", cconf.indent, tabs); } } } } if (newline) { fputc('\n', fp); newline = 0; ++printed; } struct tag *pos_type = cu__type(cu, pos->tag.type); if (pos_type == NULL) { printed += fprintf(fp, "%.*s", cconf.indent, tabs); printed += tag__id_not_found_fprintf(fp, pos->tag.type); continue; } cconf.last_member = list_is_last(&tag_pos->node, &type->namespace.tags); cconf.first_member = last == NULL; size = pos->byte_size; printed += fprintf(fp, "%.*s", cconf.indent, tabs); printed += struct_member__fprintf(pos, pos_type, cu, &cconf, fp); if (tag__is_struct(pos_type) && !cconf.suppress_comments) { struct class *tclass = tag__class(pos_type); uint16_t padding; /* * We may not yet have looked for holes and paddings * in this member's struct type. 
*/ class__find_holes(tclass); class__infer_packed_attributes(tclass, cu); padding = tclass->padding; if (padding > 0) { ++nr_paddings; sum_paddings += padding; if (!newline++) { fputc('\n', fp); ++printed; } printed += fprintf(fp, "\n%.*s/* XXX last " "struct has %d byte%s of " "padding */", cconf.indent, tabs, padding, padding != 1 ? "s" : ""); } } if (pos->bit_hole != 0 && !cconf.suppress_comments) { if (!newline++) { fputc('\n', fp); ++printed; } printed += fprintf(fp, "\n%.*s/* XXX %d bit%s hole, " "try to pack */", cconf.indent, tabs, pos->bit_hole, pos->bit_hole != 1 ? "s" : ""); sum_bit_holes += pos->bit_hole; } if (pos->hole > 0 && !cconf.suppress_comments) { if (!newline++) { fputc('\n', fp); ++printed; } printed += fprintf(fp, "\n%.*s/* XXX %d byte%s hole, " "try to pack */", cconf.indent, tabs, pos->hole, pos->hole != 1 ? "s" : ""); sum_holes += pos->hole; } fputc('\n', fp); ++printed; /* XXX for now just skip these */ if (tag_pos->tag == DW_TAG_inheritance) continue; #if 0 /* * This one was being skipped but caused problems with: * http://article.gmane.org/gmane.comp.debugging.dwarves/185 * http://www.spinics.net/lists/dwarves/msg00119.html */ if (pos->virtuality == DW_VIRTUALITY_virtual) continue; #endif if (pos->bitfield_size) { sum_bits += pos->bitfield_size; } else { sum_bytes += pos->byte_size; } if (last == NULL || /* First member */ /* * Last member was a zero sized array, typedef, struct, etc */ last_size == 0 || /* * We moved to a new offset */ last->byte_offset != pos->byte_offset) { last_size = size; } else if (last->bitfield_size == 0 && pos->bitfield_size != 0) { /* * Transitioned from from a non-bitfield to a * bitfield sharing the same offset */ /* * Compensate by removing the size of the * last member that is "inside" this new * member at the same offset. * * E.g.: * struct foo { * u8 a; / 0 1 / * int b:1; / 0:23 4 / * } */ last_size = size; } last = pos; } /* * BTF doesn't have alignment info, for now use this infor from the loader * to avoid adding the forced bitfield paddings and have btfdiff happy. 
*/ if (class->padding != 0 && type->alignment == 0 && cconf.has_alignment_info && !cconf.suppress_force_paddings) { tag_pos = cu__type(cu, last->tag.type); size = tag__size(tag_pos, cu); if (is_power_of_2(size) && class->padding > cu->addr_size) { int added_padding; int bit_size = size * 8; printed += fprintf(fp, "\n%.*s/* Force padding: */\n", cconf.indent, tabs); for (added_padding = 0; added_padding < class->padding; added_padding += size) { printed += fprintf(fp, "%.*s", cconf.indent, tabs); printed += type__fprintf(tag_pos, cu, "", &cconf, fp); printed += fprintf(fp, ":%u;\n", bit_size); } } } if (!cconf.show_only_data_members) class__vtable_fprintf(class, cu, &cconf, fp); if (!cconf.emit_stats) goto out; printed += type__fprintf_stats(type, cu, &cconf, fp); if (sum_holes > 0 || sum_bit_holes > 0) { if (sum_bytes > 0) { printed += fprintf(fp, "%.*s/* sum members: %u", cconf.indent, tabs, sum_bytes); if (sum_holes > 0) printed += fprintf(fp, ", holes: %d, sum holes: %u", class->nr_holes, sum_holes); printed += fprintf(fp, " */\n"); } if (sum_bits > 0) { printed += fprintf(fp, "%.*s/* sum bitfield members: %u bits", cconf.indent, tabs, sum_bits); if (sum_bit_holes > 0) printed += fprintf(fp, ", bit holes: %d, sum bit holes: %u bits", class->nr_bit_holes, sum_bit_holes); else printed += fprintf(fp, " (%u bytes)", sum_bits / 8); printed += fprintf(fp, " */\n"); } } if (class->padding > 0) printed += fprintf(fp, "%.*s/* padding: %u */\n", cconf.indent, tabs, class->padding); if (nr_paddings > 0) printed += fprintf(fp, "%.*s/* paddings: %u, sum paddings: " "%u */\n", cconf.indent, tabs, nr_paddings, sum_paddings); if (class->bit_padding > 0) printed += fprintf(fp, "%.*s/* bit_padding: %u bits */\n", cconf.indent, tabs, class->bit_padding); if (!cconf.suppress_aligned_attribute && nr_forced_alignments != 0) { printed += fprintf(fp, "%.*s/* forced alignments: %u", cconf.indent, tabs, nr_forced_alignments); if (nr_forced_alignment_holes != 0) { printed += fprintf(fp, ", forced holes: %u, sum forced holes: %u", nr_forced_alignment_holes, sum_forced_alignment_holes); } printed += fprintf(fp, " */\n"); } cacheline = (cconf.base_offset + type->size) % cacheline_size; if (cacheline != 0) printed += fprintf(fp, "%.*s/* last cacheline: %u bytes */\n", cconf.indent, tabs, cacheline); if (cconf.show_first_biggest_size_base_type_member && type->nr_members != 0) { struct class_member *m = type__find_first_biggest_size_base_type_member(type, cu); printed += fprintf(fp, "%.*s/* first biggest size base type member: %s %u %zd */\n", cconf.indent, tabs, class_member__name(m, cu), m->byte_offset, m->byte_size); } size_diff = type->size * 8 - (sum_bytes * 8 + sum_bits + sum_holes * 8 + sum_bit_holes + class->padding * 8 + class->bit_padding); if (size_diff && type->nr_members != 0) printed += fprintf(fp, "\n%.*s/* BRAIN FART ALERT! %d bytes != " "%u (member bytes) + %u (member bits) " "+ %u (byte holes) + %u (bit holes), diff = %d bits */\n", cconf.indent, tabs, type->size, sum_bytes, sum_bits, sum_holes, sum_bit_holes, size_diff); out: printed += fprintf(fp, "%.*s}", indent, tabs); if (class->is_packed && !cconf.suppress_packed) printed += fprintf(fp, " __attribute__((__packed__))"); if (cconf.suffix) printed += fprintf(fp, " %s", cconf.suffix); /* * A class that was marked packed by class__infer_packed_attributes * because it has an alignment that is different than its natural * alignment, should not print the __alignment__ here, just the * __packed__ attribute. 
*/ if (!cconf.suppress_aligned_attribute && type->alignment != 0 && !class->is_packed) printed += fprintf(fp, " __attribute__((__aligned__(%u)))", type->alignment); return printed; } size_t class__fprintf(struct class *class, const struct cu *cu, FILE *fp) { return __class__fprintf(class, cu, NULL, fp); } static size_t variable__fprintf(const struct tag *tag, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp) { const struct variable *var = tag__variable(tag); const char *name = variable__name(var, cu); size_t printed = 0; if (name != NULL) { struct tag *type = cu__type(cu, var->ip.tag.type); if (type != NULL) { const char *varprefix = variable__prefix(var); if (varprefix != NULL) printed += fprintf(fp, "%s", varprefix); printed += type__fprintf(type, cu, name, conf, fp); } } return printed; } static size_t namespace__fprintf(const struct tag *tag, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp) { struct namespace *space = tag__namespace(tag); struct conf_fprintf cconf = *conf; size_t printed = fprintf(fp, "namespace %s {\n", namespace__name(space, cu)); struct tag *pos; ++cconf.indent; cconf.no_semicolon = 0; namespace__for_each_tag(space, pos) { printed += tag__fprintf(pos, cu, &cconf, fp); printed += fprintf(fp, "\n\n"); } return printed + fprintf(fp, "}"); } size_t tag__fprintf(struct tag *tag, const struct cu *cu, const struct conf_fprintf *conf, FILE *fp) { size_t printed = 0; struct conf_fprintf tconf; const struct conf_fprintf *pconf = conf; if (conf == NULL) { tconf = conf_fprintf__defaults; pconf = &tconf; if (tconf.expand_types) tconf.name_spacing = 55; else if (tag__is_union(tag)) tconf.name_spacing = 21; } else if (conf->name_spacing == 0 || conf->type_spacing == 0) { tconf = *conf; pconf = &tconf; if (tconf.name_spacing == 0) { if (tconf.expand_types) tconf.name_spacing = 55; else tconf.name_spacing = tag__is_union(tag) ? 21 : 23; } if (tconf.type_spacing == 0) tconf.type_spacing = 26; } if (pconf->expand_types) ++tag->recursivity_level; if (pconf->show_decl_info) { printed += fprintf(fp, "%.*s", pconf->indent, tabs); printed += fprintf(fp, "/* Used at: %s */\n", cu->name); printed += fprintf(fp, "%.*s", pconf->indent, tabs); printed += tag__fprintf_decl_info(tag, cu, fp); } printed += fprintf(fp, "%.*s", pconf->indent, tabs); switch (tag->tag) { case DW_TAG_array_type: printed += array_type__fprintf(tag, cu, "array", pconf, fp); break; case DW_TAG_enumeration_type: printed += enumeration__fprintf(tag, cu, pconf, fp); break; case DW_TAG_typedef: printed += typedef__fprintf(tag, cu, pconf, fp); break; case DW_TAG_class_type: case DW_TAG_interface_type: case DW_TAG_structure_type: printed += __class__fprintf(tag__class(tag), cu, pconf, fp); break; case DW_TAG_namespace: printed += namespace__fprintf(tag, cu, pconf, fp); break; case DW_TAG_subprogram: printed += function__fprintf(tag, cu, pconf, fp); break; case DW_TAG_union_type: printed += union__fprintf(tag__type(tag), cu, pconf, fp); break; case DW_TAG_variable: printed += variable__fprintf(tag, cu, pconf, fp); break; case DW_TAG_imported_declaration: printed += imported_declaration__fprintf(tag, cu, fp); break; case DW_TAG_imported_module: printed += imported_module__fprintf(tag, cu, fp); break; default: printed += fprintf(fp, "/* %s: %s tag not supported! 
*/", __func__, dwarf_tag_name(tag->tag)); break; } if (!pconf->no_semicolon) { fputc(';', fp); ++printed; } if (tag__is_function(tag) && !pconf->suppress_comments) { const struct function *func = tag__function(tag); if (func->linkage_name) printed += fprintf(fp, " /* linkage=%s */", function__linkage_name(func, cu)); } if (pconf->expand_types) --tag->recursivity_level; return printed; } void cus__print_error_msg(const char *progname, const struct cus *cus, const char *filename, const int err) { if (err == -EINVAL || (cus != NULL && list_empty(&cus->cus))) fprintf(stderr, "%s: couldn't load debugging info from %s\n", progname, filename); else fprintf(stderr, "%s: %s\n", progname, strerror(err)); } void dwarves__fprintf_init(uint16_t user_cacheline_size) { if (user_cacheline_size == 0) { long sys_cacheline_size = sysconf(_SC_LEVEL1_DCACHE_LINESIZE); if (sys_cacheline_size > 0) cacheline_size = sys_cacheline_size; else cacheline_size = 64; /* Fall back to a sane value */ } else cacheline_size = user_cacheline_size; } dwarves-dfsg-1.15/dwarves_reorganize.c000066400000000000000000000573521350511416500201150ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2006 Mandriva Conectiva S.A. Copyright (C) 2006 Arnaldo Carvalho de Melo Copyright (C) 2007 Red Hat Inc. Copyright (C) 2007 Arnaldo Carvalho de Melo */ #include "list.h" #include "dwarves_reorganize.h" #include "dwarves.h" static void class__recalc_holes(struct class *class) { class->holes_searched = 0; class__find_holes(class); } void class__subtract_offsets_from(struct class *class, struct class_member *from, const uint16_t size) { struct class_member *member; class__for_each_member_continue(class, from, member) { member->byte_offset -= size; member->bit_offset -= size * 8; } if (class->padding != 0) { struct class_member *last_member = type__last_member(&class->type); const ssize_t new_padding = (class__size(class) - (last_member->byte_offset + last_member->byte_size)); if (new_padding > 0) class->padding = new_padding; else class->padding = 0; } } void class__add_offsets_from(struct class *class, struct class_member *from, const uint16_t size) { struct class_member *member; class__for_each_member_continue(class, from, member) { member->byte_offset += size; member->bit_offset += size * 8; } } /* * XXX: Check this more thoroughly. Right now it is used because I was * to lazy to do class__remove_member properly, adjusting alignments and * holes as we go removing fields. Ditto for class__add_offsets_from. */ void class__fixup_alignment(struct class *class, const struct cu *cu) { struct class_member *pos, *last_member = NULL; size_t power2; type__for_each_data_member(&class->type, pos) { if (last_member == NULL && pos->byte_offset != 0) { /* paranoid! 
*/ class__subtract_offsets_from(class, pos, (pos->byte_offset - pos->byte_size)); pos->byte_offset = 0; pos->bit_offset = 0; } else if (last_member != NULL && last_member->hole >= cu->addr_size) { size_t dec = (last_member->hole / cu->addr_size) * cu->addr_size; last_member->hole -= dec; if (last_member->hole == 0) --class->nr_holes; pos->byte_offset -= dec; pos->bit_offset -= dec * 8; class->type.size -= dec; class__subtract_offsets_from(class, pos, dec); } else for (power2 = cu->addr_size; power2 >= 2; power2 /= 2) { const size_t remainder = pos->byte_offset % power2; if (pos->byte_size == power2) { if (remainder == 0) /* perfectly aligned */ break; if (last_member->hole >= remainder) { last_member->hole -= remainder; if (last_member->hole == 0) --class->nr_holes; pos->byte_offset -= remainder; pos->bit_offset -= remainder * 8; class__subtract_offsets_from(class, pos, remainder); } else { const size_t inc = power2 - remainder; if (last_member->hole == 0) ++class->nr_holes; last_member->hole += inc; pos->byte_offset += inc; pos->bit_offset += inc * 8; class->type.size += inc; class__add_offsets_from(class, pos, inc); } } } last_member = pos; } if (last_member != NULL) { struct class_member *m = type__find_first_biggest_size_base_type_member(&class->type, cu); size_t unpadded_size = last_member->byte_offset + last_member->byte_size; size_t m_size = m->byte_size, remainder; /* google for struct zone_padding in the linux kernel for an example */ if (m_size == 0) return; remainder = unpadded_size % m_size; if (remainder != 0) { class->padding = m_size - remainder; class->type.size = unpadded_size + class->padding; } } } static struct class_member * class__find_next_hole_of_size(struct class *class, struct class_member *from, size_t size) { struct class_member *bitfield_head = NULL; struct class_member *member; class__for_each_member_continue(class, from, member) { if (member->bitfield_size != 0) { if (bitfield_head == NULL) bitfield_head = member; } else bitfield_head = NULL; if (member->hole != 0) { if (member->byte_size != 0 && member->byte_size <= size) return bitfield_head ? : member; } } return NULL; } static struct class_member * class__find_last_member_of_size(struct class *class, struct class_member *to, size_t size) { struct class_member *member; class__for_each_member_reverse(class, member) { if (member->tag.tag != DW_TAG_member) continue; if (member == to) break; /* * Check if this is the first member of a bitfield. It either * has another member before it that is not part of the current * bitfield or it is the first member of the struct. */ if (member->bitfield_size != 0 && member->byte_offset != 0) { struct class_member *prev = list_entry(member->tag.node.prev, struct class_member, tag.node); if (prev->bitfield_size != 0) continue; } if (member->byte_size != 0 && member->byte_size <= size) return member; } return NULL; } static struct class_member * class__find_next_bit_hole_of_size(struct class *class, struct class_member *from, size_t size) { struct class_member *member; class__for_each_member_continue(class, from, member) { if (member->tag.tag != DW_TAG_member) continue; if (member->bit_hole != 0 && member->bitfield_size <= size) return member; } #if 0 /* * FIXME: Handle the case where the bit padding is on the same bitfield * that we're looking, i.e. 
we can't combine a bitfield with itself, * perhaps we should tag bitfields with a sequential number, clearly marking * each of the bitfields in advance, so that all the algorithms that * have to deal with bitfields, moving them around, demoting, etc, can * be simplified. */ /* * Now look if the last member is a one member bitfield, * i.e. if we have bit_padding */ if (class->bit_padding != 0) return type__last_member(&class->type); #endif return NULL; } static bool class__move_member(struct class *class, struct class_member *dest, struct class_member *from, const struct cu *cu, int from_padding, const int verbose, FILE *fp) { const size_t from_size = from->byte_size; const size_t dest_size = dest->byte_size; #ifndef BITFIELD_REORG_ALGORITHMS_ENABLED /* * For now refuse to move a bitfield, we need to first fixup some BRAIN FARTs */ if (from->bitfield_size != 0) return false; #endif const bool from_was_last = from->tag.node.next == class__tags(class); struct class_member *tail_from = from; struct class_member *from_prev = list_entry(from->tag.node.prev, struct class_member, tag.node); uint16_t orig_tail_from_hole = tail_from->hole; const uint16_t orig_from_offset = from->byte_offset; /* * Align 'from' after 'dest': */ const uint16_t offset = dest->hole % (from_size > cu->addr_size ? cu->addr_size : from_size); /* * Set new 'from' offset, after 'dest->byte_offset', aligned */ const uint16_t new_from_offset = dest->byte_offset + dest_size + offset; if (verbose) fputs("/* Moving", fp); if (from->bitfield_size != 0) { struct class_member *pos, *tmp; LIST_HEAD(from_list); if (verbose) fprintf(fp, " bitfield('%s' ... ", class_member__name(from, cu)); class__for_each_member_safe_from(class, from, pos, tmp) { /* * Have we reached the end of the bitfield? */ if (pos->byte_offset != orig_from_offset) break; tail_from = pos; orig_tail_from_hole = tail_from->hole; pos->byte_offset = new_from_offset; pos->bit_offset = new_from_offset * 8 + pos->bitfield_offset; list_move_tail(&pos->tag.node, &from_list); } list_splice(&from_list, &dest->tag.node); if (verbose) fprintf(fp, "'%s')", class_member__name(tail_from, cu)); } else { if (verbose) fprintf(fp, " '%s'", class_member__name(from, cu)); /* * Remove 'from' from the list */ list_del(&from->tag.node); /* * Add 'from' after 'dest': */ __list_add(&from->tag.node, &dest->tag.node, dest->tag.node.next); from->byte_offset = new_from_offset; from->bit_offset = new_from_offset * 8 + from->bitfield_offset; } if (verbose) fprintf(fp, " from after '%s' to after '%s' */\n", class_member__name(from_prev, cu), class_member__name(dest, cu)); if (from_padding) { /* * Check if we're eliminating the need for padding: */ if (orig_from_offset % cu->addr_size == 0) { /* * Good, no need for padding anymore: */ class->type.size -= from_size + class->padding; } else { /* * No, so just add from_size to the padding: */ if (verbose) fprintf(fp, "/* adding %zd bytes from %s to " "the padding */\n", from_size, class_member__name(from, cu)); } } else if (from_was_last) { class->type.size -= from_size + class->padding; } else { /* * See if we are adding a new hole that is bigger than * sizeof(long), this may have problems with explicit alignment * made by the programmer, perhaps we need a switch that allows * us to avoid realignment, just using existing holes but * keeping the existing alignment, anyway the programmer has to * check the resulting reorganization before using it, and for * automatic stuff such as the one that will be used for struct * "views" in tools such as ctracer we are
more interested in * packing the subset as tightly as possible. */ if (orig_tail_from_hole + from_size >= cu->addr_size) { class->type.size -= cu->addr_size; class__subtract_offsets_from(class, from_prev, cu->addr_size); } } class__recalc_holes(class); if (verbose > 1) { class__fprintf(class, cu, fp); fputc('\n', fp); } return true; } static void class__move_bit_member(struct class *class, const struct cu *cu, struct class_member *dest, struct class_member *from, const int verbose, FILE *fp) { struct class_member *from_prev = list_entry(from->tag.node.prev, struct class_member, tag.node); const uint8_t is_last_member = (from->tag.node.next == class__tags(class)); if (verbose) fprintf(fp, "/* Moving '%s:%u' from after '%s' to " "after '%s:%u' */\n", class_member__name(from, cu), from->bitfield_size, class_member__name(from_prev, cu), class_member__name(dest, cu), dest->bitfield_size); /* * Remove 'from' from the list */ list_del(&from->tag.node); /* * Add from after dest: */ __list_add(&from->tag.node, &dest->tag.node, dest->tag.node.next); /* Check if this was the last entry in the bitfield */ if (from_prev->bitfield_size == 0) { size_t from_size = from->byte_size; /* * Are we shrinking the struct? */ if (from_size + from->hole >= cu->addr_size) { class->type.size -= from_size + from->hole; class__subtract_offsets_from(class, from_prev, from_size + from->hole); } } /* * Tricky, what are the rules for bitfield layouts on this arch? * Assume its IA32 */ from->bitfield_offset = dest->bitfield_offset + dest->bitfield_size; /* * Now both have the same offset: */ from->byte_offset = dest->byte_offset; from->bit_offset = dest->byte_offset * 8 + from->bitfield_offset; class__recalc_holes(class); if (verbose > 1) { class__fprintf(class, cu, fp); fputc('\n', fp); } } static void class__demote_bitfield_members(struct class *class, struct class_member *from, struct class_member *to, const struct base_type *old_type, const struct base_type *new_type, type_id_t new_type_id) { struct class_member *member; class__for_each_member_from(class, from, member) { member->byte_size = new_type->bit_size / 8; member->tag.type = new_type_id; if (member == to) break; } } static struct tag *cu__find_base_type_of_size(const struct cu *cu, const size_t size, type_id_t *id) { const char *type_name, *type_name_alt = NULL; switch (size) { case sizeof(unsigned char): type_name = "unsigned char"; break; case sizeof(unsigned short int): type_name = "short unsigned int"; type_name_alt = "unsigned short"; break; case sizeof(unsigned int): type_name = "unsigned int"; type_name_alt = "unsigned"; break; case sizeof(unsigned long long): if (cu->addr_size == 8) { type_name = "long unsigned int"; type_name_alt = "unsigned long"; } else { type_name = "long long unsigned int"; type_name_alt = "unsigned long long"; } break; default: return NULL; } struct tag *ret = cu__find_base_type_by_name(cu, type_name, id); return ret ?: cu__find_base_type_by_name(cu, type_name_alt, id); } static int class__demote_bitfields(struct class *class, const struct cu *cu, const int verbose, FILE *fp) { struct class_member *member; struct class_member *bitfield_head = NULL; const struct tag *old_type_tag, *new_type_tag; size_t current_bitfield_size = 0, size, bytes_needed; int some_was_demoted = 0; type__for_each_data_member(&class->type, member) { /* * Check if we are moving away from a bitfield */ if (member->bitfield_size == 0) { current_bitfield_size = 0; bitfield_head = NULL; } else { if (bitfield_head == NULL) { bitfield_head = member; 
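/* Worked example (illustrative): given struct sample { int a:3; int b:2; short c; };, the loop accumulates current_bitfield_size = 5 bits for the 'a'/'b' bitfield; when that bitfield ends with unused bits (member->bit_hole != 0), bytes_needed = roundup_pow_of_two((5 + 7) / 8) = 1, which is smaller than the 4 byte 'int' holding it, so cu__find_base_type_of_size() is asked for a 1 byte base type (unsigned char) and the members are demoted to it, letting 'c' move up and the struct shrink. */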
current_bitfield_size = member->bitfield_size; } else if (bitfield_head->byte_offset != member->byte_offset) { /* * We moved from one bitfield to another, for * now don't handle this case, just move on to * the next bitfield, we may well move it to * another place and then the first bitfield will * be isolated and will be handled in the next * pass. */ bitfield_head = member; current_bitfield_size = member->bitfield_size; } else current_bitfield_size += member->bitfield_size; } /* * Have we got to the end of a bitfield with holes? */ if (member->bit_hole == 0) continue; size = member->byte_size; bytes_needed = (current_bitfield_size + 7) / 8; bytes_needed = roundup_pow_of_two(bytes_needed); if (bytes_needed == size) continue; type_id_t new_type_id; old_type_tag = cu__type(cu, member->tag.type); new_type_tag = cu__find_base_type_of_size(cu, bytes_needed, &new_type_id); if (new_type_tag == NULL) { fprintf(fp, "/* BRAIN FART ALERT! couldn't find a " "%zd bytes base type */\n\n", bytes_needed); continue; } if (verbose) { char old_bf[64], new_bf[64]; fprintf(fp, "/* Demoting bitfield ('%s' ... '%s') " "from '%s' to '%s' */\n", class_member__name(bitfield_head, cu), class_member__name(member, cu), base_type__name(tag__base_type(old_type_tag), cu, old_bf, sizeof(old_bf)), base_type__name(tag__base_type(new_type_tag), cu, new_bf, sizeof(new_bf))); } class__demote_bitfield_members(class, bitfield_head, member, tag__base_type(old_type_tag), tag__base_type(new_type_tag), new_type_id); class__recalc_holes(class); some_was_demoted = 1; if (verbose > 1) { class__fprintf(class, cu, fp); fputc('\n', fp); } } /* * Now look if we have bit padding, i.e. if the last member * is a bitfield and it's the sole member in this bitfield, i.e. * if it wasn't already demoted as part of a bitfield of more than * one member: */ member = type__last_member(&class->type); if (class->bit_padding != 0 && bitfield_head == member) { size = member->byte_size; bytes_needed = (member->bitfield_size + 7) / 8; if (bytes_needed < size) { old_type_tag = cu__type(cu, member->tag.type); type_id_t new_type_id; new_type_tag = cu__find_base_type_of_size(cu, bytes_needed, &new_type_id); tag__assert_search_result(old_type_tag); tag__assert_search_result(new_type_tag); if (verbose) { char old_bf[64], new_bf[64]; fprintf(fp, "/* Demoting bitfield ('%s') " "from '%s' to '%s' */\n", class_member__name(member, cu), base_type__name(tag__base_type(old_type_tag), cu, old_bf, sizeof(old_bf)), base_type__name(tag__base_type(new_type_tag), cu, new_bf, sizeof(new_bf))); } class__demote_bitfield_members(class, member, member, tag__base_type(old_type_tag), tag__base_type(new_type_tag), new_type_id); class__recalc_holes(class); some_was_demoted = 1; if (verbose > 1) { class__fprintf(class, cu, fp); fputc('\n', fp); } } } return some_was_demoted; } static void class__reorganize_bitfields(struct class *class, const struct cu *cu, const int verbose, FILE *fp) { struct class_member *member, *brother; restart: type__for_each_data_member(&class->type, member) { /* See if we have a hole after this member */ if (member->bit_hole != 0) { /* * OK, try to find a member that has a bit hole after * it and that has a size that fits the current hole: */ brother = class__find_next_bit_hole_of_size(class, member, member->bit_hole); if (brother != NULL) { class__move_bit_member(class, cu, member, brother, verbose, fp); goto restart; } } } } static void class__fixup_bitfield_types(struct class *class, struct class_member *from, struct class_member *to_before, type_id_t
type) { struct class_member *member; class__for_each_member_from(class, from, member) { if (member == to_before) break; member->tag.type = type; } } /* * Think about this pahole output a bit: * * [filo examples]$ pahole swiss_cheese cheese * / * <11b> /home/acme/git/pahole/examples/swiss_cheese.c:3 * / * struct cheese { * * int bitfield1:1; / * 64 4 * / * int bitfield2:1; / * 64 4 * / * * / * XXX 14 bits hole, try to pack * / * / * Bitfield WARNING: DWARF size=4, real size=2 * / * * short int d; / * 66 2 * / * * * The compiler (gcc 4.1.1 20070105 (Red Hat 4.1.1-51) in the above example) * decided to combine what was declared as an int (4 bytes) bitfield but doesn't * use even one byte with the next field, that is a short int (2 bytes), * without demoting the type of the bitfield to short int (2 bytes), so in terms * of alignment the real size is 2, not 4, to make things easier for the rest of * the reorganizing routines we just do the demotion ourselves, fixing up the * sizes. */ static void class__fixup_member_types(struct class *class, const struct cu *cu, const uint8_t verbose, FILE *fp) { struct class_member *pos, *bitfield_head = NULL; uint8_t fixup_was_done = 0; type__for_each_data_member(&class->type, pos) { /* * Is this a bitfield member? */ if (pos->bitfield_size != 0) { /* * The first entry in a bitfield? */ if (bitfield_head == NULL) bitfield_head = pos; continue; } /* * OK, not a bitfield member, but have we just passed * by a bitfield? */ if (bitfield_head != NULL) { const uint16_t real_size = (pos->byte_offset - bitfield_head->byte_offset); const size_t size = bitfield_head->byte_size; /* * Another case: struct irq_cfg { struct irq_pin_list * irq_2_pin; / * 0 8 * / cpumask_var_t domain; / * 8 16 * / cpumask_var_t old_domain; / * 24 16 * / u8 vector; / * 40 1 * / u8 move_in_progress:1; / * 41: 7 1 * / u8 remapped:1; / * 41: 6 1 * / / * XXX 6 bits hole, try to pack * / / * XXX 6 bytes hole, try to pack * / union { struct irq_2_iommu irq_2_iommu; / * 16 * / struct irq_2_irte irq_2_irte; / * 4 * / }; / * 48 16 * / / * --- cacheline 1 boundary (64 bytes) --- * / * So just fix it up if the byte_size of the bitfield is * greater than what it really uses.
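* * (Worked example, added as an illustrative note: in the swiss_cheese * output above real_size = 66 - 64 = 2 is smaller than the 4-byte int, * so the bitfield members get re-typed to a 2-byte base type below; in * the irq_cfg case real_size = 48 - 41 = 7 is not smaller than the * 1-byte u8, so nothing needs fixing.)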
*/ if (real_size < size) { type_id_t new_type_id; struct tag *new_type_tag = cu__find_base_type_of_size(cu, real_size, &new_type_id); if (new_type_tag == NULL) { fprintf(stderr, "%s: couldn't find" " a base_type of %d bytes!\n", __func__, real_size); continue; } class__fixup_bitfield_types(class, bitfield_head, pos, new_type_id); fixup_was_done = 1; } } bitfield_head = NULL; } if (fixup_was_done) { class__recalc_holes(class); } if (verbose && fixup_was_done) { fprintf(fp, "/* bitfield types were fixed */\n"); if (verbose > 1) { class__fprintf(class, cu, fp); fputc('\n', fp); } } } void class__reorganize(struct class *class, const struct cu *cu, const int verbose, FILE *fp) { struct class_member *member, *brother, *last_member; size_t alignment_size; class__find_holes(class); #ifdef BITFIELD_REORG_ALGORITHMS_ENABLED class__fixup_member_types(class, cu, verbose, fp); while (class__demote_bitfields(class, cu, verbose, fp)) class__reorganize_bitfields(class, cu, verbose, fp); #endif /* Now try to combine holes */ restart: alignment_size = 0; /* * It can be NULL if this class doesn't have any data members, * just inheritance entries */ last_member = type__last_member(&class->type); if (last_member == NULL) return; type__for_each_data_member(&class->type, member) { const size_t aligned_size = member->byte_size + member->hole; if (aligned_size <= cu->addr_size && aligned_size > alignment_size) alignment_size = aligned_size; } if (alignment_size != 0) { size_t modulo; uint16_t new_padding; if (alignment_size > 1) alignment_size = roundup(alignment_size, 2); modulo = (last_member->byte_offset + last_member->byte_size) % alignment_size; if (modulo != 0) new_padding = cu->addr_size - modulo; else new_padding = 0; if (new_padding != class->padding) { class->padding = new_padding; class->type.size = (last_member->byte_offset + last_member->byte_size + new_padding); } } type__for_each_data_member(&class->type, member) { /* See if we have a hole after this member */ if (member->hole != 0) { /* * OK, try to find a member that has a hole after it * and that has a size that fits the current hole: */ brother = class__find_next_hole_of_size(class, member, member->hole); if (brother != NULL) { struct class_member *brother_prev = list_entry(brother->tag.node.prev, struct class_member, tag.node); /* * If it is the next member, avoid moving it closer, * it could be an explicit alignment rule, like * ____cacheline_aligned_in_smp in the Linux * kernel. */ if (brother_prev != member) { if (class__move_member(class, member, brother, cu, 0, verbose, fp)) goto restart; } } /* * OK, but is there padding? If so the last member * has a hole, if we are not at the last member and * it has a size that is smaller than the current hole * we can move it after the current member, reducing * the padding or eliminating it altogether.
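* E.g. (illustrative, not from the original comment): in * struct { int i; long l; int tail; } on an LP64 target, 'tail' (4 bytes) * fits the 4-byte hole after 'i', so moving it there removes the 4 bytes * of tail padding and shrinks the struct from 24 to 16 bytes.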
*/ if (class->padding > 0 && member != last_member && last_member->byte_size != 0 && last_member->byte_size <= member->hole) { if (class__move_member(class, member, last_member, cu, 1, verbose, fp)) goto restart; } } } /* Now try to move members at the tail to after holes */ if (class->nr_holes == 0) return; type__for_each_data_member(&class->type, member) { /* See if we have a hole after this member */ if (member->hole != 0) { brother = class__find_last_member_of_size(class, member, member->hole); if (brother != NULL) { struct class_member *brother_prev = list_entry(brother->tag.node.prev, struct class_member, tag.node); /* * If it is the next member, avoid moving it closer, * it could be an explicit alignment rule, like * ____cacheline_aligned_in_smp in the Linux * kernel. */ if (brother_prev != member) { if (class__move_member(class, member, brother, cu, 0, verbose, fp)) goto restart; } } } } } dwarves-dfsg-1.15/dwarves_reorganize.h000066400000000000000000000014471350511416500201140ustar00rootroot00000000000000#ifndef _DWARVES_REORGANIZE_H_ #define _DWARVES_REORGANIZE_H_ 1 /* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2006 Mandriva Conectiva S.A. Copyright (C) 2006 Arnaldo Carvalho de Melo Copyright (C) 2007 Arnaldo Carvalho de Melo */ #include #include struct class; struct cu; struct class_member; void class__subtract_offsets_from(struct class *cls, struct class_member *from, const uint16_t size); void class__add_offsets_from(struct class *cls, struct class_member *from, const uint16_t size); void class__fixup_alignment(struct class *cls, const struct cu *cu); void class__reorganize(struct class *cls, const struct cu *cu, const int verbose, FILE *fp); #endif /* _DWARVES_REORGANIZE_H_ */ dwarves-dfsg-1.15/elf_symtab.c000066400000000000000000000024521350511416500163310ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2009 Red Hat Inc. Copyright (C) 2009 Arnaldo Carvalho de Melo */ #include #include #include #include "dutil.h" #include "elf_symtab.h" #define HASHSYMS__BITS 8 #define HASHSYMS__SIZE (1UL << HASHSYMS__BITS) struct elf_symtab *elf_symtab__new(const char *name, Elf *elf, GElf_Ehdr *ehdr) { if (name == NULL) name = ".symtab"; GElf_Shdr shdr; Elf_Scn *sec = elf_section_by_name(elf, ehdr, &shdr, name, NULL); if (sec == NULL) return NULL; if (gelf_getshdr(sec, &shdr) == NULL) return NULL; struct elf_symtab *symtab = malloc(sizeof(*symtab)); if (symtab == NULL) return NULL; symtab->name = strdup(name); if (symtab->name == NULL) goto out_delete; symtab->syms = elf_getdata(sec, NULL); if (symtab->syms == NULL) goto out_free_name; sec = elf_getscn(elf, shdr.sh_link); if (sec == NULL) goto out_free_name; symtab->symstrs = elf_getdata(sec, NULL); if (symtab->symstrs == NULL) goto out_free_name; symtab->nr_syms = shdr.sh_size / shdr.sh_entsize; return symtab; out_free_name: free(symtab->name); out_delete: free(symtab); return NULL; } void elf_symtab__delete(struct elf_symtab *symtab) { if (symtab == NULL) return; free(symtab->name); free(symtab); } dwarves-dfsg-1.15/elf_symtab.h000066400000000000000000000040621350511416500163350ustar00rootroot00000000000000#ifndef _ELF_SYMTAB_H_ #define _ELF_SYMTAB_H_ 1 /* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2009 Red Hat Inc.
Copyright (C) 2009 Arnaldo Carvalho de Melo */ #include #include #include #include struct elf_symtab { uint32_t nr_syms; Elf_Data *syms; Elf_Data *symstrs; char *name; }; struct elf_symtab *elf_symtab__new(const char *name, Elf *elf, GElf_Ehdr *ehdr); void elf_symtab__delete(struct elf_symtab *symtab); static inline uint32_t elf_symtab__nr_symbols(const struct elf_symtab *symtab) { return symtab->nr_syms; } static inline const char *elf_sym__name(const GElf_Sym *sym, const struct elf_symtab *symtab) { return symtab->symstrs->d_buf + sym->st_name; } static inline uint8_t elf_sym__type(const GElf_Sym *sym) { return GELF_ST_TYPE(sym->st_info); } static inline uint16_t elf_sym__section(const GElf_Sym *sym) { return sym->st_shndx; } static inline uint8_t elf_sym__bind(const GElf_Sym *sym) { return GELF_ST_BIND(sym->st_info); } static inline uint8_t elf_sym__visibility(const GElf_Sym *sym) { return GELF_ST_VISIBILITY(sym->st_other); } static inline uint32_t elf_sym__size(const GElf_Sym *sym) { return sym->st_size; } static inline uint64_t elf_sym__value(const GElf_Sym *sym) { return sym->st_value; } static inline bool elf_sym__is_local_function(const GElf_Sym *sym) { return elf_sym__type(sym) == STT_FUNC && sym->st_name != 0 && sym->st_shndx != SHN_UNDEF; } static inline bool elf_sym__is_local_object(const GElf_Sym *sym) { return elf_sym__type(sym) == STT_OBJECT && sym->st_name != 0 && sym->st_shndx != SHN_UNDEF; } /** * elf_symtab__for_each_symbol - iterate thru all the symbols * * @symtab: struct elf_symtab instance to iterate * @index: uint32_t index * @sym: GElf_Sym iterator */ #define elf_symtab__for_each_symbol(symtab, index, sym) \ for (index = 0, gelf_getsym(symtab->syms, index, &sym);\ index < symtab->nr_syms; \ index++, gelf_getsym(symtab->syms, index, &sym)) #endif /* _ELF_SYMTAB_H_ */ dwarves-dfsg-1.15/elfcreator.c000066400000000000000000000145501350511416500163340ustar00rootroot00000000000000/* * SPDX-License-Identifier: GPL-2.0-only * * Copyright 2009 Red Hat, Inc. * * Author: Peter Jones */ #include #include #include #include #include #include #include #include #include #include "elfcreator.h" struct elf_creator { const char *path; int fd; Elf *elf; GElf_Ehdr *ehdr, ehdr_mem; Elf *oldelf; /* just because we have to look this up /so/ often... 
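(note added for clarity: the SHT_DYNAMIC section, its header and its data are cached in the fields below and refreshed by update_dyn_cache() whenever the layout changes)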
*/ Elf_Scn *dynscn; GElf_Shdr *dynshdr, dynshdr_mem; Elf_Data *dyndata; }; static void clear(ElfCreator *ctor, int do_unlink) { if (do_unlink) { if (ctor->elf) elf_end(ctor->elf); if (ctor->fd >= 0) close(ctor->fd); if (ctor->path) unlink(ctor->path); } else { if (ctor->elf) { elf_update(ctor->elf, ELF_C_WRITE_MMAP); elf_end(ctor->elf); } if (ctor->fd >= 0) close(ctor->fd); } memset(ctor, '\0', sizeof(*ctor)); } ElfCreator *elfcreator_begin(char *path, Elf *elf) { ElfCreator *ctor = NULL; GElf_Ehdr ehdr_mem, *ehdr; GElf_Half machine; if (!(ctor = calloc(1, sizeof(*ctor)))) return NULL; clear(ctor, 0); ctor->path = path; ctor->oldelf = elf; ehdr = gelf_getehdr(elf, &ehdr_mem); machine = ehdr->e_machine; if ((ctor->fd = open(path, O_RDWR|O_CREAT|O_TRUNC, 0755)) < 0) { err: clear(ctor, 1); free(ctor); return NULL; } if (!(ctor->elf = elf_begin(ctor->fd, ELF_C_WRITE_MMAP, elf))) goto err; gelf_newehdr(ctor->elf, gelf_getclass(elf)); gelf_update_ehdr(ctor->elf, ehdr); if (!(ctor->ehdr = gelf_getehdr(ctor->elf, &ctor->ehdr_mem))) goto err; return ctor; } static Elf_Scn *get_scn_by_type(ElfCreator *ctor, Elf64_Word sh_type) { Elf_Scn *scn = NULL; while ((scn = elf_nextscn(ctor->elf, scn)) != NULL) { GElf_Shdr *shdr, shdr_mem; shdr = gelf_getshdr(scn, &shdr_mem); if (shdr->sh_type == sh_type) return scn; } return NULL; } static void update_dyn_cache(ElfCreator *ctor) { ctor->dynscn = get_scn_by_type(ctor, SHT_DYNAMIC); if (ctor->dynscn == NULL) return; ctor->dynshdr = gelf_getshdr(ctor->dynscn, &ctor->dynshdr_mem); ctor->dyndata = elf_getdata(ctor->dynscn, NULL); } void elfcreator_copy_scn(ElfCreator *ctor, Elf *src, Elf_Scn *scn) { Elf_Scn *newscn; Elf_Data *indata, *outdata; GElf_Shdr *oldshdr, oldshdr_mem; GElf_Shdr *newshdr, newshdr_mem; newscn = elf_newscn(ctor->elf); newshdr = gelf_getshdr(newscn, &newshdr_mem); oldshdr = gelf_getshdr(scn, &oldshdr_mem); memmove(newshdr, oldshdr, sizeof(*newshdr)); gelf_update_shdr(newscn, newshdr); indata = NULL; while ((indata = elf_getdata(scn, indata)) != NULL) { outdata = elf_newdata(newscn); *outdata = *indata; } if (newshdr->sh_type == SHT_DYNAMIC) update_dyn_cache(ctor); } static GElf_Dyn *get_dyn_by_tag(ElfCreator *ctor, Elf64_Sxword d_tag, GElf_Dyn *mem, size_t *idx) { size_t cnt; if (!ctor->dyndata) return NULL; for (cnt = 1; cnt < ctor->dynshdr->sh_size / ctor->dynshdr->sh_entsize; cnt++) { GElf_Dyn *dyn; if ((dyn = gelf_getdyn(ctor->dyndata, cnt, mem)) == NULL) break; if (dyn->d_tag == d_tag) { *idx = cnt; return dyn; } } return NULL; } static void remove_dyn(ElfCreator *ctor, size_t idx) { size_t cnt; for (cnt = idx; cnt < ctor->dynshdr->sh_size/ctor->dynshdr->sh_entsize; cnt++) { GElf_Dyn *dyn, dyn_mem; if (cnt+1 == ctor->dynshdr->sh_size/ctor->dynshdr->sh_entsize) { memset(&dyn_mem, '\0', sizeof(dyn_mem)); gelf_update_dyn(ctor->dyndata, cnt, &dyn_mem); break; } dyn = gelf_getdyn(ctor->dyndata, cnt+1, &dyn_mem); gelf_update_dyn(ctor->dyndata, cnt, dyn); } ctor->dynshdr->sh_size--; gelf_update_shdr(ctor->dynscn, ctor->dynshdr); update_dyn_cache(ctor); } typedef void (*dyn_fixup_fn)(ElfCreator *ctor, Elf64_Sxword d_tag, Elf_Scn *scn); static void generic_dyn_fixup_fn(ElfCreator *ctor, Elf64_Sxword d_tag, Elf_Scn *scn) { GElf_Shdr *shdr, shdr_mem; GElf_Dyn *dyn, dyn_mem; size_t idx; dyn = get_dyn_by_tag(ctor, d_tag, &dyn_mem, &idx); shdr = gelf_getshdr(scn, &shdr_mem); if (shdr) { dyn->d_un.d_ptr = shdr->sh_addr; gelf_update_dyn(ctor->dyndata, idx, dyn); } else { remove_dyn(ctor, idx); } } static void rela_dyn_fixup_fn(ElfCreator *ctor, 
Elf64_Sxword d_tag, Elf_Scn *scn) { GElf_Shdr *shdr, shdr_mem; GElf_Dyn *dyn, dyn_mem; size_t idx; dyn = get_dyn_by_tag(ctor, d_tag, &dyn_mem, &idx); shdr = gelf_getshdr(scn, &shdr_mem); if (shdr) { dyn->d_un.d_ptr = shdr->sh_addr; gelf_update_dyn(ctor->dyndata, idx, dyn); } else { remove_dyn(ctor, idx); dyn = get_dyn_by_tag(ctor, DT_RELASZ, &dyn_mem, &idx); if (dyn) { dyn->d_un.d_val = 0; gelf_update_dyn(ctor->dyndata, idx, dyn); } } } static void rel_dyn_fixup_fn(ElfCreator *ctor, Elf64_Sxword d_tag, Elf_Scn *scn) { GElf_Shdr *shdr, shdr_mem; GElf_Dyn *dyn, dyn_mem; size_t idx; dyn = get_dyn_by_tag(ctor, d_tag, &dyn_mem, &idx); shdr = gelf_getshdr(scn, &shdr_mem); if (shdr) { dyn->d_un.d_ptr = shdr->sh_addr; gelf_update_dyn(ctor->dyndata, idx, dyn); } else { remove_dyn(ctor, idx); dyn = get_dyn_by_tag(ctor, DT_RELSZ, &dyn_mem, &idx); if (dyn) { dyn->d_un.d_val = 0; gelf_update_dyn(ctor->dyndata, idx, dyn); } } } static void fixup_dynamic(ElfCreator *ctor) { struct { Elf64_Sxword d_tag; Elf64_Word sh_type; dyn_fixup_fn fn; } fixups[] = { { DT_HASH, SHT_HASH, NULL }, { DT_STRTAB, SHT_STRTAB, NULL }, { DT_SYMTAB, SHT_SYMTAB, NULL }, { DT_RELA, SHT_RELA, rela_dyn_fixup_fn}, { DT_REL, SHT_REL, rel_dyn_fixup_fn}, { DT_GNU_HASH, SHT_GNU_HASH, NULL }, { DT_NULL, SHT_NULL, NULL } }; int i; for (i = 0; fixups[i].d_tag != DT_NULL; i++) { Elf_Scn *scn; scn = get_scn_by_type(ctor, fixups[i].sh_type); if (fixups[i].fn) fixups[i].fn(ctor, fixups[i].d_tag, scn); else generic_dyn_fixup_fn(ctor, fixups[i].d_tag, scn); } } void elfcreator_end(ElfCreator *ctor) { GElf_Phdr phdr_mem, *phdr; int m,n; for (m = 0; (phdr = gelf_getphdr(ctor->oldelf, m, &phdr_mem)) != NULL; m++) /* XXX this should check if an entry is needed */; gelf_newphdr(ctor->elf, m); elf_update(ctor->elf, ELF_C_NULL); update_dyn_cache(ctor); for (n = 0; n < m; n++) { /* XXX this should check if an entry is needed */ phdr = gelf_getphdr(ctor->oldelf, n, &phdr_mem); if (ctor->dynshdr && phdr->p_type == PT_DYNAMIC) phdr->p_offset = ctor->dynshdr->sh_offset; gelf_update_phdr(ctor->elf, n, phdr); } fixup_dynamic(ctor); clear(ctor, 0); free(ctor); } dwarves-dfsg-1.15/elfcreator.h000066400000000000000000000006671350511416500163430ustar00rootroot00000000000000/* * SPDX-License-Identifier: GPL-2.0-only * * Copyright 2009 Red Hat, Inc. * * Author: Peter Jones */ #ifndef ELFCREATOR_H #define ELFCREATOR_H 1 #include typedef struct elf_creator ElfCreator; extern ElfCreator *elfcreator_begin(char *path, Elf *elf); extern void elfcreator_copy_scn(ElfCreator *ctor, Elf *src, Elf_Scn *scn); extern void elfcreator_end(ElfCreator *ctor); #endif /* ELFCREATOR_H */ dwarves-dfsg-1.15/fullcircle000077500000000000000000000026211350511416500161100ustar00rootroot00000000000000#!/bin/bash # SPDX-License-Identifier: GPL-2.0-only # Copyright © 2019 Red Hat Inc, Arnaldo Carvalho de Melo # Use pfunct to produce compilable output from an object, then do a codiff -s # To see if the type information generated from source code generated # from type information in a file compiled from the original source code matches.
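# Example invocation (illustrative, any object with DWARF works):
#
#   fullcircle foo.o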
if [ $# -eq 0 ] ; then echo "Usage: fullcircle " exit 1 fi file=$1 nr_cus=$(readelf -wi ${file} | grep DW_TAG_compile_unit | wc -l) if [ $nr_cus -gt 1 ]; then exit 0 fi c_output=$(mktemp /tmp/fullcircle.XXXXXX.c) o_output=$(mktemp /tmp/fullcircle.XXXXXX.o) pfunct_bin=${PFUNCT-"pfunct"} codiff_bin=${CODIFF-"codiff"} # See what your DW_AT_producer looks like and find the # right regexp to get after the GCC version string, this one # seems good enough for Red Hat/Fedora/CentOS that look like: # # DW_AT_producer : (indirect string, offset: 0x3583): GNU C89 8.2.1 20181215 (Red Hat 8.2.1-6) -mno-sse -mno-mmx # # So we need from -mno-sse onwards CFLAGS=$(readelf -wi $file | grep -w DW_AT_producer | sed -r 's/.*\)( -[[:alnum:]]+.*)+/\1/g') # Check if we managed to do the sed or if this is something like GNU AS [ "${CFLAGS/DW_AT_producer/}" != "${CFLAGS}" ] && exit ${pfunct_bin} --compile $file > $c_output gcc $CFLAGS -c -g $c_output -o $o_output ${codiff_bin} -q -s $file $o_output rm -f $c_output $o_output exit 0 dwarves-dfsg-1.15/gobuffer.c000066400000000000000000000052161350511416500160040ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2008 Arnaldo Carvalho de Melo Grow-only buffer, add entries but never delete */ #include "gobuffer.h" #include #include #include #include #include #include #include #include "dutil.h" #define GOBUFFER__BCHUNK (8 * 1024) #define GOBUFFER__ZCHUNK (8 * 1024) void gobuffer__init(struct gobuffer *gb) { gb->entries = NULL; gb->nr_entries = gb->allocated_size = 0; /* 0 == NULL */ gb->index = 1; } struct gobuffer *gobuffer__new(void) { struct gobuffer *gb = malloc(sizeof(*gb)); if (gb != NULL) gobuffer__init(gb); return gb; } void __gobuffer__delete(struct gobuffer *gb) { free(gb->entries); } void gobuffer__delete(struct gobuffer *gb) { __gobuffer__delete(gb); free(gb); } void *gobuffer__ptr(const struct gobuffer *gb, unsigned int s) { return s ?
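/* offset 0 is reserved to mean NULL, see gobuffer__init(): index starts at 1 */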
gb->entries + s : NULL; } int gobuffer__allocate(struct gobuffer *gb, unsigned int len) { const unsigned int rc = gb->index; const unsigned int index = gb->index + len; if (index >= gb->allocated_size) { unsigned int allocated_size = (gb->allocated_size + GOBUFFER__BCHUNK); if (allocated_size < index) allocated_size = index + GOBUFFER__BCHUNK; char *entries = realloc(gb->entries, allocated_size); if (entries == NULL) return -ENOMEM; gb->allocated_size = allocated_size; gb->entries = entries; } gb->index = index; return rc; } int gobuffer__add(struct gobuffer *gb, const void *s, unsigned int len) { const int rc = gobuffer__allocate(gb, len); if (rc >= 0) { ++gb->nr_entries; memcpy(gb->entries + rc, s, len); } return rc; } void gobuffer__copy(const struct gobuffer *gb, void *dest) { memcpy(dest, gb->entries, gobuffer__size(gb)); } const void *gobuffer__compress(struct gobuffer *gb, unsigned int *size) { z_stream z = { .zalloc = Z_NULL, .zfree = Z_NULL, .opaque = Z_NULL, .avail_in = gobuffer__size(gb), .next_in = (Bytef *)gobuffer__entries(gb), }; void *bf = NULL; unsigned int bf_size = 0; if (deflateInit(&z, Z_BEST_COMPRESSION) != Z_OK) goto out_free; do { const unsigned int new_bf_size = bf_size + GOBUFFER__ZCHUNK; void *nbf = realloc(bf, new_bf_size); if (nbf == NULL) goto out_close_and_free; bf = nbf; z.avail_out = GOBUFFER__ZCHUNK; z.next_out = (Bytef *)bf + bf_size; bf_size = new_bf_size; if (deflate(&z, Z_FINISH) == Z_STREAM_ERROR) goto out_close_and_free; } while (z.avail_out == 0); deflateEnd(&z); *size = bf_size - z.avail_out; out: return bf; out_close_and_free: deflateEnd(&z); out_free: free(bf); bf = NULL; goto out; } dwarves-dfsg-1.15/gobuffer.h000066400000000000000000000021151350511416500160040ustar00rootroot00000000000000#ifndef _GOBUFFER_H_ #define _GOBUFFER_H_ 1 /* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2008 Arnaldo Carvalho de Melo */ struct gobuffer { char *entries; unsigned int nr_entries; unsigned int index; unsigned int allocated_size; }; struct gobuffer *gobuffer__new(void); void gobuffer__init(struct gobuffer *gb); void gobuffer__delete(struct gobuffer *gb); void __gobuffer__delete(struct gobuffer *gb); void gobuffer__copy(const struct gobuffer *gb, void *dest); int gobuffer__add(struct gobuffer *gb, const void *s, unsigned int len); int gobuffer__allocate(struct gobuffer *gb, unsigned int len); static inline const void *gobuffer__entries(const struct gobuffer *gb) { return gb->entries; } static inline unsigned int gobuffer__nr_entries(const struct gobuffer *gb) { return gb->nr_entries; } static inline unsigned int gobuffer__size(const struct gobuffer *gb) { return gb->index; } void *gobuffer__ptr(const struct gobuffer *gb, unsigned int s); const void *gobuffer__compress(struct gobuffer *gb, unsigned int *size); #endif /* _GOBUFFER_H_ */ dwarves-dfsg-1.15/hash.h000066400000000000000000000035651350511416500151420ustar00rootroot00000000000000#ifndef _LINUX_HASH_H #define _LINUX_HASH_H /* Fast hashing routine for ints, longs and pointers. (C) 2002 William Lee Irwin III, IBM */ /* * Knuth recommends primes in approximately golden ratio to the maximum * integer representable by a machine word for multiplicative hashing. * Chuck Lever verified the effectiveness of this technique: * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf * * These primes are chosen to be bit-sparse, that is operations on * them can use shifts and additions instead of multiplications for * machines where multiplications are slow. 
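* * (Illustrative aside: hash_64() below does exactly that, computing * val * GOLDEN_RATIO_PRIME_64 as a sequence of shifts, additions and * subtractions for 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1.)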
*/ #include /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ #define GOLDEN_RATIO_PRIME_32 0x9e370001UL /* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */ #define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL #if __WORDSIZE == 32 #define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32 #define hash_long(val, bits) hash_32(val, bits) #elif __WORDSIZE == 64 #define hash_long(val, bits) hash_64(val, bits) #define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64 #else #error Wordsize not 32 or 64 #endif static inline uint64_t hash_64(const uint64_t val, const unsigned int bits) { uint64_t hash = val; /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ uint64_t n = hash; n <<= 18; hash -= n; n <<= 33; hash -= n; n <<= 3; hash += n; n <<= 3; hash -= n; n <<= 4; hash += n; n <<= 2; hash += n; /* High bits are more random, so use them. */ return hash >> (64 - bits); } static inline uint32_t hash_32(uint32_t val, unsigned int bits) { /* On some cpus multiply is faster, on others gcc will do shifts */ uint32_t hash = val * GOLDEN_RATIO_PRIME_32; /* High bits are more random, so use them. */ return hash >> (32 - bits); } static inline unsigned long hash_ptr(void *ptr, unsigned int bits) { return hash_long((unsigned long)ptr, bits); } #endif /* _LINUX_HASH_H */ dwarves-dfsg-1.15/lib/000077500000000000000000000000001350511416500146035ustar00rootroot00000000000000dwarves-dfsg-1.15/lib/Makefile000066400000000000000000000020151350511416500162410ustar00rootroot00000000000000obj-m := ctracer.o ctracer-y := ctracer_collector.o ctracer_relay.o # Files generated that shall be removed upon make clean clean-files := ctracer_collector.c CLASS=sock #KDIR := /home/acme/git/OUTPUT/qemu/linux-2.6/ KDIR := /lib/modules/$(shell uname -r)/build PWD := $(shell pwd) default: $(MAKE) -C $(KDIR) SUBDIRS=$(PWD) modules clean: rm -rf .*.mod.c .*o.cmd *.mod.c *.ko *.o \ ctracer_collector.c ctracer_methods.stp \ ctracer_classes.h \ Module.symvers .tmp_versions/ \ $(CLASS).{fields,functions} ctracer2ostra* $(src)/ctracer2ostra: ctracer_methods.stp $(CC) $@.c -o $@ cu_blacklist_file=/usr/share/dwarves/runtime/linux.blacklist.cu LOG=/tmp/ctracer.log callgraph: ctracer2ostra ./ctracer2ostra < $(LOG) > $(LOG).ostra ; \ rm -rf $(CLASS).callgraph ; \ PYTHONPATH=python/ ostra-cg $(CLASS) $(LOG).ostra $(obj)/ctracer_collector.o: ctracer_collector.c $(src)/ctracer_collector.c: ctracer --src_dir $(src) /usr/lib/debug/lib/modules/$(shell uname -r)/vmlinux \ --cu_blacklist $(cu_blacklist_file) $(CLASS) dwarves-dfsg-1.15/lib/bpf/000077500000000000000000000000001350511416500153525ustar00rootroot00000000000000dwarves-dfsg-1.15/lib/ctracer_relay.c000066400000000000000000000046561350511416500176010ustar00rootroot00000000000000/* Copyright (C) 2007 Arnaldo Carvalho de Melo This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. 
*/ #include #include #include #include #include #include #include #include #include "ctracer_relay.h" static struct rchan *ctracer__rchan; static int ctracer__subbuf_start_callback(struct rchan_buf *buf, void *subbuf, void *prev_subbuf, size_t prev_padding) { static int warned; if (!relay_buf_full(buf)) return 1; if (!warned) { warned = 1; printk("relay_buf_full!\n"); } return 0; } static struct dentry *ctracer__create_buf_file_callback(const char *filename, struct dentry *parent, int mode, struct rchan_buf *buf, int *is_global) { return debugfs_create_file(filename, mode, parent, buf, &relay_file_operations); } static int ctracer__remove_buf_file_callback(struct dentry *dentry) { debugfs_remove(dentry); return 0; } static struct rchan_callbacks ctracer__relay_callbacks = { .subbuf_start = ctracer__subbuf_start_callback, .create_buf_file = ctracer__create_buf_file_callback, .remove_buf_file = ctracer__remove_buf_file_callback, }; extern void ctracer__class_state(const void *from, void *to); void ctracer__method_hook(const unsigned long long now, const int probe_type, const unsigned long long function_id, const void *object, const int state_len) { if (object != NULL) { void *t = relay_reserve(ctracer__rchan, sizeof(struct trace_entry) + state_len); if (t != NULL) { struct trace_entry *entry = t; entry->nsec = now; entry->probe_type = probe_type; entry->object = object; entry->function_id = function_id; ctracer__class_state(object, t + sizeof(*entry)); } } } EXPORT_SYMBOL_GPL(ctracer__method_hook); static int __init ctracer__relay_init(void) { ctracer__rchan = relay_open("ctracer", NULL, 512 * 1024, 64, &ctracer__relay_callbacks, NULL); if (ctracer__rchan == NULL) { pr_info("ctracer: couldn't create the relay\n"); return -1; } return 0; } module_init(ctracer__relay_init); static void __exit ctracer__relay_exit(void) { relay_close(ctracer__rchan); } module_exit(ctracer__relay_exit); MODULE_LICENSE("GPL"); dwarves-dfsg-1.15/lib/ctracer_relay.h000066400000000000000000000012211350511416500175670ustar00rootroot00000000000000#ifndef _CTRACER_RELAY_H_ #define _CTRACER_RELAY_H_ 1 /* Copyright (C) 2007 Arnaldo Carvalho de Melo This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. 
*/ struct trace_entry { unsigned long long nsec; unsigned long long probe_type:1; /* Entry or exit */ unsigned long long function_id:63; const void *object; }; void ctracer__method_hook(const unsigned long long now, const int probe_type, const unsigned long long function, const void *object, const int state_len); #endif dwarves-dfsg-1.15/lib/linux.blacklist.cu000066400000000000000000000000401350511416500202340ustar00rootroot00000000000000kernel/kprobes.c kernel/relay.c dwarves-dfsg-1.15/libbtf.c000066400000000000000000000424121350511416500154460ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2019 Facebook */ #include #include #include #include #include #include #include #include #include #include #include "libbtf.h" #include "lib/bpf/include/uapi/linux/btf.h" #include "lib/bpf/include/linux/err.h" #include "lib/bpf/src/btf.h" #include "lib/bpf/src/libbpf.h" #include "dutil.h" #include "gobuffer.h" #include "dwarves.h" #define BTF_INFO_ENCODE(kind, kind_flag, vlen) \ ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN)) #define BTF_INT_ENCODE(encoding, bits_offset, nr_bits) \ ((encoding) << 24 | (bits_offset) << 16 | (nr_bits)) struct btf_int_type { struct btf_type type; uint32_t data; }; struct btf_enum_type { struct btf_type type; struct btf_enum btf_enum; }; struct btf_array_type { struct btf_type type; struct btf_array array; }; uint8_t btf_elf__verbose; uint32_t btf_elf__get32(struct btf_elf *btfe, uint32_t *p) { uint32_t val = *p; if (btfe->swapped) val = ((val >> 24) | ((val >> 8) & 0x0000ff00) | ((val << 8) & 0x00ff0000) | (val << 24)); return val; } int btf_elf__load(struct btf_elf *btfe) { int err = -ENOTSUP; GElf_Shdr shdr; Elf_Scn *sec = elf_section_by_name(btfe->elf, &btfe->ehdr, &shdr, ".BTF", NULL); if (sec == NULL) return -ESRCH; Elf_Data *data = elf_getdata(sec, NULL); if (data == NULL) { fprintf(stderr, "%s: cannot get data of BTF section.\n", __func__); return -1; } struct btf_header *hp = data->d_buf; size_t orig_size = data->d_size; if (hp->version != BTF_VERSION) goto out; err = -EINVAL; if (hp->magic == BTF_MAGIC) btfe->swapped = 0; else goto out; err = -ENOMEM; btfe->data = malloc(orig_size); if (btfe->data != NULL) { memcpy(btfe->data, hp, orig_size); btfe->size = orig_size; err = 0; } out: return err; } struct btf_elf *btf_elf__new(const char *filename, Elf *elf) { struct btf_elf *btfe = zalloc(sizeof(*btfe)); if (!btfe) return NULL; btfe->in_fd = -1; btfe->filename = strdup(filename); if (btfe->filename == NULL) goto errout; if (elf != NULL) { btfe->elf = elf; } else { btfe->in_fd = open(filename, O_RDONLY); if (btfe->in_fd < 0) goto errout; if (elf_version(EV_CURRENT) == EV_NONE) { fprintf(stderr, "%s: cannot set libelf version.\n", __func__); goto errout; } btfe->elf = elf_begin(btfe->in_fd, ELF_C_READ_MMAP, NULL); if (!btfe->elf) { fprintf(stderr, "%s: cannot read %s ELF file.\n", __func__, filename); goto errout; } } if (gelf_getehdr(btfe->elf, &btfe->ehdr) == NULL) { fprintf(stderr, "%s: cannot get elf header.\n", __func__); goto errout; } switch (btfe->ehdr.e_ident[EI_DATA]) { case ELFDATA2LSB: btfe->is_big_endian = false; break; case ELFDATA2MSB: btfe->is_big_endian = true; break; default: fprintf(stderr, "%s: unknown elf endianness.\n", __func__); goto errout; } switch (btfe->ehdr.e_ident[EI_CLASS]) { case ELFCLASS32: btfe->wordsize = 4; break; case ELFCLASS64: btfe->wordsize = 8; break; default: btfe->wordsize = 0; break; } return btfe; errout: btf_elf__delete(btfe); return NULL; } void 
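/* note added: the Elf handle and file descriptor below are only closed when btf_elf__new() opened them itself, i.e. when in_fd != -1 */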
btf_elf__delete(struct btf_elf *btfe) { if (!btfe) return; if (btfe->in_fd != -1) { close(btfe->in_fd); if (btfe->elf) elf_end(btfe->elf); } __gobuffer__delete(&btfe->types); free(btfe->filename); free(btfe->data); free(btfe); } char *btf_elf__string(struct btf_elf *btfe, uint32_t ref) { struct btf_header *hp = btfe->hdr; uint32_t off = ref; char *name; if (off >= btf_elf__get32(btfe, &hp->str_len)) return "(ref out-of-bounds)"; if ((off + btf_elf__get32(btfe, &hp->str_off)) >= btfe->size) return "(string table truncated)"; name = ((char *)(hp + 1) + btf_elf__get32(btfe, &hp->str_off) + off); return name[0] == '\0' ? NULL : name; } static void *btf_elf__nohdr_data(struct btf_elf *btfe) { return btfe->hdr + 1; } void btf_elf__set_strings(struct btf_elf *btfe, struct gobuffer *strings) { btfe->strings = strings; } #define BITS_PER_BYTE 8 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1) #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK) #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3) #define BITS_ROUNDUP_BYTES(bits) (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits)) static const char * const btf_kind_str[NR_BTF_KINDS] = { [BTF_KIND_UNKN] = "UNKNOWN", [BTF_KIND_INT] = "INT", [BTF_KIND_PTR] = "PTR", [BTF_KIND_ARRAY] = "ARRAY", [BTF_KIND_STRUCT] = "STRUCT", [BTF_KIND_UNION] = "UNION", [BTF_KIND_ENUM] = "ENUM", [BTF_KIND_FWD] = "FWD", [BTF_KIND_TYPEDEF] = "TYPEDEF", [BTF_KIND_VOLATILE] = "VOLATILE", [BTF_KIND_CONST] = "CONST", [BTF_KIND_RESTRICT] = "RESTRICT", [BTF_KIND_FUNC] = "FUNC", [BTF_KIND_FUNC_PROTO] = "FUNC_PROTO", }; static const char *btf_elf__name_in_gobuf(const struct btf_elf *btfe, uint32_t offset) { if (!offset) return "(anon)"; else return &btfe->strings->entries[offset]; } static const char * btf_elf__int_encoding_str(uint8_t encoding) { if (encoding == 0) return "(none)"; else if (encoding == BTF_INT_SIGNED) return "SIGNED"; else if (encoding == BTF_INT_CHAR) return "CHAR"; else if (encoding == BTF_INT_BOOL) return "BOOL"; else return "UNKN"; } __attribute ((format (printf, 5, 6))) static void btf_elf__log_type(const struct btf_elf *btfe, const struct btf_type *t, bool err, bool output_cr, const char *fmt, ...) { uint8_t kind; FILE *out; if (!btf_elf__verbose && !err) return; kind = BTF_INFO_KIND(t->info); out = err ? stderr : stdout; fprintf(out, "[%u] %s %s", btfe->type_index, btf_kind_str[kind], btf_elf__name_in_gobuf(btfe, t->name_off)); if (fmt && *fmt) { va_list ap; fprintf(out, " "); va_start(ap, fmt); vfprintf(out, fmt, ap); va_end(ap); } if (output_cr) fprintf(out, "\n"); } __attribute ((format (printf, 5, 6))) static void btf_log_member(const struct btf_elf *btfe, const struct btf_member *member, bool kind_flag, bool err, const char *fmt, ...) { FILE *out; if (!btf_elf__verbose && !err) return; out = err ? stderr : stdout; if (kind_flag) fprintf(out, "\t%s type_id=%u bitfield_size=%u bits_offset=%u", btf_elf__name_in_gobuf(btfe, member->name_off), member->type, BTF_MEMBER_BITFIELD_SIZE(member->offset), BTF_MEMBER_BIT_OFFSET(member->offset)); else fprintf(out, "\t%s type_id=%u bits_offset=%u", btf_elf__name_in_gobuf(btfe, member->name_off), member->type, member->offset); if (fmt && *fmt) { va_list ap; fprintf(out, " "); va_start(ap, fmt); vfprintf(out, fmt, ap); va_end(ap); } fprintf(out, "\n"); } __attribute ((format (printf, 6, 7))) static void btf_log_func_param(const struct btf_elf *btfe, uint32_t name_off, uint32_t type, bool err, bool is_last_param, const char *fmt, ...) { FILE *out; if (!btf_elf__verbose && !err) return; out = err ? 
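/* errors always go to stderr, the verbose type log goes to stdout */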
stderr : stdout; if (is_last_param && !type) fprintf(out, "vararg)\n"); else fprintf(out, "%u %s%s", type, btf_elf__name_in_gobuf(btfe, name_off), is_last_param ? ")\n" : ", "); if (fmt && *fmt) { va_list ap; fprintf(out, " "); va_start(ap, fmt); vfprintf(out, fmt, ap); va_end(ap); } } int32_t btf_elf__add_base_type(struct btf_elf *btfe, const struct base_type *bt) { struct btf_int_type int_type; struct btf_type *t = &int_type.type; uint8_t encoding = 0; t->name_off = bt->name; t->info = BTF_INFO_ENCODE(BTF_KIND_INT, 0, 0); t->size = BITS_ROUNDUP_BYTES(bt->bit_size); if (bt->is_signed) { encoding = BTF_INT_SIGNED; } else if (bt->is_bool) { encoding = BTF_INT_BOOL; } else if (bt->float_type) { fprintf(stderr, "float_type is not supported\n"); return -1; } int_type.data = BTF_INT_ENCODE(encoding, 0, bt->bit_size); ++btfe->type_index; if (gobuffer__add(&btfe->types, &int_type, sizeof(int_type)) >= 0) { btf_elf__log_type(btfe, t, false, true, "size=%u bit_offset=%u nr_bits=%u encoding=%s", t->size, BTF_INT_OFFSET(int_type.data), BTF_INT_BITS(int_type.data), btf_elf__int_encoding_str(BTF_INT_ENCODING(int_type.data))); return btfe->type_index; } else { btf_elf__log_type(btfe, t, true, true, "size=%u bit_offset=%u nr_bits=%u encoding=%s Error in adding gobuffer", t->size, BTF_INT_OFFSET(int_type.data), BTF_INT_BITS(int_type.data), btf_elf__int_encoding_str(BTF_INT_ENCODING(int_type.data))); return -1; } } int32_t btf_elf__add_ref_type(struct btf_elf *btfe, uint16_t kind, uint32_t type, uint32_t name, bool kind_flag) { struct btf_type t; t.name_off = name; t.info = BTF_INFO_ENCODE(kind, kind_flag, 0); t.type = type; ++btfe->type_index; if (gobuffer__add(&btfe->types, &t, sizeof(t)) >= 0) { if (kind == BTF_KIND_FWD) btf_elf__log_type(btfe, &t, false, true, "%s", kind_flag ? "union" : "struct"); else btf_elf__log_type(btfe, &t, false, true, "type_id=%u", t.type); return btfe->type_index; } else { btf_elf__log_type(btfe, &t, true, true, "kind_flag=%d type_id=%u Error in adding gobuffer", kind_flag, t.type); return -1; } } int32_t btf_elf__add_array(struct btf_elf *btfe, uint32_t type, uint32_t index_type, uint32_t nelems) { struct btf_array_type array_type; struct btf_type *t = &array_type.type; struct btf_array *array = &array_type.array; t->name_off = 0; t->info = BTF_INFO_ENCODE(BTF_KIND_ARRAY, 0, 0); t->size = 0; array->type = type; array->index_type = index_type; array->nelems = nelems; ++btfe->type_index; if (gobuffer__add(&btfe->types, &array_type, sizeof(array_type)) >= 0) { btf_elf__log_type(btfe, t, false, true, "type_id=%u index_type_id=%u nr_elems=%u", array->type, array->index_type, array->nelems); return btfe->type_index; } else { btf_elf__log_type(btfe, t, true, true, "type_id=%u index_type_id=%u nr_elems=%u Error in adding gobuffer", array->type, array->index_type, array->nelems); return -1; } } int btf_elf__add_member(struct btf_elf *btfe, uint32_t name, uint32_t type, bool kind_flag, uint32_t bitfield_size, uint32_t offset) { struct btf_member member = { .name_off = name, .type = type, .offset = kind_flag ? 
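/* kind_flag layout: bitfield size in the top 8 bits, bit offset in the low 24, as decoded by BTF_MEMBER_BITFIELD_SIZE()/BTF_MEMBER_BIT_OFFSET() above */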
(bitfield_size << 24 | offset) : offset, }; if (gobuffer__add(&btfe->types, &member, sizeof(member)) >= 0) { btf_log_member(btfe, &member, kind_flag, false, NULL); return 0; } else { btf_log_member(btfe, &member, kind_flag, true, "Error in adding gobuffer"); return -1; } } int32_t btf_elf__add_struct(struct btf_elf *btfe, uint8_t kind, uint32_t name, bool kind_flag, uint32_t size, uint16_t nr_members) { struct btf_type t; t.name_off = name; t.info = BTF_INFO_ENCODE(kind, kind_flag, nr_members); t.size = size; ++btfe->type_index; if (gobuffer__add(&btfe->types, &t, sizeof(t)) >= 0) { btf_elf__log_type(btfe, &t, false, true, "kind_flag=%d size=%u vlen=%u", kind_flag, t.size, BTF_INFO_VLEN(t.info)); return btfe->type_index; } else { btf_elf__log_type(btfe, &t, true, true, "kind_flag=%d size=%u vlen=%u Error in adding gobuffer", kind_flag, t.size, BTF_INFO_VLEN(t.info)); return -1; } } int32_t btf_elf__add_enum(struct btf_elf *btfe, uint32_t name, uint32_t bit_size, uint16_t nr_entries) { struct btf_type t; t.name_off = name; t.info = BTF_INFO_ENCODE(BTF_KIND_ENUM, 0, nr_entries); t.size = BITS_ROUNDUP_BYTES(bit_size); ++btfe->type_index; if (gobuffer__add(&btfe->types, &t, sizeof(t)) >= 0) { btf_elf__log_type(btfe, &t, false, true, "size=%u vlen=%u", t.size, BTF_INFO_VLEN(t.info)); return btfe->type_index; } else { btf_elf__log_type(btfe, &t, true, true, "size=%u vlen=%u Error in adding gobuffer", t.size, BTF_INFO_VLEN(t.info)); return -1; } } int btf_elf__add_enum_val(struct btf_elf *btfe, uint32_t name, int32_t value) { struct btf_enum e = { .name_off = name, .val = value, }; if (gobuffer__add(&btfe->types, &e, sizeof(e)) < 0) { fprintf(stderr, "\t%s val=%d Error in adding gobuffer\n", btf_elf__name_in_gobuf(btfe, e.name_off), e.val); return -1; } else if (btf_elf__verbose) printf("\t%s val=%d\n", btf_elf__name_in_gobuf(btfe, e.name_off), e.val); return 0; } static int32_t btf_elf__add_func_proto_param(struct btf_elf *btfe, uint32_t name, uint32_t type, bool is_last_param) { struct btf_param param; param.name_off = name; param.type = type; if (gobuffer__add(&btfe->types, ¶m, sizeof(param)) >= 0) { btf_log_func_param(btfe, name, type, false, is_last_param, NULL); return 0; } else { btf_log_func_param(btfe, name, type, true, is_last_param, "Error in adding gobuffer"); return -1; } } int32_t btf_elf__add_func_proto(struct btf_elf *btfe, struct ftype *ftype, uint32_t type_id_off) { uint16_t nr_params, param_idx; struct parameter *param; struct btf_type t; int32_t type_id; /* add btf_type for func_proto */ nr_params = ftype->nr_parms + (ftype->unspec_parms ? 1 : 0); t.name_off = 0; t.info = BTF_INFO_ENCODE(BTF_KIND_FUNC_PROTO, 0, nr_params); t.type = ftype->tag.type == 0 ? 0 : type_id_off + ftype->tag.type; ++btfe->type_index; if (gobuffer__add(&btfe->types, &t, sizeof(t)) >= 0) { btf_elf__log_type(btfe, &t, false, false, "return=%u args=(%s", t.type, !nr_params ? "void)\n" : ""); type_id = btfe->type_index; } else { btf_elf__log_type(btfe, &t, true, true, "return=%u vlen=%u Error in adding gobuffer", t.type, BTF_INFO_VLEN(t.info)); return -1; } /* add parameters */ param_idx = 0; ftype__for_each_parameter(ftype, param) { uint32_t param_type_id = param->tag.type == 0 ? 
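/* BTF type id 0 stands for void, so it is never rebased by type_id_off */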
0 : type_id_off + param->tag.type; ++param_idx; if (btf_elf__add_func_proto_param(btfe, param->name, param_type_id, param_idx == nr_params)) return -1; } ++param_idx; if (ftype->unspec_parms) if (btf_elf__add_func_proto_param(btfe, 0, 0, param_idx == nr_params)) return -1; return type_id; } static int btf_elf__write(const char *filename, struct btf *btf) { GElf_Shdr shdr_mem, *shdr; GElf_Ehdr ehdr_mem, *ehdr; Elf_Data *btf_elf = NULL; Elf_Scn *scn = NULL; Elf *elf = NULL; const void *btf_data; uint32_t btf_size; int fd, err = -1; size_t strndx; fd = open(filename, O_RDWR); if (fd < 0) { fprintf(stderr, "Cannot open %s\n", filename); return -1; } if (elf_version(EV_CURRENT) == EV_NONE) { fprintf(stderr, "Cannot set libelf version.\n"); goto out; } elf = elf_begin(fd, ELF_C_RDWR, NULL); if (elf == NULL) { fprintf(stderr, "Cannot update ELF file.\n"); goto out; } elf_flagelf(elf, ELF_C_SET, ELF_F_DIRTY); ehdr = gelf_getehdr(elf, &ehdr_mem); if (ehdr == NULL) { fprintf(stderr, "%s: elf_getehdr failed.\n", __func__); goto out; } /* * First we look if there was already a .BTF section to overwrite. */ elf_getshdrstrndx(elf, &strndx); while ((scn = elf_nextscn(elf, scn)) != NULL) { shdr = gelf_getshdr(scn, &shdr_mem); if (shdr == NULL) continue; char *secname = elf_strptr(elf, strndx, shdr->sh_name); if (strcmp(secname, ".BTF") == 0) { btf_elf = elf_getdata(scn, btf_elf); break; } } btf_data = btf__get_raw_data(btf, &btf_size); if (btf_elf) { /* Existing .BTF section found */ btf_elf->d_buf = (void *)btf_data; btf_elf->d_size = btf_size; elf_flagdata(btf_elf, ELF_C_SET, ELF_F_DIRTY); if (elf_update(elf, ELF_C_NULL) >= 0 && elf_update(elf, ELF_C_WRITE) >= 0) err = 0; } else { const char *llvm_objcopy; char tmp_fn[PATH_MAX]; char cmd[PATH_MAX]; llvm_objcopy = getenv("LLVM_OBJCOPY"); if (!llvm_objcopy) llvm_objcopy = "llvm-objcopy"; /* Use objcopy to add a .BTF section */ snprintf(tmp_fn, sizeof(tmp_fn), "%s.btf", filename); close(fd); fd = creat(tmp_fn, S_IRUSR | S_IWUSR); if (fd == -1) { fprintf(stderr, "%s: open(%s) failed!\n", __func__, tmp_fn); goto out; } snprintf(cmd, sizeof(cmd), "%s --add-section .BTF=%s %s", llvm_objcopy, tmp_fn, filename); if (write(fd, btf_data, btf_size) == btf_size && !system(cmd)) err = 0; unlink(tmp_fn); } out: if (fd != -1) close(fd); if (elf) elf_end(elf); return err; } static int libbpf_log(enum libbpf_print_level level, const char *format, va_list args) { return vfprintf(stderr, format, args); } int btf_elf__encode(struct btf_elf *btfe, uint8_t flags) { struct btf_header *hdr; struct btf *btf; /* Empty file, nothing to do, so... done!
*/ if (gobuffer__size(&btfe->types) == 0) return 0; btfe->size = sizeof(*hdr) + (gobuffer__size(&btfe->types) + gobuffer__size(btfe->strings)); btfe->data = zalloc(btfe->size); if (btfe->data == NULL) { fprintf(stderr, "%s: malloc failed!\n", __func__); return -1; } hdr = btfe->hdr; hdr->magic = BTF_MAGIC; hdr->version = 1; hdr->flags = flags; hdr->hdr_len = sizeof(*hdr); hdr->type_off = 0; hdr->type_len = gobuffer__size(&btfe->types); hdr->str_off = hdr->type_len; hdr->str_len = gobuffer__size(btfe->strings); gobuffer__copy(&btfe->types, btf_elf__nohdr_data(btfe) + hdr->type_off); gobuffer__copy(btfe->strings, btf_elf__nohdr_data(btfe) + hdr->str_off); *(char *)(btf_elf__nohdr_data(btfe) + hdr->str_off) = '\0'; libbpf_set_print(libbpf_log); btf = btf__new(btfe->data, btfe->size); if (IS_ERR(btf)) { fprintf(stderr, "%s: btf__new failed!\n", __func__); return -1; } if (btf__dedup(btf, NULL, NULL)) { fprintf(stderr, "%s: btf__dedup failed!", __func__); return -1; } return btf_elf__write(btfe->filename, btf); } dwarves-dfsg-1.15/libbtf.h000066400000000000000000000040011350511416500154430ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2019 Facebook */ #ifndef _LIBBTF_H #define _LIBBTF_H #include "gobuffer.h" #include #include struct btf_elf { union { struct btf_header *hdr; void *data; }; void *priv; Elf *elf; GElf_Ehdr ehdr; struct gobuffer types; struct gobuffer *strings; char *filename; size_t size; int swapped; int in_fd; uint8_t wordsize; bool is_big_endian; uint32_t type_index; }; extern uint8_t btf_elf__verbose; #define btf_elf__verbose_log(fmt, ...) { if (btf_elf__verbose) printf(fmt, __VA_ARGS__); } struct base_type; struct ftype; struct btf_elf *btf_elf__new(const char *filename, Elf *elf); void btf_elf__delete(struct btf_elf *btf); int32_t btf_elf__add_base_type(struct btf_elf *btf, const struct base_type *bt); int32_t btf_elf__add_ref_type(struct btf_elf *btf, uint16_t kind, uint32_t type, uint32_t name, bool kind_flag); int btf_elf__add_member(struct btf_elf *btf, uint32_t name, uint32_t type, bool kind_flag, uint32_t bitfield_size, uint32_t bit_offset); int32_t btf_elf__add_struct(struct btf_elf *btf, uint8_t kind, uint32_t name, bool kind_flag, uint32_t size, uint16_t nr_members); int32_t btf_elf__add_array(struct btf_elf *btf, uint32_t type, uint32_t index_type, uint32_t nelems); int32_t btf_elf__add_enum(struct btf_elf *btf, uint32_t name, uint32_t size, uint16_t nr_entries); int btf_elf__add_enum_val(struct btf_elf *btf, uint32_t name, int32_t value); int32_t btf_elf__add_func_proto(struct btf_elf *btf, struct ftype *ftype, uint32_t type_id_off); void btf_elf__set_strings(struct btf_elf *btf, struct gobuffer *strings); int btf_elf__encode(struct btf_elf *btf, uint8_t flags); char *btf_elf__string(struct btf_elf *btf, uint32_t ref); int btf_elf__load(struct btf_elf *btf); uint32_t btf_elf__get32(struct btf_elf *btf, uint32_t *p); void *btf_elf__get_buffer(struct btf_elf *btf); size_t btf_elf__get_size(struct btf_elf *btf); #endif /* _LIBBTF_H */ dwarves-dfsg-1.15/libctf.c000066400000000000000000000435531350511416500154560ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2019 Arnaldo Carvalho de Melo */ #include #include #include #include #include #include #include #include #include #include #include "libctf.h" #include "ctf.h" #include "dutil.h" #include "gobuffer.h" bool ctf__ignore_symtab_function(const GElf_Sym *sym, const char *sym_name) { return (!elf_sym__is_local_function(sym) || 
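/* i.e. ignore everything but sized, default-visibility local functions, plus the __libc_csu_* helpers */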
elf_sym__visibility(sym) != STV_DEFAULT || sym->st_size == 0 || memcmp(sym_name, "__libc_csu_", sizeof("__libc_csu_") - 1) == 0); } bool ctf__ignore_symtab_object(const GElf_Sym *sym, const char *sym_name) { return (!elf_sym__is_local_object(sym) || sym->st_size == 0 || elf_sym__visibility(sym) != STV_DEFAULT || strchr(sym_name, '.') != NULL); } uint16_t ctf__get16(struct ctf *ctf, uint16_t *p) { uint16_t val = *p; if (ctf->swapped) val = ((val >> 8) | (val << 8)); return val; } uint32_t ctf__get32(struct ctf *ctf, uint32_t *p) { uint32_t val = *p; if (ctf->swapped) val = ((val >> 24) | ((val >> 8) & 0x0000ff00) | ((val << 8) & 0x00ff0000) | (val << 24)); return val; } void ctf__put16(struct ctf *ctf, uint16_t *p, uint16_t val) { if (ctf->swapped) val = ((val >> 8) | (val << 8)); *p = val; } void ctf__put32(struct ctf *ctf, uint32_t *p, uint32_t val) { if (ctf->swapped) val = ((val >> 24) | ((val >> 8) & 0x0000ff00) | ((val << 8) & 0x00ff0000) | (val << 24)); *p = val; } static int ctf__decompress(struct ctf *ctf, void *orig_buf, size_t orig_size) { struct ctf_header *hp = orig_buf; const char *err_str; z_stream state; size_t len; void *new; len = (ctf__get32(ctf, &hp->ctf_str_off) + ctf__get32(ctf, &hp->ctf_str_len)); new = malloc(len + sizeof(*hp)); if (!new) { fprintf(stderr, "CTF decompression allocation failure.\n"); return -ENOMEM; } memcpy(new, hp, sizeof(*hp)); memset(&state, 0, sizeof(state)); state.next_in = (Bytef *) (hp + 1); state.avail_in = orig_size - sizeof(*hp); state.next_out = new + sizeof(*hp); state.avail_out = len; if (inflateInit(&state) != Z_OK) { err_str = "struct ctf decompression inflateInit failure."; goto err; } if (inflate(&state, Z_FINISH) != Z_STREAM_END) { err_str = "struct ctf decompression inflate failure."; goto err; } if (inflateEnd(&state) != Z_OK) { err_str = "struct ctf decompression inflateEnd failure."; goto err; } if (state.total_out != len) { err_str = "struct ctf decompression truncation error."; goto err; } ctf->buf = new; ctf->size = len + sizeof(*hp); return 0; err: fputs(err_str, stderr); free(new); return -EINVAL; } int ctf__load(struct ctf *ctf) { int err = -ENOTSUP; GElf_Shdr shdr; Elf_Scn *sec = elf_section_by_name(ctf->elf, &ctf->ehdr, &shdr, ".SUNW_ctf", NULL); if (sec == NULL) return -ESRCH; Elf_Data *data = elf_getdata(sec, NULL); if (data == NULL) { fprintf(stderr, "%s: cannot get data of CTF section.\n", __func__); return -1; } struct ctf_header *hp = data->d_buf; size_t orig_size = data->d_size; if (hp->ctf_version != CTF_VERSION) goto out; err = -EINVAL; if (hp->ctf_magic == CTF_MAGIC) ctf->swapped = 0; else if (hp->ctf_magic == CTF_MAGIC_SWAP) ctf->swapped = 1; else goto out; if (!(hp->ctf_flags & CTF_FLAGS_COMPR)) { err = -ENOMEM; ctf->buf = malloc(orig_size); if (ctf->buf != NULL) { memcpy(ctf->buf, hp, orig_size); ctf->size = orig_size; err = 0; } } else err = ctf__decompress(ctf, hp, orig_size); out: return err; } struct ctf *ctf__new(const char *filename, Elf *elf) { struct ctf *ctf = zalloc(sizeof(*ctf)); if (ctf != NULL) { ctf->filename = strdup(filename); if (ctf->filename == NULL) goto out_delete; if (elf != NULL) { ctf->in_fd = -1; ctf->elf = elf; } else { ctf->in_fd = open(filename, O_RDONLY); if (ctf->in_fd < 0) goto out_delete_filename; if (elf_version(EV_CURRENT) == EV_NONE) { fprintf(stderr, "%s: cannot set libelf version.\n", __func__); goto out_close; } ctf->elf = elf_begin(ctf->in_fd, ELF_C_READ_MMAP, NULL); if (!ctf->elf) { fprintf(stderr, "%s: cannot read %s ELF file.\n", __func__, filename); goto out_close; 
} } if (gelf_getehdr(ctf->elf, &ctf->ehdr) == NULL) { fprintf(stderr, "%s: cannot get elf header.\n", __func__); goto out_elf_end; } switch (ctf->ehdr.e_ident[EI_CLASS]) { case ELFCLASS32: ctf->wordsize = 4; break; case ELFCLASS64: ctf->wordsize = 8; break; default: ctf->wordsize = 0; break; } } return ctf; out_elf_end: if (elf == NULL) elf_end(ctf->elf); out_close: if (elf == NULL) close(ctf->in_fd); out_delete_filename: free(ctf->filename); out_delete: free(ctf); return NULL; } void ctf__delete(struct ctf *ctf) { if (ctf != NULL) { if (ctf->in_fd != -1) { elf_end(ctf->elf); close(ctf->in_fd); } __gobuffer__delete(&ctf->objects); __gobuffer__delete(&ctf->types); __gobuffer__delete(&ctf->funcs); elf_symtab__delete(ctf->symtab); free(ctf->filename); free(ctf->buf); free(ctf); } } char *ctf__string(struct ctf *ctf, uint32_t ref) { struct ctf_header *hp = ctf->buf; uint32_t off = CTF_REF_OFFSET(ref); char *name; if (CTF_REF_TBL_ID(ref) != CTF_STR_TBL_ID_0) return "(external ref)"; if (off >= ctf__get32(ctf, &hp->ctf_str_len)) return "(ref out-of-bounds)"; if ((off + ctf__get32(ctf, &hp->ctf_str_off)) >= ctf->size) return "(string table truncated)"; name = ((char *)(hp + 1) + ctf__get32(ctf, &hp->ctf_str_off) + off); return name[0] == '\0' ? NULL : name; } void *ctf__get_buffer(struct ctf *ctf) { return ctf->buf; } size_t ctf__get_size(struct ctf *ctf) { return ctf->size; } int ctf__load_symtab(struct ctf *ctf) { ctf->symtab = elf_symtab__new(".symtab", ctf->elf, &ctf->ehdr); return ctf->symtab == NULL ? -1 : 0; } void ctf__set_strings(struct ctf *ctf, struct gobuffer *strings) { ctf->strings = strings; } uint32_t ctf__add_base_type(struct ctf *ctf, uint32_t name, uint16_t size) { struct ctf_full_type t; t.base.ctf_name = name; t.base.ctf_info = CTF_INFO_ENCODE(CTF_TYPE_KIND_INT, 0, 0); t.base.ctf_size = size; t.ctf_size_high = CTF_TYPE_INT_ENCODE(0, 0, size); gobuffer__add(&ctf->types, &t, sizeof(t) - sizeof(uint32_t)); return ++ctf->type_index; } uint32_t ctf__add_short_type(struct ctf *ctf, uint16_t kind, uint16_t type, uint32_t name) { struct ctf_short_type t; t.ctf_name = name; t.ctf_info = CTF_INFO_ENCODE(kind, 0, 0); t.ctf_type = type; gobuffer__add(&ctf->types, &t, sizeof(t)); return ++ctf->type_index; } uint32_t ctf__add_fwd_decl(struct ctf *ctf, uint32_t name) { return ctf__add_short_type(ctf, CTF_TYPE_KIND_FWD, 0, name); } uint32_t ctf__add_array(struct ctf *ctf, uint16_t type, uint16_t index_type, uint32_t nelems) { struct { struct ctf_short_type t; struct ctf_array a; } array; array.t.ctf_name = 0; array.t.ctf_info = CTF_INFO_ENCODE(CTF_TYPE_KIND_ARR, 0, 0); array.t.ctf_size = 0; array.a.ctf_array_type = type; array.a.ctf_array_index_type = index_type; array.a.ctf_array_nelems = nelems; gobuffer__add(&ctf->types, &array, sizeof(array)); return ++ctf->type_index; } void ctf__add_short_member(struct ctf *ctf, uint32_t name, uint16_t type, uint16_t offset, int64_t *position) { struct ctf_short_member m = { .ctf_member_name = name, .ctf_member_type = type, .ctf_member_offset = offset, }; memcpy(gobuffer__ptr(&ctf->types, *position), &m, sizeof(m)); *position += sizeof(m); } void ctf__add_full_member(struct ctf *ctf, uint32_t name, uint16_t type, uint64_t offset, int64_t *position) { struct ctf_full_member m = { .ctf_member_name = name, .ctf_member_type = type, .ctf_member_offset_high = offset >> 32, .ctf_member_offset_low = offset & 0xffffffffl, }; memcpy(gobuffer__ptr(&ctf->types, *position), &m, sizeof(m)); *position += sizeof(m); } uint32_t ctf__add_struct(struct ctf *ctf, uint16_t 
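/* struct or union kind */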
kind, uint32_t name, uint64_t size, uint16_t nr_members, int64_t *position) { const bool is_short = size < CTF_SHORT_MEMBER_LIMIT; uint32_t members_len = ((is_short ? sizeof(struct ctf_short_member) : sizeof(struct ctf_full_member)) * nr_members); struct ctf_full_type t; int len; t.base.ctf_name = name; t.base.ctf_info = CTF_INFO_ENCODE(kind, nr_members, 0); if (size < 0xffff) { len = sizeof(t.base); t.base.ctf_size = size; } else { len = sizeof(t); t.base.ctf_size = 0xffff; t.ctf_size_high = size >> 32; t.ctf_size_low = size & 0xffffffff; } gobuffer__add(&ctf->types, &t, len); *position = gobuffer__allocate(&ctf->types, members_len); return ++ctf->type_index; } void ctf__add_parameter(struct ctf *ctf, uint16_t type, int64_t *position) { uint16_t *parm = gobuffer__ptr(&ctf->types, *position); *parm = type; *position += sizeof(*parm); } uint32_t ctf__add_function_type(struct ctf *ctf, uint16_t type, uint16_t nr_parms, bool varargs, int64_t *position) { struct ctf_short_type t; int len = sizeof(uint16_t) * (nr_parms + !!varargs); /* * Round up to next multiple of 4 to maintain 32-bit alignment. */ if (len & 0x2) len += 0x2; t.ctf_name = 0; t.ctf_info = CTF_INFO_ENCODE(CTF_TYPE_KIND_FUNC, nr_parms + !!varargs, 0); t.ctf_type = type; gobuffer__add(&ctf->types, &t, sizeof(t)); *position = gobuffer__allocate(&ctf->types, len); if (varargs) { unsigned int pos = *position + (nr_parms * sizeof(uint16_t)); uint16_t *end_of_args = gobuffer__ptr(&ctf->types, pos); *end_of_args = 0; } return ++ctf->type_index; } uint32_t ctf__add_enumeration_type(struct ctf *ctf, uint32_t name, uint16_t size, uint16_t nr_entries, int64_t *position) { struct ctf_short_type e; e.ctf_name = name; e.ctf_info = CTF_INFO_ENCODE(CTF_TYPE_KIND_ENUM, nr_entries, 0); e.ctf_size = size; gobuffer__add(&ctf->types, &e, sizeof(e)); *position = gobuffer__allocate(&ctf->types, nr_entries * sizeof(struct ctf_enum)); return ++ctf->type_index; } void ctf__add_enumerator(struct ctf *ctf, uint32_t name, uint32_t value, int64_t *position) { struct ctf_enum m = { .ctf_enum_name = name, .ctf_enum_val = value, }; memcpy(gobuffer__ptr(&ctf->types, *position), &m, sizeof(m)); *position += sizeof(m); } void ctf__add_function_parameter(struct ctf *ctf, uint16_t type, int64_t *position) { uint16_t *parm = gobuffer__ptr(&ctf->funcs, *position); *parm = type; *position += sizeof(*parm); } int ctf__add_function(struct ctf *ctf, uint16_t type, uint16_t nr_parms, bool varargs, int64_t *position) { struct ctf_short_type func; int len = sizeof(uint16_t) * (nr_parms + !!varargs); /* * Round up to next multiple of 4 to maintain 32-bit alignment. */ if (len & 0x2) len += 0x2; func.ctf_info = CTF_INFO_ENCODE(CTF_TYPE_KIND_FUNC, nr_parms + !!varargs, 0); func.ctf_type = type; /* * We don't store the name for the function, it comes from the * symtab. */ gobuffer__add(&ctf->funcs, &func.ctf_info, sizeof(func) - sizeof(func.ctf_name)); *position = gobuffer__allocate(&ctf->funcs, len); if (varargs) { unsigned int pos = *position + (nr_parms * sizeof(uint16_t)); uint16_t *end_of_args = gobuffer__ptr(&ctf->funcs, pos); *end_of_args = 0; } return 0; } int ctf__add_object(struct ctf *ctf, uint16_t type) { return gobuffer__add(&ctf->objects, &type, sizeof(type)) >= 0 ? 
0 : -ENOMEM; } static const void *ctf__compress(void *orig_buf, unsigned int *size) { z_stream z = { .zalloc = Z_NULL, .zfree = Z_NULL, .opaque = Z_NULL, .avail_in = *size, .next_in = (Bytef *)orig_buf, }; void *bf = NULL; unsigned int bf_size = 0; if (deflateInit(&z, Z_BEST_COMPRESSION) != Z_OK) goto out; #define _GOBUFFER__ZCHUNK 16384 * 1024 do { const unsigned int new_bf_size = bf_size + _GOBUFFER__ZCHUNK; void *nbf = realloc(bf, new_bf_size); if (nbf == NULL) goto out_close_and_free; bf = nbf; z.avail_out = _GOBUFFER__ZCHUNK; z.next_out = (Bytef *)bf + bf_size; bf_size = new_bf_size; if (deflate(&z, Z_FULL_FLUSH) == Z_STREAM_ERROR) goto out_close_and_free; #if 0 fprintf(stderr, "%s: size=%d, bf_size=%d, total_out=%ld, total_in=%ld\n", __func__, *size, bf_size, z.total_out, z.total_in); #endif } while (z.total_in != *size); if (deflate(&z, Z_FINISH) == Z_STREAM_ERROR) goto out_close_and_free; deflateEnd(&z); *size = z.total_out; out: return bf; out_close_and_free: deflateEnd(&z); free(bf); bf = NULL; goto out; } int ctf__encode(struct ctf *ctf, uint8_t flags) { struct ctf_header *hdr; unsigned int size; void *bf = NULL; int err = -1; /* Empty file, nothing to do, so... done! */ if (gobuffer__size(&ctf->types) == 0) return 0; size = (gobuffer__size(&ctf->types) + gobuffer__size(&ctf->objects) + gobuffer__size(&ctf->funcs) + gobuffer__size(ctf->strings)); ctf->size = sizeof(*hdr) + size; ctf->buf = malloc(ctf->size); if (ctf->buf == NULL) { fprintf(stderr, "%s: malloc failed!\n", __func__); return -ENOMEM; } hdr = ctf->buf; memset(hdr, 0, sizeof(*hdr)); hdr->ctf_magic = CTF_MAGIC; hdr->ctf_version = 2; hdr->ctf_flags = flags; uint32_t offset = 0; hdr->ctf_object_off = offset; offset += gobuffer__size(&ctf->objects); hdr->ctf_func_off = offset; offset += gobuffer__size(&ctf->funcs); hdr->ctf_type_off = offset; offset += gobuffer__size(&ctf->types); hdr->ctf_str_off = offset; hdr->ctf_str_len = gobuffer__size(ctf->strings); void *payload = ctf->buf + sizeof(*hdr); gobuffer__copy(&ctf->objects, payload + hdr->ctf_object_off); gobuffer__copy(&ctf->funcs, payload + hdr->ctf_func_off); gobuffer__copy(&ctf->types, payload + hdr->ctf_type_off); gobuffer__copy(ctf->strings, payload + hdr->ctf_str_off); *(char *)(ctf->buf + sizeof(*hdr) + hdr->ctf_str_off) = '\0'; if (flags & CTF_FLAGS_COMPR) { bf = (void *)ctf__compress(ctf->buf + sizeof(*hdr), &size); if (bf == NULL) { printf("%s: ctf__compress failed!\n", __func__); return -ENOMEM; } void *new_bf = malloc(sizeof(*hdr) + size); if (new_bf == NULL) return -ENOMEM; memcpy(new_bf, hdr, sizeof(*hdr)); memcpy(new_bf + sizeof(*hdr), bf, size); free(bf); bf = new_bf; size += sizeof(*hdr); } else { bf = ctf->buf; size = ctf->size; } #if 0 printf("\n\ntypes:\n entries: %d\n size: %u" "\nstrings:\n entries: %u\n size: %u\ncompressed size: %d\n", ctf->type_index, gobuffer__size(&ctf->types), gobuffer__nr_entries(ctf->strings), gobuffer__size(ctf->strings), size); #endif int fd = open(ctf->filename, O_RDWR); if (fd < 0) { fprintf(stderr, "Cannot open %s\n", ctf->filename); return -1; } if (elf_version(EV_CURRENT) == EV_NONE) { fprintf(stderr, "Cannot set libelf version.\n"); goto out_close; } Elf *elf = elf_begin(fd, ELF_C_RDWR, NULL); if (elf == NULL) { fprintf(stderr, "Cannot update ELF file.\n"); goto out_close; } elf_flagelf(elf, ELF_C_SET, ELF_F_DIRTY); GElf_Ehdr ehdr_mem; GElf_Ehdr *ehdr = gelf_getehdr(elf, &ehdr_mem); if (ehdr == NULL) { fprintf(stderr, "%s: elf_getehdr failed.\n", __func__); goto out_close; } /* * First we look if there was already 
a .SUNW_ctf section to overwrite. */ Elf_Data *data = NULL; size_t strndx; GElf_Shdr shdr_mem; GElf_Shdr *shdr; Elf_Scn *scn = NULL; elf_getshdrstrndx(elf, &strndx); while ((scn = elf_nextscn(elf, scn)) != NULL) { shdr = gelf_getshdr(scn, &shdr_mem); if (shdr == NULL) continue; char *secname = elf_strptr(elf, strndx, shdr->sh_name); if (strcmp(secname, ".SUNW_ctf") == 0) { data = elf_getdata(scn, data); goto out_update; } } /* FIXME * OK, if we have the section, that is ok, we can just replace the * data, if not, I made a mistake on the small amount of boilerplate * below, probably .relA.ted to relocations... */ #if 0 /* Now we look if the ".SUNW_ctf" string is in the strings table */ scn = elf_getscn(elf, strndx); shdr = gelf_getshdr(scn, &shdr_mem); data = elf_getdata(scn, data); fprintf(stderr, "Looking for the string\n"); size_t ctf_name_offset = 1; /* First byte is '\0' */ while (ctf_name_offset < data->d_size) { const char *cur_str = data->d_buf + ctf_name_offset; fprintf(stderr, "*-> %s\n", cur_str); if (strcmp(cur_str, ".SUNW_ctf") == 0) goto found_SUNW_ctf_str; ctf_name_offset += strlen(cur_str) + 1; } /* Add the section name */ const size_t ctf_name_len = strlen(".SUNW_ctf") + 1; char *new_strings_table = malloc(data->d_size + ctf_name_len); if (new_strings_table == NULL) goto out_close; memcpy(new_strings_table, data->d_buf, data->d_size); strcpy(new_strings_table + data->d_size, ".SUNW_ctf"); ctf_name_offset = data->d_size; data->d_size += ctf_name_len; data->d_buf = new_strings_table; elf_flagdata(data, ELF_C_SET, ELF_F_DIRTY); elf_flagshdr(scn, ELF_C_SET, ELF_F_DIRTY); Elf_Scn *newscn; found_SUNW_ctf_str: newscn = elf_newscn(elf); if (newscn == NULL) goto out_close; data = elf_newdata(newscn); if (data == NULL) goto out_close; shdr = gelf_getshdr(newscn, &shdr_mem); shdr->sh_name = ctf_name_offset; shdr->sh_type = SHT_PROGBITS; gelf_update_shdr(newscn, &shdr_mem); elf_flagshdr(newscn, ELF_C_SET, ELF_F_DIRTY); #else char pathname[PATH_MAX]; snprintf(pathname, sizeof(pathname), "%s.SUNW_ctf", ctf->filename); fd = creat(pathname, S_IRUSR | S_IWUSR); if (fd == -1) { fprintf(stderr, "%s: open(%s) failed!\n", __func__, pathname); goto out_close; } if (write(fd, bf, size) != size) goto out_close; if (close(fd) < 0) goto out_unlink; char cmd[PATH_MAX]; snprintf(cmd, sizeof(cmd), "objcopy --add-section .SUNW_ctf=%s %s", pathname, ctf->filename); if (system(cmd) == 0) err = 0; out_unlink: unlink(pathname); return err; #endif out_update: data->d_buf = bf; data->d_size = size; elf_flagdata(data, ELF_C_SET, ELF_F_DIRTY); if (elf_update(elf, ELF_C_NULL) < 0) goto out_close; if (elf_update(elf, ELF_C_WRITE) < 0) goto out_close; elf_end(elf); err = 0; out_close: if (bf != ctf->buf) free(bf); close(fd); return err; } dwarves-dfsg-1.15/libctf.h000066400000000000000000000072061350511416500154560ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2019 Arnaldo Carvalho de Melo */ #ifndef _LIBCTF_H #define _LIBCTF_H #include #include #include #include #include "gobuffer.h" #include "elf_symtab.h" struct ctf { void *buf; void *priv; Elf *elf; struct elf_symtab *symtab; GElf_Ehdr ehdr; struct gobuffer objects; /* data/variables */ struct gobuffer types; struct gobuffer funcs; struct gobuffer *strings; char *filename; size_t size; int swapped; int in_fd; uint8_t wordsize; uint32_t type_index; }; struct ctf *ctf__new(const char *filename, Elf *elf); void ctf__delete(struct ctf *ctf); bool ctf__ignore_symtab_function(const GElf_Sym *sym, const char *sym_name); bool 
ctf__ignore_symtab_object(const GElf_Sym *sym, const char *sym_name); int ctf__load(struct ctf *ctf); uint16_t ctf__get16(struct ctf *ctf, uint16_t *p); uint32_t ctf__get32(struct ctf *ctf, uint32_t *p); void ctf__put16(struct ctf *ctf, uint16_t *p, uint16_t val); void ctf__put32(struct ctf *ctf, uint32_t *p, uint32_t val); void *ctf__get_buffer(struct ctf *ctf); size_t ctf__get_size(struct ctf *ctf); int ctf__load_symtab(struct ctf *ctf); uint32_t ctf__add_base_type(struct ctf *ctf, uint32_t name, uint16_t size); uint32_t ctf__add_fwd_decl(struct ctf *ctf, uint32_t name); uint32_t ctf__add_short_type(struct ctf *ctf, uint16_t kind, uint16_t type, uint32_t name); void ctf__add_short_member(struct ctf *ctf, uint32_t name, uint16_t type, uint16_t offset, int64_t *position); void ctf__add_full_member(struct ctf *ctf, uint32_t name, uint16_t type, uint64_t offset, int64_t *position); uint32_t ctf__add_struct(struct ctf *ctf, uint16_t kind, uint32_t name, uint64_t size, uint16_t nr_members, int64_t *position); uint32_t ctf__add_array(struct ctf *ctf, uint16_t type, uint16_t index_type, uint32_t nelems); void ctf__add_parameter(struct ctf *ctf, uint16_t type, int64_t *position); uint32_t ctf__add_function_type(struct ctf *ctf, uint16_t type, uint16_t nr_parms, bool varargs, int64_t *position); uint32_t ctf__add_enumeration_type(struct ctf *ctf, uint32_t name, uint16_t size, uint16_t nr_entries, int64_t *position); void ctf__add_enumerator(struct ctf *ctf, uint32_t name, uint32_t value, int64_t *position); void ctf__add_function_parameter(struct ctf *ctf, uint16_t type, int64_t *position); int ctf__add_function(struct ctf *ctf, uint16_t type, uint16_t nr_parms, bool varargs, int64_t *position); int ctf__add_object(struct ctf *ctf, uint16_t type); void ctf__set_strings(struct ctf *ctf, struct gobuffer *strings); int ctf__encode(struct ctf *ctf, uint8_t flags); char *ctf__string(struct ctf *ctf, uint32_t ref); /** * ctf__for_each_symtab_function - iterate thru all the symtab functions * * @ctf: struct ctf instance to iterate * @index: uint32_t index * @sym: GElf_Sym iterator */ #define ctf__for_each_symtab_function(ctf, index, sym) \ elf_symtab__for_each_symbol(ctf->symtab, index, sym) \ if (ctf__ignore_symtab_function(&sym, \ elf_sym__name(&sym, \ ctf->symtab))) \ continue; \ else /** * ctf__for_each_symtab_object - iterate thru all the symtab objects * * @ctf: struct ctf instance to iterate * @index: uint32_t index * @sym: GElf_Sym iterator */ #define ctf__for_each_symtab_object(ctf, index, sym) \ elf_symtab__for_each_symbol(ctf->symtab, index, sym) \ if (ctf__ignore_symtab_object(&sym, \ elf_sym__name(&sym, \ ctf->symtab))) \ continue; \ else #endif /* _LIBCTF_H */ dwarves-dfsg-1.15/list.h000066400000000000000000000423631350511416500151710ustar00rootroot00000000000000#ifndef _LINUX_LIST_H #define _LINUX_LIST_H /* SPDX-License-Identifier: GPL-2.0-only Copyright (C) Cast of dozens, comes from the Linux kernel */ #include /* * These are non-NULL pointers that will result in page faults * under normal circumstances, used to verify that nobody uses * non-initialized list entries. */ #define LIST_POISON1 ((void *)0x00100100) #define LIST_POISON2 ((void *)0x00200200) /** * container_of - cast a member of a structure out to the containing structure * @ptr: the pointer to the member. * @type: the type of the container struct this is embedded in. * @member: the name of the member within the struct. 
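 *
 * A minimal usage sketch (illustrative; "struct foo" and its "node"
 * member are assumptions, not part of this header):
 *
 *	struct foo { int a; struct list_head node; } f;
 *	struct list_head *p = &f.node;
 *	struct foo *fp = container_of(p, struct foo, node);	// fp == &f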
 *
 */
#define container_of(ptr, type, member) ({			\
	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
	next->prev = prev;
	prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty on entry does not return true after this, the entry is
 * in an undefined state.
 */
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}

/**
 * list_del_range - deletes a range of entries from the list.
 * @begin: first element in the range to delete from the list.
 * @end: last element in the range to delete from the list.
 * Note: list_empty on the range of entries does not return true after this,
 * the entries are in an undefined state.
 */
static inline void list_del_range(struct list_head *begin,
				  struct list_head *end)
{
	begin->prev->next = end->next;
	end->next->prev = begin->prev;
}

/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 * Note: if 'old' was empty, it will be overwritten.
 */
static inline void list_replace(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}

static inline void list_replace_init(struct list_head *old,
				     struct list_head *new)
{
	list_replace(old, new);
	INIT_LIST_HEAD(old);
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
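 *
 * A minimal sketch (illustrative; "struct foo" and the "foos" list head
 * are assumptions):
 *
 *	struct foo { int val; struct list_head list; };
 *	LIST_HEAD(foos);
 *	struct foo f = { .val = 1 };
 *	list_add(&f.list, &foos);
 *	list_del_init(&f.list);	// f.list is again a valid, empty list,
 *				// unlike after list_del(), which poisons it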
*/ static inline void list_del_init(struct list_head *entry) { __list_del(entry->prev, entry->next); INIT_LIST_HEAD(entry); } /** * list_move - delete from one list and add as another's head * @list: the entry to move * @head: the head that will precede our entry */ static inline void list_move(struct list_head *list, struct list_head *head) { __list_del(list->prev, list->next); list_add(list, head); } /** * list_move_tail - delete from one list and add as another's tail * @list: the entry to move * @head: the head that will follow our entry */ static inline void list_move_tail(struct list_head *list, struct list_head *head) { __list_del(list->prev, list->next); list_add_tail(list, head); } /** * list_is_last - tests whether @list is the last entry in list @head * @list: the entry to test * @head: the head of the list */ static inline int list_is_last(const struct list_head *list, const struct list_head *head) { return list->next == head; } /** * list_empty - tests whether a list is empty * @head: the list to test. */ static inline int list_empty(const struct list_head *head) { return head->next == head; } /** * list_empty_careful - tests whether a list is empty and not being modified * @head: the list to test * * Description: * tests whether a list is empty _and_ checks that no other CPU might be * in the process of modifying either member (next or prev) * * NOTE: using list_empty_careful() without synchronization * can only be safe if the only activity that can happen * to the list entry is list_del_init(). Eg. it cannot be used * if another CPU could re-list_add() it. */ static inline int list_empty_careful(const struct list_head *head) { struct list_head *next = head->next; return (next == head) && (next == head->prev); } static inline void __list_splice(struct list_head *list, struct list_head *head) { struct list_head *first = list->next; struct list_head *last = list->prev; struct list_head *at = head->next; first->prev = head; head->next = first; last->next = at; at->prev = last; } /** * list_splice - join two lists * @list: the new list to add. * @head: the place to add it in the first list. */ static inline void list_splice(struct list_head *list, struct list_head *head) { if (!list_empty(list)) __list_splice(list, head); } /** * list_splice_init - join two lists and reinitialise the emptied list. * @list: the new list to add. * @head: the place to add it in the first list. * * The list at @list is reinitialised */ static inline void list_splice_init(struct list_head *list, struct list_head *head) { if (!list_empty(list)) { __list_splice(list, head); INIT_LIST_HEAD(list); } } /** * list_entry - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. * @member: the name of the list_struct within the struct. */ #define list_entry(ptr, type, member) \ container_of(ptr, type, member) /** * list_first_entry - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_struct within the struct. * * Note, that list is expected to be not empty. */ #define list_first_entry(ptr, type, member) \ list_entry((ptr)->next, type, member) /** * list_for_each - iterate over a list * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. 
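 *
 * A minimal sketch (illustrative, reusing the hypothetical "struct foo"
 * and "foos" from the list_del_init() comment above):
 *
 *	struct list_head *pos;
 *	list_for_each(pos, &foos) {
 *		struct foo *f = list_entry(pos, struct foo, list);
 *		// use f here; to delete entries use list_for_each_safe()
 *	}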
*/ #define list_for_each(pos, head) \ for (pos = (head)->next; pos != (head); \ pos = pos->next) /** * __list_for_each - iterate over a list * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. * * This variant differs from list_for_each() in that it's the * simplest possible list iteration code, no prefetching is done. * Use this for code that knows the list to be very short (empty * or 1 entry) most of the time. */ #define __list_for_each(pos, head) \ for (pos = (head)->next; pos != (head); pos = pos->next) /** * list_for_each_prev - iterate over a list backwards * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. */ #define list_for_each_prev(pos, head) \ for (pos = (head)->prev; pos != (head); \ pos = pos->prev) /** * list_for_each_safe - iterate over a list safe against removal of list entry * @pos: the &struct list_head to use as a loop cursor. * @n: another &struct list_head to use as temporary storage * @head: the head for your list. */ #define list_for_each_safe(pos, n, head) \ for (pos = (head)->next, n = pos->next; pos != (head); \ pos = n, n = pos->next) /** * list_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_struct within the struct. */ #define list_for_each_entry(pos, head, member) \ for (pos = list_entry((head)->next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry(pos->member.next, typeof(*pos), member)) /** * list_for_each_entry_reverse - iterate backwards over list of given type. * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_struct within the struct. */ #define list_for_each_entry_reverse(pos, head, member) \ for (pos = list_entry((head)->prev, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry(pos->member.prev, typeof(*pos), member)) /** * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue * @pos: the type * to use as a start point * @head: the head of the list * @member: the name of the list_struct within the struct. * * Prepares a pos entry for use as a start point in list_for_each_entry_continue. */ #define list_prepare_entry(pos, head, member) \ ((pos) ? : list_entry(head, typeof(*pos), member)) /** * list_for_each_entry_continue - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_struct within the struct. * * Continue to iterate over list of given type, continuing after * the current position. */ #define list_for_each_entry_continue(pos, head, member) \ for (pos = list_entry(pos->member.next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry(pos->member.next, typeof(*pos), member)) /** * list_for_each_entry_from - iterate over list of given type from the current point * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_struct within the struct. * * Iterate over list of given type, continuing from current position. */ #define list_for_each_entry_from(pos, head, member) \ for (; &pos->member != (head); \ pos = list_entry(pos->member.next, typeof(*pos), member)) /** * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. 
* @member: the name of the list_struct within the struct. */ #define list_for_each_entry_safe(pos, n, head, member) \ for (pos = list_entry((head)->next, typeof(*pos), member), \ n = list_entry(pos->member.next, typeof(*pos), member); \ &pos->member != (head); \ pos = n, n = list_entry(n->member.next, typeof(*n), member)) /** * list_for_each_entry_safe_continue * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. * @member: the name of the list_struct within the struct. * * Iterate over list of given type, continuing after current point, * safe against removal of list entry. */ #define list_for_each_entry_safe_continue(pos, n, head, member) \ for (pos = list_entry(pos->member.next, typeof(*pos), member), \ n = list_entry(pos->member.next, typeof(*pos), member); \ &pos->member != (head); \ pos = n, n = list_entry(n->member.next, typeof(*n), member)) /** * list_for_each_entry_safe_from * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. * @member: the name of the list_struct within the struct. * * Iterate over list of given type from current point, safe against * removal of list entry. */ #define list_for_each_entry_safe_from(pos, n, head, member) \ for (n = list_entry(pos->member.next, typeof(*pos), member); \ &pos->member != (head); \ pos = n, n = list_entry(n->member.next, typeof(*n), member)) /** * list_for_each_entry_safe_reverse * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. * @member: the name of the list_struct within the struct. * * Iterate backwards over list of given type, safe against removal * of list entry. */ #define list_for_each_entry_safe_reverse(pos, n, head, member) \ for (pos = list_entry((head)->prev, typeof(*pos), member), \ n = list_entry(pos->member.prev, typeof(*pos), member); \ &pos->member != (head); \ pos = n, n = list_entry(n->member.prev, typeof(*n), member)) /* * Double linked lists with a single pointer list head. * Mostly useful for hash tables where the two pointer list head is * too wasteful. * You lose the ability to access the tail in O(1). 
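 *
 * A minimal sketch of an open-chained hash table built on hlists
 * (illustrative; "struct foo" and the table size are assumptions):
 *
 *	struct foo { int key; struct hlist_node node; };
 *	static struct hlist_head table[16];	// zero-initialized: all empty
 *
 *	// insert (f is a struct foo *):
 *	hlist_add_head(&f->node, &table[f->key & 15]);
 *
 *	// lookup:
 *	struct hlist_node *pos;
 *	hlist_for_each_entry(f, pos, &table[key & 15], node)
 *		if (f->key == key)
 *			break;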
*/ struct hlist_head { struct hlist_node *first; }; struct hlist_node { struct hlist_node *next, **pprev; }; #define HLIST_HEAD_INIT { .first = NULL } #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) static inline void INIT_HLIST_NODE(struct hlist_node *h) { h->next = NULL; h->pprev = NULL; } static inline int hlist_unhashed(const struct hlist_node *h) { return !h->pprev; } static inline int hlist_empty(const struct hlist_head *h) { return !h->first; } static inline void __hlist_del(struct hlist_node *n) { struct hlist_node *next = n->next; struct hlist_node **pprev = n->pprev; *pprev = next; if (next) next->pprev = pprev; } static inline void hlist_del(struct hlist_node *n) { __hlist_del(n); n->next = LIST_POISON1; n->pprev = LIST_POISON2; } static inline void hlist_del_init(struct hlist_node *n) { if (!hlist_unhashed(n)) { __hlist_del(n); INIT_HLIST_NODE(n); } } static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *first = h->first; n->next = first; if (first) first->pprev = &n->next; h->first = n; n->pprev = &h->first; } /* next must be != NULL */ static inline void hlist_add_before(struct hlist_node *n, struct hlist_node *next) { n->pprev = next->pprev; n->next = next; next->pprev = &n->next; *(n->pprev) = n; } static inline void hlist_add_after(struct hlist_node *n, struct hlist_node *next) { next->next = n->next; n->next = next; next->pprev = &n->next; if(next->next) next->next->pprev = &next->next; } #define hlist_entry(ptr, type, member) container_of(ptr,type,member) #define hlist_for_each(pos, head) \ for (pos = (head)->first; pos; \ pos = pos->next) #define hlist_for_each_safe(pos, n, head) \ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ pos = n) /** * hlist_for_each_entry - iterate over list of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_node to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry(tpos, pos, head, member) \ for (pos = (head)->first; \ pos && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ pos = pos->next) /** * hlist_for_each_entry_continue - iterate over a hlist continuing after current point * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_node to use as a loop cursor. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_continue(tpos, pos, member) \ for (pos = (pos)->next; \ pos && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ pos = pos->next) /** * hlist_for_each_entry_from - iterate over a hlist continuing from current point * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_node to use as a loop cursor. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_from(tpos, pos, member) \ for (; pos && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ pos = pos->next) /** * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_node to use as a loop cursor. * @n: another &struct hlist_node to use as temporary storage * @head: the head for your list. * @member: the name of the hlist_node within the struct. 
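 *
 * A minimal sketch (illustrative, reusing the hypothetical "struct foo"
 * hash table above): @n caches the next node, so the current entry may be
 * unlinked inside the loop body:
 *
 *	struct foo *f;
 *	struct hlist_node *pos, *tmp;
 *	hlist_for_each_entry_safe(f, pos, tmp, &table[i], node)
 *		hlist_del(&f->node);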
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member)		 \
	for (pos = (head)->first;					 \
	     pos && ({ n = pos->next; 1; }) &&				 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)

#endif
dwarves-dfsg-1.15/man-pages/000077500000000000000000000000001350511416500157055ustar00rootroot00000000000000dwarves-dfsg-1.15/man-pages/pahole.1000066400000000000000000000143711350511416500172450ustar00rootroot00000000000000
.\" Man page for pahole
.\" Arnaldo Carvalho de Melo, 2009
.\" Licensed under version 2 of the GNU General Public License.
.TH pahole 1 "February 13, 2009" "dwarves" "dwarves"
.\"
.SH NAME
pahole \- Shows and manipulates data structure layout.
.SH SYNOPSIS
\fBpahole\fR [\fIoptions\fR] \fIfiles\fR
.SH DESCRIPTION
.B pahole
shows data structure layouts encoded in debugging information formats, DWARF
and CTF being supported. This is useful for, among other things: optimizing
important data structures by reducing their size, figuring out which field
sits at a given offset from the start of a data structure, investigating ABI
changes and, more generally, understanding a new codebase you have to work
with.

The files must have associated debugging information. This information may be
inside the file itself, in ELF sections, or in another file.

One way to have this information is to specify the \fB\-g\fR option to the
compiler when building it. When this is done the information will be stored in
an ELF section. For the DWARF debugging information format this adds, among
others, the \fB.debug_info\fR ELF section. For CTF it is found in just one ELF
section, \fB.SUNW_ctf\fR.

The \fBdebuginfo\fR packages available in most Linux distributions are also
supported by \fBpahole\fR, where the debugging information is available in a
separate file.

By default, \fBpahole\fR shows the layout of all named structs in the files
specified.
.SH OPTIONS
pahole supports the following options.
.TP
.B \-C, \-\-class_name=CLASS_NAMES
Show just these classes. This can be a comma-separated list of class names
or file URLs (e.g.: file://class_list.txt).
.TP
.B \-c, \-\-cacheline_size=SIZE
Set cacheline size to SIZE bytes.
.TP
.B \-E, \-\-expand_types
Expand class members. Useful to find in which member of an inner struct a
given offset from the beginning of the outer struct falls.
.TP
.B \-F, \-\-format_path
Allows specifying a list of debugging formats to try, in order. Right now
this includes "ctf" and "dwarf". The default format path used is equivalent
to "-F dwarf,ctf".
.TP
.B \-\-hex
Print offsets and sizes in hexadecimal.
.TP
.B \-r, \-\-rel_offset
Show relative offsets of members in inner structs.
.TP
.B \-p, \-\-expand_pointers
Expand class pointer members.
.TP
.B \-R, \-\-reorganize
Reorganize struct, demoting and combining bitfields, moving members to
remove alignment holes and padding.
.TP
.B \-S, \-\-show_reorg_steps
Show the struct layout at each reorganization step.
.TP
.B \-i, \-\-contains=CLASS_NAME
Show classes that contain CLASS_NAME.
.TP
.B \-a, \-\-anon_include
Include anonymous classes.
.TP
.B \-A, \-\-nested_anon_include
Include nested (inside other structs) anonymous classes.
.TP
.B \-B, \-\-bit_holes=NR_HOLES
Show only structs with at least NR_HOLES bit holes.
.TP
.B \-d, \-\-recursive
Recursive mode, affects several other flags.
.TP
.B \-D, \-\-decl_exclude=PREFIX
Exclude classes declared in files with PREFIX.
.TP
.B \-f, \-\-find_pointers_to=CLASS_NAME
Find pointers to CLASS_NAME.
.TP
.B \-H, \-\-holes=NR_HOLES
Show only structs with at least NR_HOLES holes.
.TP
.B \-I, \-\-show_decl_info
Show the file and line number where the tags were defined, if available in
the debugging information.
.TP
.B \-l, \-\-show_first_biggest_size_base_type_member
Show first biggest size base_type member.
.TP
.B \-m, \-\-nr_methods
Show number of methods.
.TP
.B \-M, \-\-show_only_data_members
Show only the members that use space in the class layout. C++ methods will
be suppressed.
.TP
.B \-n, \-\-nr_members
Show number of members.
.TP
.B \-N, \-\-class_name_len
Show the length of the class names.
.TP
.B \-O, \-\-dwarf_offset=OFFSET
Show tag with DWARF OFFSET.
.TP
.B \-P, \-\-packable
Show only structs that have holes that can be packed if members are
reorganized, for instance when using the \fB\-\-reorganize\fR option.
.TP
.B \-q, \-\-quiet
Be quieter.
.TP
.B \-s, \-\-sizes
Show size of classes.
.TP
.B \-t, \-\-separator=SEP
Use SEP as the field separator.
.TP
.B \-T, \-\-nr_definitions
Show how many times each struct was defined.
.TP
.B \-u, \-\-defined_in
Show CUs where CLASS_NAME (-C) is defined.
.TP
.B \-\-flat_arrays
Flatten arrays, so that array[10][2] becomes array[20]. Useful when
generating from both CTF/BTF and DWARF encodings for the same binary for
testing purposes.
.TP
.B \-\-suppress_aligned_attribute
Suppress forced alignment markers, so that one can compare BTF or CTF
output, which doesn't have that info, to output from DWARF >= 5.
.TP
.B \-\-suppress_force_paddings
Suppress bitfield forced padding at the end of structs, as this requires
something like DWARF's DW_AT_alignment, so that one can compare BTF or CTF
output, which doesn't have that info.
.TP
.B \-\-suppress_packed
Suppress the output of the inference of __attribute__((__packed__)), so
that one can compare BTF or CTF output. The inference algorithm uses things
like DW_AT_alignment, so until it is improved to infer that as well for
BTF, allow disabling this output.
.TP
.B \-\-fixup_silly_bitfields
Converts silly bitfields such as "int foo:32" to plain "int foo".
.TP
.B \-V, \-\-verbose
Be verbose.
.TP
.B \-w, \-\-word_size=WORD_SIZE
Change the arch word size to WORD_SIZE.
.TP
.B \-x, \-\-exclude=PREFIX
Exclude PREFIXed classes.
.TP
.B \-X, \-\-cu_exclude=PREFIX
Exclude PREFIXed compilation units.
.TP
.B \-y, \-\-prefix_filter=PREFIX
Include PREFIXed classes.
.TP
.B \-z, \-\-hole_size_ge=HOLE_SIZE
Show only structs with at least one hole greater than or equal to
HOLE_SIZE.
.SH NOTES
To enable the generation of debugging information in the Linux kernel build
process select CONFIG_DEBUG_INFO. This can be done using make menuconfig by
this path: "Kernel Hacking" -> "Kernel Debugging" -> "Compile the kernel
with debug info".

Many distributions also come with debuginfo packages, so just enable it in
your package manager repository configuration and install kernel-debuginfo,
or the debuginfo package for any other userspace program written in a
language for which the compiler generates debugging information (C and C++,
for instance).
.SH SEE ALSO
\fIeu-readelf\fR(1), \fIreadelf\fR(1), \fIobjdump\fR(1).
.P
\fIhttps://www.kernel.org/doc/ols/2007/ols2007v2-pages-35-44.pdf\fR.
.SH AUTHOR
\fBpahole\fR was written by Arnaldo Carvalho de Melo . Please send bug
reports to .
.P
No\ subscription is required.
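.SH EXAMPLES
The file and struct names below are illustrative; any binary with debugging
information can be used.
.PP
Show the layout of one struct in a kernel image built with debugging
information:
.PP
.B "  pahole -C task_struct vmlinux"
.PP
Show the name, size and number of alignment holes of every named struct,
one per line:
.PP
.B "  pahole --sizes vmlinux"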
dwarves-dfsg-1.15/ostra/000077500000000000000000000000001350511416500151655ustar00rootroot00000000000000dwarves-dfsg-1.15/ostra/ostra-cg000077500000000000000000000272551350511416500166450ustar00rootroot00000000000000#!/usr/bin/python3 # ostra-cg - generate callgraphs from encoded trace # # Arnaldo Carvalho de Melo # # # Copyright (C) 2005, 2006, 2007 Arnaldo Carvalho de Melo # # This program is free software; you can redistribute it and/or modify it # under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. import sys, datetime, os, ostra class_def = None ident = 0 verbose = False valid_html = False print_exits = True print_exit_details = False print_function_times = True gen_html = True html_file_seq = 0 nr_lines_per_page = 256 output_file = None callgraph = None print_nr_exit_points = False first_table_row = True plot_min_samples = 4 plot = False tab_space = 10 my_object = None # @import url(file:///home/acme/git/ostra/ostra.css); html_style_import=''' ''' def emit_html_page_sequence_links(page): global output_file output_file.write("
") if page != 1: if page == 2: prev = "index" else: prev = str(page - 2) output_file.write("Index | ") output_file.write("Previous | " % prev) output_file.write("Next | " % page) output_file.write("Where fields changed | ") output_file.write("Methods statistics | ") output_file.write("Last\n") output_file.write("
") def close_callgraph_file(): if gen_html: output_file.write("\n") emit_html_page_sequence_links(html_file_seq) if valid_html: output_file.write('''

Valid HTML 4.01!

''') output_file.write("\n\n") output_file.close() def new_callgraph_file(traced_class): global html_file_seq, output_file, first_table_row if not gen_html: if output_file == None: output_file = file("%s.txt" % callgraph, "w") return first_table_row = True if html_file_seq == 0: os.mkdir(callgraph) if output_file != None: output_file.close() filename = "index" help = '''

Tracing struct %s methods (functions with a struct %s * argument)

Click on the timestamps to see the object state

Click on the functions to go to its definition in LXR (http://lxr.linux.no/)

Red timestamps means the state changed

''' % (traced_class, traced_class) else: close_callgraph_file() filename = str(html_file_seq) help = " " output_file = file("%s/%s.html" % (callgraph, filename), "w") output_file.write(''' OSTRA Callgraph: %s, file %d %s ''' % (callgraph, html_file_seq, html_style_import)) html_file_seq += 1 emit_html_page_sequence_links(html_file_seq) output_file.write("\n%s\n\n" % help) def trim_tstamp(tstamp): return str(tstamp).strip().lstrip('0').lstrip(':').lstrip('0').lstrip(':').lstrip('0').lstrip('.').lstrip('0') def object_state(): output = "
" state_changed = False for field in class_def.fields.values(): if not field.cg: continue value_changed_or_not_zero = False value = field.value if field.changed(): state_changed = True last_value = field.last_value if field.table and last_value and field.table.has_key(int(last_value)): last_value = field.table[int(last_value)] transition = "%s -> " % last_value color = " class=\"odd\"" value_changed_or_not_zero = True else: field_changed = False transition = "" color = "" if value != "0" and value != None: value_changed_or_not_zero = True if value_changed_or_not_zero: if field.table and value and field.table.has_key(int(value)): value = field.table[int(value)] output = output.strip() + "" % \ (color, field, transition, value) output += "
%s%s%s
" return (output, state_changed) total_lines = 0 def tstamp_str(): global total_lines, first_table_row total_lines += 1 if gen_html: state, changed = object_state() if changed: anchor = "%d.%d" % (class_def.tstamp.seconds, class_def.tstamp.microseconds) anchor_color = " class=\"red\"" else: anchor = "" anchor_color = "" if total_lines % 2 == 1: row_color = "odd" else: row_color = "evn" if first_table_row: close_last_tr = "" first_table_row = False else: close_last_tr = "\n" return "%s%04d.%06d%s" % \ (close_last_tr, row_color, anchor, anchor_color, class_def.tstamp.seconds, class_def.tstamp.microseconds, state) else: return "%06d.%06d" % (class_def.tstamp.seconds, class_def.tstamp.microseconds) def indent_str(indent, text): if gen_html: method = class_def.current_method() time_so_far = method.total_time.seconds * 10000 + method.total_time.microseconds tooltip = "%s: calls=%d, total time=%dus" % (method.name, method.calls, time_so_far) if class_def.fields["action"].value[0] == 'o': if class_def.fields.has_key("exit_point"): tooltip += ", exit point=%d" % (int(class_def.fields["exit_point"].value) + 1) else: text = "%s" % (method.name, text) return "%s%s" % (tooltip, " " * tab_space * indent, text) else: return "%s%s" % ("\t" * ident, text) def function_time_str(time): if gen_html: if class_def.current_method().print_return_value: ret_value = "%s" % class_def.fields["return_value"].value else: ret_value = "0" if ret_value == "0": ret_value = "" else: ret_value=" title=\"returned %s\"" % ret_value return "%sus" % (ret_value, time) else: return " %sus\n" % time previous_was_entry = False nr_lines = 0 def process_record(): global ident, previous_was_entry, nr_lines if gen_html: nr_lines += 1 if nr_lines > nr_lines_per_page: if ident == 0 or nr_lines > nr_lines_per_page * 5: new_callgraph_file(traced_class) nr_lines = 0 method = class_def.current_method() if class_def.fields["action"].value[0] == 'i': output = "%s()" % method.name if print_exits and previous_was_entry: if gen_html: last_open = " { " else: last_open = " {\n" else: last_open = "" output_file.write("%s%s %s" % (last_open, tstamp_str(), indent_str(ident, output.strip()))) if not print_exits: output_file.write("\n") ident += 1 method.calls += 1 method.last_tstamp = class_def.tstamp previous_was_entry = True else: if not method.last_tstamp: method.last_tstamp = class_def.tstamp tstamp_delta = class_def.tstamp - method.last_tstamp if tstamp_delta < datetime.timedelta(): tstamp_delta = datetime.timedelta() method.total_time += tstamp_delta if ident > 0: ident -= 1 if print_exits: if print_exit_details: exit_point = int(class_def.fields["exit_point"].value) + 1 if class_def.last_method.name != method.name: output_file.write("%s %s" % (tstamp_str(), indent_str(ident, "}"))) if print_exit_details: output_file.write(" EXIT #%d (%s)" % (exit_point, method.name)) else: if print_exit_details: output_file.write("EXIT #%d" % exit_point) function_time = trim_tstamp(tstamp_delta) if len(function_time) == 0: function_time = "0" if print_exits: if print_function_times: output_file.write(function_time_str(function_time)) else: output_file.write("\n") if print_nr_exit_points: if method.exits.has_key(exit_point): method.exits[exit_point] += 1 else: method.exits[exit_point] = 1 previous_was_entry = False return html_file_seq - 1 def print_where_fields_changed(): f = file("%s/changes.html" % callgraph, "w") f.write(''' OSTRA Callgraph: %s, Where the Fields Changed %s

Click on the values to go to where it was changed

Click on the field names to see a plotting of its value over time

''' % (callgraph, html_style_import)) output_file.write("
") f.write("Index\n") f.write("Last\n" % (html_file_seq - 1)) f.write("") max_samples = 50 for key in class_def.fields.keys(): fields = class_def.fields[key] changes = fields.changes changes_str="" link_pre="" link_pos="" if len(changes) == 0: changes_str="Unchanged\n" elif plot and len(changes) >= plot_min_samples and fields.plot_fmt != "dev_null": link_pre="" % key link_pos="" f.write("\n") f.write("
%s%s%s%s" % (link_pre, key, link_pos, changes_str)) if len(changes) == 0: continue f.write("\n") nr_samples = 0 for change in changes: nr_samples += 1 if nr_samples <= max_samples: if change.seq == 0: filename="index" else: filename = str(change.seq) f.write("" % \ (filename, change.tstamp.seconds, change.tstamp.microseconds, change.value)) if nr_samples > max_samples: f.write("" % (max_samples, nr_samples)) f.write("
%s
Only %d samples out of %d were printed
\n
") output_file.write("
") f.write("\n\n") f.close() os.symlink("changes.html", "%s/%d.html" % (callgraph, html_file_seq)) os.symlink("%d.html" % (html_file_seq - 1), "%s/last.html" % callgraph) def method_stats(class_def, callgraph): os.mkdir("%s/methods" % callgraph) f = file("%s/methods/index.html" % callgraph, "w") f.write(''' OSTRA Callgraph: %s, Methods Statistics %s

Click on the methods names to see a plotting of the times for each call

''' % (callgraph, html_style_import)) if plot: class_def.plot_methods(callgraph) f.write("") for method in class_def.methods.values(): changes_str="" link_pre="" link_pos="" if len(method.times) < 4: changes_str="Less than 4 calls\n" else: if plot: link_pre="" % method.name link_pos="" changes_str="%d calls\n" % len(method.times) f.write("
%s%s%s%s" % \ (link_pre, method.name, link_pos, changes_str)) f.write("
") f.write("\n\n") f.close() if __name__ == '__main__': if len(sys.argv) not in [ 3, 4 ]: print("usage: ostra-cg [object]") sys.exit(1) gen_html = True traced_class = sys.argv[1] callgraph = "%s.callgraph" % traced_class encoded_trace = sys.argv[2] if len(sys.argv) == 4: my_object = sys.argv[3] if my_object == "none": my_object = None plot = True class_def = ostra.class_definition(class_def_file = "%s.fields" % traced_class, class_methods_file = "%s.functions" % traced_class) new_callgraph_file(traced_class) class_def.parse_file(encoded_trace, verbose = verbose, process_record = process_record, my_object = my_object) if gen_html: print_where_fields_changed() close_callgraph_file() method_stats(class_def, callgraph) if plot: ostra.plot(class_def, callgraph) dwarves-dfsg-1.15/ostra/python/000077500000000000000000000000001350511416500165065ustar00rootroot00000000000000dwarves-dfsg-1.15/ostra/python/ostra.py000077500000000000000000000251541350511416500202220ustar00rootroot00000000000000#!/usr/bin/python3 # # Copyright (C) 2005, 2006, 2007 Arnaldo Carvalho de Melo # # This program is free software; you can redistribute it and/or modify it # under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. from datetime import timedelta class trace_points: def __init__(self, hooks): self.entry = "entry" in hooks self.exit = "exit" in hooks def __repr__(self): return str(self.__dict__.values()) __str__ = __repr__ class change_point: def __init__(self, tstamp, value, seq): self.tstamp = tstamp self.value = value self.seq = seq class class_field: def __init__(self, line, class_def_file): field, self.name, cgtraced, self.grab_expr, \ self.collector_fmt, hooks, self.plot_fmt = line.strip().split(':') self.field = int(field) self.cg = cgtraced == "yes" self.hooks = trace_points(hooks.split(',')) self.value = None self.last_value = None self.changes = [] self._load_text_table(class_def_file) def _text_table_tokenizer(self, line): tokens = line.split(":") return int(tokens[0]), tokens[1][:-1] def _load_text_table(self, class_def_file): try: f = file("%s.%s.table" % (class_def_file, self.name)) except: self.table = {} return self.table = dict([self._text_table_tokenizer(line) for line in f.readlines()]) f.close() def set_last_value(self, tstamp, seq): if self.value != None: if self.cg and self.changed(): self.changes.append(change_point(tstamp, self.value, seq)) self.last_value = self.value def changed(self): return self.value != None and self.value != self.last_value def __repr__(self): return self.name __str__ = __repr__ class class_method: def __init__(self, line): fields = line.strip().split(':') self.function_id = fields[0] self.name = fields[1] self.print_return_value = fields[-1] self.function_id = int(self.function_id) self.print_return_value = self.print_return_value == "yes" self.calls = 0 self.total_time = timedelta() self.last_tstamp = None self.times = [] self.exits = {} def begin(self, tstamp): self.calls += 1 self.last_tstamp = tstamp def end(self, tstamp): tstamp_delta = tstamp - self.last_tstamp if tstamp_delta < timedelta(): tstamp_delta = timedelta() self.total_time += tstamp_delta self.times.append(tstamp_delta.seconds * 1000000 + tstamp_delta.microseconds) def plot(self, directory, entries, samples, nr_samples, verbose = False): from matplotlib import use as muse muse('Agg') from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas from matplotlib.figure import Figure from matplotlib.ticker import FuncFormatter, 
FixedFormatter, LinearLocator from matplotlib.mlab import std as std_deviation from matplotlib.mlab import mean from time import asctime yfont = { 'fontname' : 'Bitstream Vera Sans', 'color' : 'r', 'fontsize' : 8 } xfont = { 'fontname' : 'Bitstream Vera Sans', 'color' : 'b', 'fontsize' : 8 } titlefont = { 'fontname' : 'Bitstream Vera Sans', 'color' : 'g', 'fontweight' : 'bold', 'fontsize' : 10 } inches = 0.00666667 width = 950 * inches height = 680 * inches fig = Figure(figsize = (width, height)) canvas = FigureCanvas(fig) ax = fig.add_subplot(111) ax.grid(False) xtickfontsize = 5 ytickfontsize = 5 plot_type = 'b-' field_mean = mean(samples) yaxis_plot_fmt = FuncFormatter(pylab_formatter_ms) ax.plot(entries, samples, "b-") ax.set_xlabel("samples", xfont) ax.set_ylabel("time", yfont) for label in ax.get_xticklabels(): label.set(fontsize = xtickfontsize) for label in ax.get_yticklabels(): label.set(fontsize = ytickfontsize) ax.yaxis.set_major_formatter(yaxis_plot_fmt) ax.set_title("%d %s samples (%s)" % (nr_samples, self.name, asctime()), titlefont) canvas.print_figure("%s/methods/%s.png" % (directory, self.name)) del fig, canvas, ax class class_definition: def __init__(self, class_def_file = None, class_methods_file = None): self.fields = {} self.methods = {} self.tstamp = None self.last_tstamp = None self.last_method = None self.epoch = None if class_def_file: f = file(class_def_file) for line in f.readlines(): field = class_field(line, class_def_file) self.fields[field.name] = field f.close() if class_methods_file: f = file(class_methods_file) self.methods = dict([self._method_tokenizer(line) for line in f.readlines()]) f.close() def _method_tokenizer(self, line): method = class_method(line) return method.function_id, method def set_last_values(self, seq = 0): self.last_method = self.current_method() for field in self.fields.values(): field.set_last_value(self.tstamp, seq) self.last_tstamp = self.tstamp def parse_record(self, line): nsec, record = line[:-1].split(' ', 1) line_fields = record.split(':') self.tstamp = timedelta(microseconds = int(nsec) / 1000) if self.epoch == None: self.epoch = self.tstamp self.tstamp -= self.epoch action = line_fields[0][0] nr_fields = len(line_fields) for field in self.fields.values(): if field.field >= nr_fields or \ (action == 'i' and not field.hooks.entry) or \ (action == 'o' and not field.hooks.exit): field.value = None continue field.value = line_fields[field.field] def parse_file(self, filename, process_record = None, verbose = False, my_object = None): f = file(filename) current_object = None object_stack = [] if verbose: nr_lines = 0 while True: line = f.readline() if not line: break if verbose: nr_lines += 1 print("\r%d" % nr_lines,) self.parse_record(line) method = self.current_method() # print method.name if my_object: if self.fields["action"].value[0] == 'i': current_object = self.fields["object"].value object_stack.append(current_object) else: current_object = object_stack.pop() if current_object != my_object: continue if self.fields["action"].value[0] == 'i': method.begin(self.tstamp) else: method.end(self.tstamp) seq = 0 if process_record: seq = process_record() self.set_last_values(seq) f.close() if verbose: print def current_method(self): return self.methods[int(self.fields["function_id"].value)] def plot_methods(self, callgraph, verbose = False): for current_method in self.methods.values(): nr_samples = len(current_method.times) if nr_samples < 4: continue if verbose: print("plot_methods: plotting %s method (%d samples)" % \ 
(current_method.name, nr_samples)) entries = [float("%d.0" % entry) for entry in range(nr_samples)] samples = current_method.times current_method.plot(callgraph, entries, samples, nr_samples, verbose) def pylab_formatter_kbps(x): mb = 1024 * 1024 if x > mb: return "%d,%d Mbps" % (x / mb, x % mb) else: return "%d,%d Kbps" % (x / 1024, x % 1024) def pylab_formatter_ms(x, pos = 0): ms = x / 1000 us = x % 1000 s = "%d" % ms if us > 0: s += ".%03d" % us s = s.rstrip('0') s += "ms" return s def pylab_formatter(x, pos = 0): if current_plot_fmt == "kbps": return pylab_formatter_kbps(x) elif current_plot_fmt == "ms": return pylab_formatter_ms(x) else: return "%s" % str(int(x)) def plot_field(name, directory, tstamps, samples, nr_samples, plot_fmt = None, table = None, verbose = False): global current_plot_fmt from matplotlib import use as muse muse('Agg') from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas from matplotlib.figure import Figure from matplotlib.ticker import FuncFormatter, FixedFormatter, LinearLocator from matplotlib.mlab import std as std_deviation from matplotlib.mlab import mean from time import asctime yfont = { 'fontname' : 'Bitstream Vera Sans', 'color' : 'r', 'fontsize' : 8 } xfont = { 'fontname' : 'Bitstream Vera Sans', 'color' : 'b', 'fontsize' : 8 } titlefont = { 'fontname' : 'Bitstream Vera Sans', 'color' : 'g', 'fontweight' : 'bold', 'fontsize' : 10 } inches = 0.00666667 width = 950 * inches height = 680 * inches fig = Figure(figsize = (width, height)) canvas = FigureCanvas(fig) ax = fig.add_subplot(111) ax.grid(False) xtickfontsize = 5 ytickfontsize = 5 current_plot_fmt = plot_fmt field_mean = None plot_type = 'b-' if current_plot_fmt == "filter_dev": std = std_deviation(samples) * 2 if verbose: print("filter_dev(%s) std=%d" % (name, std)) for i in range(nr_samples): if samples[i] > std: if verbose: print("%s: filtering out %d" % (name, samples[i])) samples[i] = 0 field_mean = mean(samples) yaxis_plot_fmt = FuncFormatter(pylab_formatter) elif current_plot_fmt == "table": ax.grid(True) plot_type = 'bo-' max_value = max(samples) without_zero = 1 if table.has_key(0): without_zero = 0 max_value += 1 ax.yaxis.set_major_locator(LinearLocator(max_value)) tstamps = range(nr_samples) seq = [ " " ] * max_value for key in table.keys(): if key in samples: seq[key - without_zero] = "%s(%d)" % (table[key], key) ytickfontsize = 4 yaxis_plot_fmt = FixedFormatter(seq) else: field_mean = mean(samples) yaxis_plot_fmt = FuncFormatter(pylab_formatter) ax.plot(tstamps, samples, plot_type) ax.set_xlabel("time", xfont) yname = name if field_mean: yname += " (mean=%s)" % pylab_formatter(field_mean) ax.set_ylabel(yname, yfont) for label in ax.get_xticklabels(): label.set(fontsize = xtickfontsize) for label in ax.get_yticklabels(): label.set(fontsize = ytickfontsize) ax.yaxis.set_major_formatter(yaxis_plot_fmt) ax.set_title("%d %s samples (%s)" % (nr_samples, name, asctime()), titlefont) canvas.print_figure("%s/%s.png" % (directory, name)) del fig, canvas, ax def plot(class_def, callgraph, verbose = False): for current_field in class_def.fields.values(): nr_samples = len(current_field.changes) if nr_samples < 4: continue if verbose: print("ostra-plot: plotting %s field (%d samples)" % (current_field.name, nr_samples)) tstamps = [float("%d.%06d" % (entry.tstamp.seconds, entry.tstamp.microseconds)) \ for entry in current_field.changes] try: samples = [int(entry.value) for entry in current_field.changes] except: continue plot_field(current_field.name, callgraph, tstamps, 
samples, nr_samples, current_field.plot_fmt, current_field.table, verbose) if __name__ == '__main__': import sys c = class_definition(sys.argv[1], sys.argv[2]) for field in c.fields.values(): print("%s: %s" % (field, field.table)) for method in c.methods.values(): print("%d: %s" % (method.function_id, method.name)) dwarves-dfsg-1.15/pahole.c000066400000000000000000000754741350511416500154720ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2006 Mandriva Conectiva S.A. Copyright (C) 2006 Arnaldo Carvalho de Melo Copyright (C) 2007-2008 Arnaldo Carvalho de Melo */ #include #include #include #include #include #include #include #include #include "dwarves_reorganize.h" #include "dwarves.h" #include "dutil.h" #include "ctf_encoder.h" #include "btf_encoder.h" static bool btf_encode; static bool ctf_encode; static bool first_obj_only; static uint8_t class__include_anonymous; static uint8_t class__include_nested_anonymous; static uint8_t word_size, original_word_size; static char *class__exclude_prefix; static size_t class__exclude_prefix_len; static char *class__include_prefix; static size_t class__include_prefix_len; static char *cu__exclude_prefix; static size_t cu__exclude_prefix_len; static char *decl_exclude_prefix; static size_t decl_exclude_prefix_len; static uint16_t nr_holes; static uint16_t nr_bit_holes; static uint16_t hole_size_ge; static uint8_t show_packable; static uint8_t global_verbose; static uint8_t recursive; static size_t cacheline_size; static uint8_t find_containers; static uint8_t find_pointers_in_structs; static int reorganize; static bool show_private_classes; static bool defined_in; static int show_reorg_steps; static char *class_name; static struct strlist *class_names; static char separator = '\t'; static struct conf_fprintf conf = { .emit_stats = 1, }; static struct conf_load conf_load = { .conf_fprintf = &conf, }; struct structure { struct list_head node; struct rb_node rb_node; char *name; uint32_t nr_files; uint32_t nr_methods; }; static struct structure *structure__new(const char *name) { struct structure *st = malloc(sizeof(*st)); if (st != NULL) { st->name = strdup(name); if (st->name == NULL) { free(st); return NULL; } st->nr_files = 1; st->nr_methods = 0; } return st; } static void structure__delete(struct structure *st) { free(st->name); free(st); } static struct rb_root structures__tree = RB_ROOT; static LIST_HEAD(structures__list); static struct structure *structures__add(struct class *class, const struct cu *cu, bool *existing_entry) { struct rb_node **p = &structures__tree.rb_node; struct rb_node *parent = NULL; struct structure *str; const char *new_class_name = class__name(class, cu); while (*p != NULL) { int rc; parent = *p; str = rb_entry(parent, struct structure, rb_node); rc = strcmp(str->name, new_class_name); if (rc > 0) p = &(*p)->rb_left; else if (rc < 0) p = &(*p)->rb_right; else { *existing_entry = true; return str; } } str = structure__new(new_class_name); if (str == NULL) return NULL; *existing_entry = false; rb_link_node(&str->rb_node, parent, p); rb_insert_color(&str->rb_node, &structures__tree); /* For linear traversals */ list_add_tail(&str->node, &structures__list); return str; } void structures__delete(void) { struct rb_node *next = rb_first(&structures__tree); while (next) { struct structure *pos = rb_entry(next, struct structure, rb_node); next = rb_next(&pos->rb_node); rb_erase(&pos->rb_node, &structures__tree); structure__delete(pos); } } static void nr_definitions_formatter(struct structure *st) { 
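	/* -T/--nr_definitions output: one record per struct with the
	 * number of compilation units in which it was defined */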
printf("%s%c%u\n", st->name, separator, st->nr_files); } static void nr_members_formatter(struct class *class, struct cu *cu, uint32_t id __unused) { printf("%s%c%u\n", class__name(class, cu), separator, class__nr_members(class)); } static void nr_methods_formatter(struct structure *st) { printf("%s%c%u\n", st->name, separator, st->nr_methods); } static void size_formatter(struct class *class, struct cu *cu, uint32_t id __unused) { printf("%s%c%d%c%u\n", class__name(class, cu), separator, class__size(class), separator, class->nr_holes); } static void class_name_len_formatter(struct class *class, struct cu *cu, uint32_t id __unused) { const char *name = class__name(class, cu); printf("%s%c%zd\n", name, separator, strlen(name)); } static void class_name_formatter(struct class *class, struct cu *cu, uint32_t id __unused) { puts(class__name(class, cu)); } static void class_formatter(struct class *class, struct cu *cu, uint32_t id) { struct tag *typedef_alias = NULL; struct tag *tag = class__tag(class); const char *name = class__name(class, cu); if (name == NULL) { /* * Find the first typedef for this struct, this is enough * as if we optimize the struct all the typedefs will be * affected. */ typedef_alias = cu__find_first_typedef_of_type(cu, id); /* * If there is no typedefs for this anonymous struct it is * found just inside another struct, and in this case it'll * be printed when the type it is in is printed, but if * the user still wants to see its statistics, just use * --nested_anon_include. */ if (typedef_alias == NULL && !class__include_nested_anonymous) return; } if (typedef_alias != NULL) { struct type *tdef = tag__type(typedef_alias); conf.prefix = "typedef"; conf.suffix = type__name(tdef, cu); } else conf.prefix = conf.suffix = NULL; tag__fprintf(tag, cu, &conf, stdout); putchar('\n'); } static void print_packable_info(struct class *c, struct cu *cu, uint32_t id) { const struct tag *t = class__tag(c); const size_t orig_size = class__size(c); const size_t new_size = class__size(c->priv); const size_t savings = orig_size - new_size; const char *name = class__name(c, cu); /* Anonymous struct? Try finding a typedef */ if (name == NULL) { const struct tag *tdef = cu__find_first_typedef_of_type(cu, id); if (tdef != NULL) name = class__name(tag__class(tdef), cu); } if (name != NULL) printf("%s%c%zd%c%zd%c%zd\n", name, separator, orig_size, separator, new_size, separator, savings); else printf("%s(%d)%c%zd%c%zd%c%zd\n", tag__decl_file(t, cu), tag__decl_line(t, cu), separator, orig_size, separator, new_size, separator, savings); } static void (*stats_formatter)(struct structure *st); static void print_stats(void) { struct structure *pos; list_for_each_entry(pos, &structures__list, node) stats_formatter(pos); } static struct class *class__filter(struct class *class, struct cu *cu, uint32_t tag_id); static void (*formatter)(struct class *class, struct cu *cu, uint32_t id) = class_formatter; static void print_classes(struct cu *cu) { uint32_t id; struct class *pos; cu__for_each_struct_or_union(cu, id, pos) { bool existing_entry; struct structure *str; if (pos->type.namespace.name == 0 && !(class__include_anonymous || class__include_nested_anonymous)) continue; if (!class__filter(pos, cu, id)) continue; /* * FIXME: No sense in adding an anonymous struct to the list of * structs already printed, as we look for the name... The * right fix probably will be to call class__fprintf on a * in-memory FILE, do a hash, and look it by full contents, not * by name. 
And this is needed for CTF as well, but its late now * and I'm sleepy, will leave for later... */ if (pos->type.namespace.name != 0) { str = structures__add(pos, cu, &existing_entry); if (str == NULL) { fprintf(stderr, "pahole: insufficient memory for " "processing %s, skipping it...\n", cu->name); return; } /* Already printed... */ if (existing_entry) { str->nr_files++; continue; } } if (show_packable && !global_verbose) print_packable_info(pos, cu, id); else if (formatter != NULL) formatter(pos, cu, id); } } static struct cu *cu__filter(struct cu *cu) { if (cu__exclude_prefix != NULL && (cu->name == NULL || strncmp(cu__exclude_prefix, cu->name, cu__exclude_prefix_len) == 0)) return NULL; return cu; } static int class__packable(struct class *class, struct cu *cu) { struct class *clone; if (class->nr_holes == 0 && class->nr_bit_holes == 0) return 0; clone = class__clone(class, NULL, cu); if (clone == NULL) return 0; class__reorganize(clone, cu, 0, stdout); if (class__size(class) > class__size(clone)) { class->priv = clone; return 1; } /* FIXME: we need to free in the right order, * cu->obstack is being corrupted... class__delete(clone, cu); */ return 0; } static struct class *class__filter(struct class *class, struct cu *cu, uint32_t tag_id) { struct tag *tag = class__tag(class); const char *name; if (!tag->top_level) { class__find_holes(class); if (!show_private_classes) return NULL; } name = class__name(class, cu); if (class__is_declaration(class)) return NULL; if (!class__include_anonymous && name == NULL) return NULL; if (class__exclude_prefix != NULL) { if (name == NULL) { const struct tag *tdef = cu__find_first_typedef_of_type(cu, tag_id); if (tdef != NULL) { struct class *c = tag__class(tdef); name = class__name(c, cu); } } if (name != NULL && strncmp(class__exclude_prefix, name, class__exclude_prefix_len) == 0) return NULL; } if (class__include_prefix != NULL) { if (name == NULL) { const struct tag *tdef = cu__find_first_typedef_of_type(cu, tag_id); if (tdef != NULL) { struct class *c = tag__class(tdef); name = class__name(c, cu); } } if (name != NULL && strncmp(class__include_prefix, name, class__include_prefix_len) != 0) return NULL; } if (decl_exclude_prefix != NULL && (!tag__decl_file(tag, cu) || strncmp(decl_exclude_prefix, tag__decl_file(tag, cu), decl_exclude_prefix_len) == 0)) return NULL; /* * The following only make sense for structs, i.e. 'struct class', * and as we can get here with a union, that is represented by a 'struct type', * bail out if we get here with an union */ if (!tag__is_struct(class__tag(class))) return show_packable ? NULL : class; if (tag->top_level) class__find_holes(class); if (class->nr_holes < nr_holes || class->nr_bit_holes < nr_bit_holes || (hole_size_ge != 0 && !class__has_hole_ge(class, hole_size_ge))) return NULL; if (show_packable && !class__packable(class, cu)) return NULL; return class; } static void union__find_new_size(struct tag *tag, struct cu *cu); static void class__resize_LP(struct tag *tag, struct cu *cu) { struct tag *tag_pos; struct class *class = tag__class(tag); size_t word_size_diff; size_t orig_size = class->type.size; if (tag__type(tag)->resized) return; tag__type(tag)->resized = 1; if (original_word_size > word_size) word_size_diff = original_word_size - word_size; else word_size_diff = word_size - original_word_size; type__for_each_tag(tag__type(tag), tag_pos) { struct tag *type; size_t diff = 0; size_t array_multiplier = 1; /* we want only data members, i.e. 
with byte_offset attr */ if (tag_pos->tag != DW_TAG_member && tag_pos->tag != DW_TAG_inheritance) continue; type = cu__type(cu, tag_pos->type); tag__assert_search_result(type); if (type->tag == DW_TAG_array_type) { int i; for (i = 0; i < tag__array_type(type)->dimensions; ++i) array_multiplier *= tag__array_type(type)->nr_entries[i]; type = cu__type(cu, type->type); tag__assert_search_result(type); } if (tag__is_typedef(type)) { type = tag__follow_typedef(type, cu); tag__assert_search_result(type); } switch (type->tag) { case DW_TAG_base_type: { struct base_type *bt = tag__base_type(type); char bf[64]; const char *name = base_type__name(bt, cu, bf, sizeof(bf)); if (strcmp(name, "long int") != 0 && strcmp(name, "long unsigned int") != 0) break; /* fallthru */ } case DW_TAG_pointer_type: diff = word_size_diff; break; case DW_TAG_structure_type: case DW_TAG_union_type: if (tag__is_union(type)) union__find_new_size(type, cu); else class__resize_LP(type, cu); diff = tag__type(type)->size_diff; break; } diff *= array_multiplier; if (diff != 0) { struct class_member *m = tag__class_member(tag_pos); if (original_word_size > word_size) { class->type.size -= diff; class__subtract_offsets_from(class, m, diff); } else { class->type.size += diff; class__add_offsets_from(class, m, diff); } } } if (original_word_size > word_size) tag__type(tag)->size_diff = orig_size - class->type.size; else tag__type(tag)->size_diff = class->type.size - orig_size; class__find_holes(class); class__fixup_alignment(class, cu); } static void union__find_new_size(struct tag *tag, struct cu *cu) { struct tag *tag_pos; struct type *type = tag__type(tag); size_t max_size = 0; if (type->resized) return; type->resized = 1; type__for_each_tag(type, tag_pos) { struct tag *type; size_t size; /* we want only data members, i.e. with byte_offset attr */ if (tag_pos->tag != DW_TAG_member && tag_pos->tag != DW_TAG_inheritance) continue; type = cu__type(cu, tag_pos->type); tag__assert_search_result(type); if (tag__is_typedef(type)) type = tag__follow_typedef(type, cu); if (tag__is_union(type)) union__find_new_size(type, cu); else if (tag__is_struct(type)) class__resize_LP(type, cu); size = tag__size(type, cu); if (size > max_size) max_size = size; } if (max_size > type->size) type->size_diff = max_size - type->size; else type->size_diff = type->size - max_size; type->size = max_size; } static void tag__fixup_word_size(struct tag *tag, struct cu *cu) { if (tag__is_struct(tag) || tag__is_union(tag)) { struct tag *pos; namespace__for_each_tag(tag__namespace(tag), pos) tag__fixup_word_size(pos, cu); } switch (tag->tag) { case DW_TAG_base_type: { struct base_type *bt = tag__base_type(tag); /* * This shouldn't happen, but at least on a tcp_ipv6.c * built with GNU C 4.3.0 20080130 (Red Hat 4.3.0-0.7), * one was found, so just bail out. 
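In other words, a DW_TAG_base_type with no DW_AT_name; returning early below keeps the -w word-size fixup from dereferencing a NULL name.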
*/ if (!bt->name) return; char bf[64]; const char *name = base_type__name(bt, cu, bf, sizeof(bf)); if (strcmp(name, "long int") == 0 || strcmp(name, "long unsigned int") == 0) bt->bit_size = word_size * 8; } break; case DW_TAG_structure_type: class__resize_LP(tag, cu); break; case DW_TAG_union_type: union__find_new_size(tag, cu); break; } return; } static void cu_fixup_word_size_iterator(struct cu *cu) { original_word_size = cu->addr_size; cu->addr_size = word_size; uint32_t id; struct tag *pos; cu__for_each_type(cu, id, pos) tag__fixup_word_size(pos, cu); } static void cu__account_nr_methods(struct cu *cu) { struct function *pos_function; struct structure *str; uint32_t id; cu__for_each_function(cu, id, pos_function) { struct class_member *pos; list_for_each_entry(pos, &pos_function->proto.parms, tag.node) { struct tag *type = cu__type(cu, pos->tag.type); if (type == NULL || !tag__is_pointer(type)) continue; type = cu__type(cu, type->type); if (type == NULL || !tag__is_struct(type)) continue; struct type *ctype = tag__type(type); if (ctype->namespace.name == 0) continue; struct class *class = tag__class(type); if (!class__filter(class, cu, 0)) continue; bool existing_entry; str = structures__add(class, cu, &existing_entry); if (str == NULL) { fprintf(stderr, "pahole: insufficient memory " "for processing %s, skipping it...\n", cu->name); return; } if (!existing_entry) class__find_holes(class); ++str->nr_methods; } } } static char tab[128]; static void print_structs_with_pointer_to(const struct cu *cu, uint32_t type) { struct class *pos; struct class_member *pos_member; uint32_t id; cu__for_each_struct(cu, id, pos) { bool looked = false; struct structure *str; if (pos->type.namespace.name == 0) continue; type__for_each_member(&pos->type, pos_member) { struct tag *ctype = cu__type(cu, pos_member->tag.type); tag__assert_search_result(ctype); if (!tag__is_pointer_to(ctype, type)) continue; if (!looked) { bool existing_entry; str = structures__add(pos, cu, &existing_entry); if (str == NULL) { fprintf(stderr, "pahole: insufficient memory for " "processing %s, skipping it...\n", cu->name); return; } /* * We already printed this struct in another CU */ if (existing_entry) break; looked = true; } printf("%s: %s\n", str->name, class_member__name(pos_member, cu)); } } } static void print_containers(const struct cu *cu, uint32_t type, int ident) { struct class *pos; uint32_t id; cu__for_each_struct(cu, id, pos) { if (pos->type.namespace.name == 0) continue; const uint32_t n = type__nr_members_of_type(&pos->type, type); if (n == 0) continue; if (ident == 0) { bool existing_entry; struct structure *str = structures__add(pos, cu, &existing_entry); if (str == NULL) { fprintf(stderr, "pahole: insufficient memory for " "processing %s, skipping it...\n", cu->name); return; } /* * We already printed this struct in another CU */ if (existing_entry) break; } printf("%.*s%s", ident * 2, tab, class__name(pos, cu)); if (global_verbose) printf(": %u", n); putchar('\n'); if (recursive) print_containers(cu, id, ident + 1); } } /* Name and version of program. 
*/ ARGP_PROGRAM_VERSION_HOOK_DEF = dwarves_print_version; #define ARGP_flat_arrays 300 #define ARGP_show_private_classes 301 #define ARGP_fixup_silly_bitfields 302 #define ARGP_first_obj_only 303 #define ARGP_classes_as_structs 304 #define ARGP_hex_fmt 305 #define ARGP_suppress_aligned_attribute 306 #define ARGP_suppress_force_paddings 307 #define ARGP_suppress_packed 308 static const struct argp_option pahole__options[] = { { .name = "bit_holes", .key = 'B', .arg = "NR_HOLES", .doc = "Show only structs at least NR_HOLES bit holes" }, { .name = "cacheline_size", .key = 'c', .arg = "SIZE", .doc = "set cacheline size to SIZE" }, { .name = "class_name", .key = 'C', .arg = "CLASS_NAME", .doc = "Show just this class" }, { .name = "find_pointers_to", .key = 'f', .arg = "CLASS_NAME", .doc = "Find pointers to CLASS_NAME" }, { .name = "format_path", .key = 'F', .arg = "FORMAT_LIST", .doc = "List of debugging formats to try" }, { .name = "contains", .key = 'i', .arg = "CLASS_NAME", .doc = "Show classes that contains CLASS_NAME" }, { .name = "show_decl_info", .key = 'I', .doc = "Show the file and line number where the tags were defined" }, { .name = "holes", .key = 'H', .arg = "NR_HOLES", .doc = "show only structs with at least NR_HOLES holes", }, { .name = "hole_size_ge", .key = 'z', .arg = "HOLE_SIZE", .doc = "show only structs with at least one hole greater " "or equal to HOLE_SIZE", }, { .name = "packable", .key = 'P', .doc = "show only structs that has holes that can be packed", }, { .name = "expand_types", .key = 'E', .doc = "expand class members", }, { .name = "nr_members", .key = 'n', .doc = "show number of members", }, { .name = "rel_offset", .key = 'r', .doc = "show relative offsets of members in inner structs" }, { .name = "recursive", .key = 'd', .doc = "recursive mode, affects several other flags", }, { .name = "reorganize", .key = 'R', .doc = "reorg struct trying to kill holes", }, { .name = "show_reorg_steps", .key = 'S', .doc = "show the struct layout at each reorganization step", }, { .name = "class_name_len", .key = 'N', .doc = "show size of classes", }, { .name = "show_first_biggest_size_base_type_member", .key = 'l', .doc = "show first biggest size base_type member", }, { .name = "nr_methods", .key = 'm', .doc = "show number of methods", }, { .name = "show_only_data_members", .key = 'M', .doc = "show only the members that use space in the class layout", }, { .name = "expand_pointers", .key = 'p', .doc = "expand class pointer members", }, { .name = "sizes", .key = 's', .doc = "show size of classes", }, { .name = "separator", .key = 't', .arg = "SEP", .doc = "use SEP as the field separator", }, { .name = "nr_definitions", .key = 'T', .doc = "show how many times struct was defined", }, { .name = "decl_exclude", .key = 'D', .arg = "PREFIX", .doc = "exclude classes declared in files with PREFIX", }, { .name = "exclude", .key = 'x', .arg = "PREFIX", .doc = "exclude PREFIXed classes", }, { .name = "prefix_filter", .key = 'y', .arg = "PREFIX", .doc = "include PREFIXed classes", }, { .name = "cu_exclude", .key = 'X', .arg = "PREFIX", .doc = "exclude PREFIXed compilation units", }, { .name = "anon_include", .key = 'a', .doc = "include anonymous classes", }, { .name = "nested_anon_include", .key = 'A', .doc = "include nested (inside other structs) anonymous classes", }, { .name = "quiet", .key = 'q', .doc = "be quieter", }, { .name = "defined_in", .key = 'u', .doc = "show CUs where CLASS_NAME (-C) is defined", }, { .name = "verbose", .key = 'V', .doc = "be verbose", }, { .name = "word_size", 
.key = 'w', .arg = "WORD_SIZE", .doc = "change the arch word size to WORD_SIZE" }, { .name = "ctf_encode", .key = 'Z', .doc = "Encode as CTF", }, { .name = "flat_arrays", .key = ARGP_flat_arrays, .doc = "Flat arrays", }, { .name = "suppress_aligned_attribute", .key = ARGP_suppress_aligned_attribute, .doc = "Suppress __attribute__((aligned(N))", }, { .name = "suppress_force_paddings", .key = ARGP_suppress_force_paddings, .doc = "Suppress int :N paddings at the end", }, { .name = "suppress_packed", .key = ARGP_suppress_packed, .doc = "Suppress output of inferred __attribute__((__packed__))", }, { .name = "show_private_classes", .key = ARGP_show_private_classes, .doc = "Show classes that are defined inside other classes or in functions", }, { .name = "fixup_silly_bitfields", .key = ARGP_fixup_silly_bitfields, .doc = "Fix silly bitfields such as int foo:32", }, { .name = "first_obj_only", .key = ARGP_first_obj_only, .doc = "Only process the first object file in the binary", }, { .name = "classes_as_structs", .key = ARGP_classes_as_structs, .doc = "Use 'struct' when printing classes", }, { .name = "hex", .key = ARGP_hex_fmt, .doc = "Print offsets and sizes in hexadecimal", }, { .name = "btf_encode", .key = 'J', .doc = "Encode as BTF", }, { .name = NULL, } }; static error_t pahole__options_parser(int key, char *arg, struct argp_state *state) { switch (key) { case ARGP_KEY_INIT: if (state->child_inputs != NULL) state->child_inputs[0] = state->input; break; case 'A': class__include_nested_anonymous = 1; break; case 'a': class__include_anonymous = 1; break; case 'B': nr_bit_holes = atoi(arg); break; case 'C': class_name = arg; break; case 'c': cacheline_size = atoi(arg); break; case 'D': decl_exclude_prefix = arg; decl_exclude_prefix_len = strlen(decl_exclude_prefix); conf_load.extra_dbg_info = 1; break; case 'd': recursive = 1; break; case 'E': conf.expand_types = 1; break; case 'f': find_pointers_in_structs = 1; class_name = arg; break; case 'F': conf_load.format_path = arg; break; case 'H': nr_holes = atoi(arg); break; case 'I': conf.show_decl_info = 1; conf_load.extra_dbg_info = 1; break; case 'i': find_containers = 1; class_name = arg; break; case 'J': btf_encode = 1; no_bitfield_type_recode = true; break; case 'l': conf.show_first_biggest_size_base_type_member = 1; break; case 'M': conf.show_only_data_members = 1; break; case 'm': stats_formatter = nr_methods_formatter; break; case 'N': formatter = class_name_len_formatter; break; case 'n': formatter = nr_members_formatter; break; case 'P': show_packable = 1; conf_load.extra_dbg_info = 1; break; case 'p': conf.expand_pointers = 1; break; case 'q': conf.emit_stats = 0; conf.suppress_comments = 1; conf.suppress_offset_comment = 1; break; case 'R': reorganize = 1; break; case 'r': conf.rel_offset = 1; break; case 'S': show_reorg_steps = 1; break; case 's': formatter = size_formatter; break; case 'T': stats_formatter = nr_definitions_formatter; formatter = NULL; break; case 't': separator = arg[0]; break; case 'u': defined_in = 1; break; case 'V': global_verbose = 1; break; case 'w': word_size = atoi(arg); break; case 'X': cu__exclude_prefix = arg; cu__exclude_prefix_len = strlen(cu__exclude_prefix); break; case 'x': class__exclude_prefix = arg; class__exclude_prefix_len = strlen(class__exclude_prefix); break; case 'y': class__include_prefix = arg; class__include_prefix_len = strlen(class__include_prefix); break; case 'z': hole_size_ge = atoi(arg); if (!global_verbose) formatter = class_name_formatter; break; case 'Z': ctf_encode = 1; break; case 
ARGP_flat_arrays: conf.flat_arrays = 1; break; case ARGP_suppress_aligned_attribute: conf.suppress_aligned_attribute = 1; break; case ARGP_suppress_force_paddings: conf.suppress_force_paddings = 1; break; case ARGP_suppress_packed: conf.suppress_packed = 1; break; case ARGP_show_private_classes: show_private_classes = true; conf.show_only_data_members = 1; break; case ARGP_fixup_silly_bitfields: conf_load.fixup_silly_bitfields = 1; break; case ARGP_first_obj_only: first_obj_only = true; break; case ARGP_classes_as_structs: conf.classes_as_structs = 1; break; case ARGP_hex_fmt: conf.hex_fmt = 1; break; default: return ARGP_ERR_UNKNOWN; } return 0; } static const char pahole__args_doc[] = "FILE"; static struct argp pahole__argp = { .options = pahole__options, .parser = pahole__options_parser, .args_doc = pahole__args_doc, }; static void do_reorg(struct tag *class, struct cu *cu) { int savings; const uint8_t reorg_verbose = show_reorg_steps ? 2 : global_verbose; struct class *clone = class__clone(tag__class(class), NULL, cu); if (clone == NULL) { fprintf(stderr, "pahole: out of memory!\n"); exit(EXIT_FAILURE); } class__reorganize(clone, cu, reorg_verbose, stdout); savings = class__size(tag__class(class)) - class__size(clone); if (savings != 0 && reorg_verbose) { putchar('\n'); if (show_reorg_steps) puts("/* Final reorganized struct: */"); } tag__fprintf(class__tag(clone), cu, &conf, stdout); if (savings != 0) { const size_t cacheline_savings = (tag__nr_cachelines(class, cu) - tag__nr_cachelines(class__tag(clone), cu)); printf(" /* saved %d byte%s", savings, savings != 1 ? "s" : ""); if (cacheline_savings != 0) printf(" and %zu cacheline%s", cacheline_savings, cacheline_savings != 1 ? "s" : ""); puts("! */"); } else putchar('\n'); /* FIXME: we need to free in the right order, * cu->obstack is being corrupted... class__delete(clone, cu); */ } static enum load_steal_kind pahole_stealer(struct cu *cu, struct conf_load *conf_load __unused) { int ret = LSK__DELETE; if (!cu__filter(cu)) goto filter_it; if (btf_encode) { cu__encode_btf(cu, global_verbose); return LSK__KEEPIT; } if (ctf_encode) { cu__encode_ctf(cu, global_verbose); /* * We still have to get the type signature code merged to eliminate * dups, reference another CTF file, etc, so for now just encode the * first cu that is let thru by cu__filter. */ goto dump_and_stop; } if (class_name == NULL) { if (stats_formatter == nr_methods_formatter) { cu__account_nr_methods(cu); goto dump_it; } if (word_size != 0) cu_fixup_word_size_iterator(cu); memset(tab, ' ', sizeof(tab) - 1); print_classes(cu); goto dump_it; } struct str_node *pos; struct rb_node *next = rb_first(&class_names->entries); while (next) { pos = rb_entry(next, struct str_node, rb_node); next = rb_next(&pos->rb_node); static type_id_t class_id; bool include_decls = find_pointers_in_structs != 0 || stats_formatter == nr_methods_formatter; struct tag *class = cu__find_struct_or_union_by_name(cu, pos->s, include_decls, &class_id); if (class == NULL) continue; if (defined_in) { puts(cu->name); goto dump_it; } /* * Ok, found it, so remove from the list to avoid printing it * twice, in another CU. 
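Once every name given via -C has been matched, the list drains and the strlist__empty() check below stops the load early with LSK__STOP_LOADING.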
*/ strlist__remove(class_names, pos); class__find_holes(tag__class(class)); if (reorganize) { if (tag__is_struct(class)) do_reorg(class, cu); } else if (find_containers) print_containers(cu, class_id, 0); else if (find_pointers_in_structs) print_structs_with_pointer_to(cu, class_id); else { /* * We don't need to print it for every compile unit * but the previous options need */ tag__fprintf(class, cu, &conf, stdout); putchar('\n'); } } /* * If we found all the entries in --class_name, stop */ if (strlist__empty(class_names)) { dump_and_stop: ret = LSK__STOP_LOADING; } dump_it: if (first_obj_only) ret = LSK__STOP_LOADING; filter_it: return ret; } static int add_class_name_entry(const char *s) { if (strncmp(s, "file://", 7) == 0) { if (strlist__load(class_names, s + 7)) return -1; } else switch (strlist__add(class_names, s)) { case -EEXIST: if (global_verbose) fprintf(stderr, "pahole: %s dup in -C, ignoring\n", s); break; case -ENOMEM: return -1; } return 0; } static int populate_class_names(void) { char *s = class_name, *sep; while ((sep = strchr(s, ',')) != NULL) { *sep = '\0'; if (add_class_name_entry(s)) return -1; *sep = ','; s = sep + 1; } return *s ? add_class_name_entry(s) : 0; } int main(int argc, char *argv[]) { int err, remaining, rc = EXIT_FAILURE; if (argp_parse(&pahole__argp, argc, argv, 0, &remaining, NULL) || (remaining == argc && class_name == NULL)) { argp_help(&pahole__argp, stderr, ARGP_HELP_SEE, argv[0]); goto out; } class_names = strlist__new(true); if (class_names == NULL || dwarves__init(cacheline_size)) { fputs("pahole: insufficient memory\n", stderr); goto out; } if (class_name && populate_class_names()) goto out_dwarves_exit; struct cus *cus = cus__new(); if (cus == NULL) { fputs("pahole: insufficient memory\n", stderr); goto out_dwarves_exit; } conf_load.steal = pahole_stealer; err = cus__load_files(cus, &conf_load, argv + remaining); if (err != 0) { cus__fprintf_load_files_err(cus, "pahole", argv + remaining, err, stderr); goto out_cus_delete; } if (btf_encode) { err = btf_encoder__encode(); if (err) { fputs("Failed to encode BTF\n", stderr); goto out_cus_delete; } } if (stats_formatter != NULL) print_stats(); rc = EXIT_SUCCESS; out_cus_delete: #ifdef DEBUG_CHECK_LEAKS cus__delete(cus); structures__delete(); #endif out_dwarves_exit: #ifdef DEBUG_CHECK_LEAKS dwarves__exit(); #endif out: #ifdef DEBUG_CHECK_LEAKS strlist__delete(class_names); #endif return rc; } dwarves-dfsg-1.15/pdwtags.c000066400000000000000000000066221350511416500156600ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2007-2016 Arnaldo Carvalho de Melo */ #include #include #include #include #include "dwarves.h" #include "dutil.h" static struct conf_fprintf conf = { .emit_stats = 1, }; static void emit_tag(struct tag *tag, uint32_t tag_id, struct cu *cu) { printf("/* %d */\n", tag_id); if (tag__is_struct(tag)) class__find_holes(tag__class(tag)); if (tag->tag == DW_TAG_base_type) { char bf[64]; const char *name = base_type__name(tag__base_type(tag), cu, bf, sizeof(bf)); if (name == NULL) printf("anonymous base_type\n"); else puts(name); } else if (tag__is_pointer(tag)) printf(" /* pointer to %lld */\n", (unsigned long long)tag->type); else tag__fprintf(tag, cu, &conf, stdout); printf(" /* size: %zd */\n\n", tag__size(tag, cu)); } static int cu__emit_tags(struct cu *cu) { uint32_t i; struct tag *tag; puts("/* Types: */\n"); cu__for_each_type(cu, i, tag) emit_tag(tag, i, cu); puts("/* Functions: */\n"); conf.no_semicolon = true; struct function *function; 
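	/*
	 * cu__for_each_function() walks the CU's function table by index,
	 * just like cu__for_each_type() above and cu__for_each_variable()
	 * below.  A minimal sketch of the idiom, assuming a loaded
	 * struct cu *cu:
	 *
	 *	uint32_t id;
	 *	struct function *fn;
	 *
	 *	cu__for_each_function(cu, id, fn)
	 *		puts(function__name(fn, cu));
	 */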
	cu__for_each_function(cu, i, function) {
		tag__fprintf(function__tag(function), cu, &conf, stdout);
		putchar('\n');
		lexblock__fprintf(&function->lexblock, cu, function, 0, &conf, stdout);
		printf(" /* size: %zd */\n\n", tag__size(function__tag(function), cu));
	}
	conf.no_semicolon = false;

	puts("\n\n/* Variables: */\n");
	cu__for_each_variable(cu, i, tag) {
		tag__fprintf(tag, cu, NULL, stdout);
		printf(" /* size: %zd */\n\n", tag__size(tag, cu));
	}

	return 0;
}

static enum load_steal_kind pdwtags_stealer(struct cu *cu, struct conf_load *conf_load __unused)
{
	cu__emit_tags(cu);
	return LSK__DELETE;
}

static struct conf_load pdwtags_conf_load = {
	.steal = pdwtags_stealer,
	.conf_fprintf = &conf,
};

/* Name and version of program. */
ARGP_PROGRAM_VERSION_HOOK_DEF = dwarves_print_version;

static const struct argp_option pdwtags__options[] = {
	{
		.name = "format_path",
		.key = 'F',
		.arg = "FORMAT_LIST",
		.doc = "List of debugging formats to try"
	},
	{
		.key = 'V',
		.name = "verbose",
		.doc = "show details",
	},
	{
		.name = NULL,
	}
};

static error_t pdwtags__options_parser(int key, char *arg,
				       struct argp_state *state)
{
	switch (key) {
	case ARGP_KEY_INIT:
		if (state->child_inputs != NULL)
			state->child_inputs[0] = state->input;
		break;
	case 'F': pdwtags_conf_load.format_path = arg;	break;
	case 'V': conf.show_decl_info = 1;		break;
	default:  return ARGP_ERR_UNKNOWN;
	}
	return 0;
}

static const char pdwtags__args_doc[] = "FILE";

static struct argp pdwtags__argp = {
	.options = pdwtags__options,
	.parser = pdwtags__options_parser,
	.args_doc = pdwtags__args_doc,
};

int main(int argc, char *argv[])
{
	int remaining, rc = EXIT_FAILURE, err;
	struct cus *cus = cus__new();

	if (dwarves__init(0) || cus == NULL) {
		fputs("pdwtags: insufficient memory\n", stderr);
		goto out;
	}

	if (argp_parse(&pdwtags__argp, argc, argv, 0, &remaining, NULL) ||
	    remaining == argc) {
		argp_help(&pdwtags__argp, stderr, ARGP_HELP_SEE, argv[0]);
		goto out;
	}

	err = cus__load_files(cus, &pdwtags_conf_load, argv + remaining);
	if (err == 0) {
		rc = EXIT_SUCCESS;
		goto out;
	}

	cus__fprintf_load_files_err(cus, "pdwtags", argv + remaining, err, stderr);
out:
	cus__delete(cus);
	dwarves__exit();
	return rc;
}
dwarves-dfsg-1.15/pfunct.c000066400000000000000000000440221350511416500155020ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only
  Copyright (C) 2006 Mandriva Conectiva S.A.
Copyright (C) 2006 Arnaldo Carvalho de Melo Copyright (C) 2007 Arnaldo Carvalho de Melo */ #include #include #include #include #include #include #include #include #include #include "dwarves.h" #include "dwarves_emit.h" #include "dutil.h" #include "elf_symtab.h" static int verbose; static int show_inline_expansions; static int show_variables; static int show_externals; static int show_cc_inlined; static int show_cc_uninlined; static char *symtab_name; static bool show_prototypes; static bool expand_types; static bool compilable_output; static struct type_emissions emissions; static uint64_t addr; static struct conf_fprintf conf; static struct conf_load conf_load = { .conf_fprintf = &conf, }; struct fn_stats { struct list_head node; struct tag *tag; const struct cu *cu; uint32_t nr_expansions; uint32_t size_expansions; uint32_t nr_files; }; static struct fn_stats *fn_stats__new(struct tag *tag, const struct cu *cu) { struct fn_stats *stats = malloc(sizeof(*stats)); if (stats != NULL) { const struct function *fn = tag__function(tag); stats->tag = tag; stats->cu = cu; stats->nr_files = 1; stats->nr_expansions = fn->cu_total_nr_inline_expansions; stats->size_expansions = fn->cu_total_size_inline_expansions; } return stats; } static void fn_stats__delete(struct fn_stats *stats) { free(stats); } static LIST_HEAD(fn_stats__list); static struct fn_stats *fn_stats__find(const char *name) { struct fn_stats *pos; list_for_each_entry(pos, &fn_stats__list, node) if (strcmp(function__name(tag__function(pos->tag), pos->cu), name) == 0) return pos; return NULL; } static void fn_stats__delete_list(void) { struct fn_stats *pos, *n; list_for_each_entry_safe(pos, n, &fn_stats__list, node) { list_del_init(&pos->node); fn_stats__delete(pos); } } static void fn_stats__add(struct tag *tag, const struct cu *cu) { struct fn_stats *fns = fn_stats__new(tag, cu); if (fns != NULL) list_add(&fns->node, &fn_stats__list); } static void fn_stats_inline_exps_fmtr(const struct fn_stats *stats) { struct function *fn = tag__function(stats->tag); if (fn->lexblock.nr_inline_expansions > 0) printf("%s: %u %d\n", function__name(fn, stats->cu), fn->lexblock.nr_inline_expansions, fn->lexblock.size_inline_expansions); } static void fn_stats_labels_fmtr(const struct fn_stats *stats) { struct function *fn = tag__function(stats->tag); if (fn->lexblock.nr_labels > 0) printf("%s: %u\n", function__name(fn, stats->cu), fn->lexblock.nr_labels); } static void fn_stats_variables_fmtr(const struct fn_stats *stats) { struct function *fn = tag__function(stats->tag); if (fn->lexblock.nr_variables > 0) printf("%s: %u\n", function__name(fn, stats->cu), fn->lexblock.nr_variables); } static void fn_stats_nr_parms_fmtr(const struct fn_stats *stats) { struct function *fn = tag__function(stats->tag); printf("%s: %u\n", function__name(fn, stats->cu), fn->proto.nr_parms); } static void fn_stats_name_len_fmtr(const struct fn_stats *stats) { struct function *fn = tag__function(stats->tag); const char *name = function__name(fn, stats->cu); printf("%s: %zd\n", name, strlen(name)); } static void fn_stats_size_fmtr(const struct fn_stats *stats) { struct function *fn = tag__function(stats->tag); const size_t size = function__size(fn); if (size != 0) printf("%s: %zd\n", function__name(fn, stats->cu), size); } static void fn_stats_fmtr(const struct fn_stats *stats) { if (verbose || show_prototypes) { tag__fprintf(stats->tag, stats->cu, &conf, stdout); putchar('\n'); if (show_prototypes) return; if (show_variables || show_inline_expansions) 
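			/* -T and -i also dump the per-function variables and
			 * inline expansions, not just the prototype */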
function__fprintf_stats(stats->tag, stats->cu, &conf, stdout); printf("/* definitions: %u */\n", stats->nr_files); putchar('\n'); } else { struct function *fn = tag__function(stats->tag); puts(function__name(fn, stats->cu)); } } static void print_fn_stats(void (*formatter)(const struct fn_stats *f)) { struct fn_stats *pos; list_for_each_entry(pos, &fn_stats__list, node) formatter(pos); } static void fn_stats_inline_stats_fmtr(const struct fn_stats *stats) { if (stats->nr_expansions > 1) printf("%-31.31s %6u %7u %6u %6u\n", function__name(tag__function(stats->tag), stats->cu), stats->size_expansions, stats->nr_expansions, stats->size_expansions / stats->nr_expansions, stats->nr_files); } static void print_total_inline_stats(void) { printf("%-32.32s %5.5s / %5.5s = %5.5s %s\n", "name", "totsz", "exp#", "avgsz", "src#"); print_fn_stats(fn_stats_inline_stats_fmtr); } static void fn_stats__dupmsg(struct function *func, const struct cu *func_cu, struct function *dup __unused, const struct cu *dup_cu, char *hdr, const char *fmt, ...) { va_list args; if (!*hdr) printf("function: %s\nfirst: %s\ncurrent: %s\n", function__name(func, func_cu), func_cu->name, dup_cu->name); va_start(args, fmt); vprintf(fmt, args); va_end(args); *hdr = 1; } static void fn_stats__chkdupdef(struct function *func, const struct cu *func_cu, struct function *dup, const struct cu *dup_cu) { char hdr = 0; const size_t func_size = function__size(func); const size_t dup_size = function__size(dup); if (func_size != dup_size) fn_stats__dupmsg(func, func_cu, dup, dup_cu, &hdr, "size: %zd != %zd\n", func_size, dup_size); if (func->proto.nr_parms != dup->proto.nr_parms) fn_stats__dupmsg(func, func_cu, dup, dup_cu, &hdr, "nr_parms: %u != %u\n", func->proto.nr_parms, dup->proto.nr_parms); /* XXX put more checks here: member types, member ordering, etc */ if (hdr) putchar('\n'); } static bool function__filter(struct function *function, struct cu *cu) { struct fn_stats *fstats; const char *name; if (!function__tag(function)->top_level) return true; /* * FIXME: remove this check and try to fix the parameter abstract * origin code someday... 
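Until then, nameless functions, typically out-of-line instances whose parameters carry only an abstract-origin reference back to the inline declaration, are filtered out right below.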
*/ if (!function->name) return true; name = function__name(function, cu); if (show_externals && !function->external) return true; if (show_cc_uninlined && function->inlined != DW_INL_declared_not_inlined) return true; if (show_cc_inlined && function->inlined != DW_INL_inlined) return true; fstats = fn_stats__find(name); if (fstats != NULL) { struct function *fn = tag__function(fstats->tag); if (!fn->external) return false; if (verbose) fn_stats__chkdupdef(fn, fstats->cu, function, cu); fstats->nr_expansions += function->cu_total_nr_inline_expansions; fstats->size_expansions += function->cu_total_size_inline_expansions; fstats->nr_files++; return true; } return false; } static int cu_unique_iterator(struct cu *cu, void *cookie __unused) { cu__account_inline_expansions(cu); struct function *pos; uint32_t id; cu__for_each_function(cu, id, pos) if (!function__filter(pos, cu)) fn_stats__add(function__tag(pos), cu); return 0; } static int cu_class_iterator(struct cu *cu, void *cookie) { type_id_t target_id; struct tag *target = cu__find_struct_by_name(cu, cookie, 0, &target_id); if (target == NULL) return 0; struct function *pos; uint32_t id; cu__for_each_function(cu, id, pos) { if (pos->inlined || !ftype__has_parm_of_type(&pos->proto, target_id, cu)) continue; if (verbose) tag__fprintf(function__tag(pos), cu, &conf, stdout); else fputs(function__name(pos, cu), stdout); putchar('\n'); } return 0; } static int function__emit_type_definitions(struct function *func, struct cu *cu, FILE *fp) { struct parameter *pos; struct tag *type = cu__type(cu, func->proto.tag.type); retry_return_type: /* type == NULL means the return is void */ if (type == NULL) goto do_parameters; if (tag__is_pointer(type) || tag__is_modifier(type)) { type = cu__type(cu, type->type); goto retry_return_type; } if (tag__is_type(type) && !tag__type(type)->definition_emitted) { type__emit_definitions(type, cu, &emissions, fp); type__emit(type, cu, NULL, NULL, fp); } do_parameters: function__for_each_parameter(func, pos) { type = cu__type(cu, pos->tag.type); try_again: if (type == NULL) continue; if (tag__is_pointer(type) || tag__is_modifier(type)) { type = cu__type(cu, type->type); goto try_again; } if (type->tag == DW_TAG_subroutine_type) { ftype__emit_definitions(tag__ftype(type), cu, &emissions, fp); } else if (tag__is_type(type) && !tag__type(type)->definition_emitted) { type__emit_definitions(type, cu, &emissions, fp); if (!tag__is_typedef(type)) type__emit(type, cu, NULL, NULL, fp); putchar('\n'); } } return 0; } static void function__show(struct function *func, struct cu *cu) { struct tag *tag = function__tag(func); if (func->abstract_origin || func->external) return; if (expand_types) function__emit_type_definitions(func, cu, stdout); tag__fprintf(tag, cu, &conf, stdout); if (compilable_output) { struct tag *type = cu__type(cu, func->proto.tag.type); fprintf(stdout, "\n{"); if (type != NULL) { /* NULL == void */ if (tag__is_pointer(type)) fprintf(stdout, "\n\treturn (void *)0;"); else if (tag__is_struct(type)) fprintf(stdout, "\n\treturn *(struct %s *)1;", class__name(tag__class(type), cu)); else if (tag__is_union(type)) fprintf(stdout, "\n\treturn *(union %s *)1;", type__name(tag__type(type), cu)); else if (tag__is_typedef(type)) fprintf(stdout, "\n\treturn *(%s *)1;", type__name(tag__type(type), cu)); else fprintf(stdout, "\n\treturn 0;"); } fprintf(stdout, "\n}\n"); } putchar('\n'); if (show_variables || show_inline_expansions) function__fprintf_stats(tag, cu, &conf, stdout); } static int cu_function_iterator(struct cu 
*cu, void *cookie) { struct function *function; uint32_t id; cu__for_each_function(cu, id, function) { if (cookie && strcmp(function__name(function, cu), cookie) != 0) continue; function__show(function, cu); if (!expand_types) return 1; } return 0; } int elf_symtab__show(char *filename) { int fd = open(filename, O_RDONLY), err = -1; if (fd < 0) return -1; if (elf_version(EV_CURRENT) == EV_NONE) { fprintf(stderr, "%s: cannot set libelf version.\n", __func__); goto out_close; } Elf *elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); if (elf == NULL) { fprintf(stderr, "%s: cannot read %s ELF file.\n", __func__, filename); goto out_close; } GElf_Ehdr ehdr; if (gelf_getehdr(elf, &ehdr) == NULL) { fprintf(stderr, "%s: cannot get elf header.\n", __func__); goto out_elf_end; } struct elf_symtab *symtab = elf_symtab__new(symtab_name, elf, &ehdr); if (symtab == NULL) goto out_elf_end; GElf_Sym sym; uint32_t index; int longest_name = 0; elf_symtab__for_each_symbol(symtab, index, sym) { if (!elf_sym__is_local_function(&sym)) continue; int len = strlen(elf_sym__name(&sym, symtab)); if (len > longest_name) longest_name = len; } if (longest_name > 32) longest_name = 32; int index_spacing = 0; int nr = elf_symtab__nr_symbols(symtab); while (nr) { ++index_spacing; nr /= 10; } elf_symtab__for_each_symbol(symtab, index, sym) { if (!elf_sym__is_local_function(&sym)) continue; printf("%*d: %-*s %#llx %5u\n", index_spacing, index, longest_name, elf_sym__name(&sym, symtab), (unsigned long long)elf_sym__value(&sym), elf_sym__size(&sym)); } elf_symtab__delete(symtab); err = 0; out_elf_end: elf_end(elf); out_close: close(fd); return err; } int elf_symtabs__show(char *filenames[]) { int i = 0; while (filenames[i] != NULL) { if (elf_symtab__show(filenames[i])) return EXIT_FAILURE; ++i; } return EXIT_SUCCESS; } /* Name and version of program. 
*/ ARGP_PROGRAM_VERSION_HOOK_DEF = dwarves_print_version; #define ARGP_symtab 300 #define ARGP_no_parm_names 301 #define ARGP_compile 302 static const struct argp_option pfunct__options[] = { { .key = 'a', .name = "addr", .arg = "ADDR", .doc = "show just the function that where ADDR is", }, { .key = 'b', .name = "expand_types", .doc = "Expand types needed by the prototype", }, { .key = 'c', .name = "class", .arg = "CLASS", .doc = "functions that have CLASS pointer parameters", }, { .key = 'E', .name = "externals", .doc = "show just external functions", }, { .key = 'f', .name = "function", .arg = "FUNCTION", .doc = "show just FUNCTION", }, { .name = "format_path", .key = 'F', .arg = "FORMAT_LIST", .doc = "List of debugging formats to try" }, { .key = 'g', .name = "goto_labels", .doc = "show number of goto labels", }, { .key = 'G', .name = "cc_uninlined", .doc = "declared inline, uninlined by compiler", }, { .key = 'H', .name = "cc_inlined", .doc = "not declared inline, inlined by compiler", }, { .key = 'i', .name = "inline_expansions", .doc = "show inline expansions", }, { .key = 'I', .name = "inline_expansions_stats", .doc = "show inline expansions stats", }, { .key = 'l', .name = "decl_info", .doc = "show source code info", }, { .key = 't', .name = "total_inline_stats", .doc = "show Multi-CU total inline expansions stats", }, { .key = 's', .name = "sizes", .doc = "show size of functions", }, { .key = 'N', .name = "function_name_len", .doc = "show size of functions names", }, { .key = 'p', .name = "nr_parms", .doc = "show number of parameters", }, { .key = 'P', .name = "prototypes", .doc = "show function prototypes", }, { .key = 'S', .name = "nr_variables", .doc = "show number of variables", }, { .key = 'T', .name = "variables", .doc = "show variables", }, { .key = 'V', .name = "verbose", .doc = "be verbose", }, { .name = "symtab", .key = ARGP_symtab, .arg = "NAME", .flags = OPTION_ARG_OPTIONAL, .doc = "show symbol table NAME (Default .symtab)", }, { .name = "compile", .key = ARGP_compile, .arg = "FUNCTION", .flags = OPTION_ARG_OPTIONAL, .doc = "Generate compilable source code with types expanded (Default all functions)", }, { .name = "no_parm_names", .key = ARGP_no_parm_names, .doc = "Don't show parameter names", }, { .name = NULL, } }; static void (*formatter)(const struct fn_stats *f) = fn_stats_fmtr; static char *class_name; static char *function_name; static int show_total_inline_expansion_stats; static error_t pfunct__options_parser(int key, char *arg, struct argp_state *state) { switch (key) { case ARGP_KEY_INIT: if (state->child_inputs != NULL) state->child_inputs[0] = state->input; break; case 'a': addr = strtoull(arg, NULL, 0); conf_load.get_addr_info = true; break; case 'b': expand_types = true; type_emissions__init(&emissions); break; case 'c': class_name = arg; break; case 'f': function_name = arg; break; case 'F': conf_load.format_path = arg; break; case 'E': show_externals = 1; break; case 's': formatter = fn_stats_size_fmtr; conf_load.get_addr_info = true; break; case 'S': formatter = fn_stats_variables_fmtr; break; case 'p': formatter = fn_stats_nr_parms_fmtr; break; case 'P': show_prototypes = true; break; case 'g': formatter = fn_stats_labels_fmtr; break; case 'G': show_cc_uninlined = 1; break; case 'H': show_cc_inlined = 1; break; case 'i': show_inline_expansions = verbose = 1; conf_load.extra_dbg_info = true; conf_load.get_addr_info = true; break; case 'I': formatter = fn_stats_inline_exps_fmtr; conf_load.get_addr_info = true; break; case 'l': conf.show_decl_info = 1; 
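		/* decl file/line come from the extra debug info, so tell the
		 * loader to keep it around */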
conf_load.extra_dbg_info = 1; break; case 't': show_total_inline_expansion_stats = true; conf_load.get_addr_info = true; break; case 'T': show_variables = 1; break; case 'N': formatter = fn_stats_name_len_fmtr; break; case 'V': verbose = 1; conf_load.extra_dbg_info = true; conf_load.get_addr_info = true; break; case ARGP_symtab: symtab_name = arg ?: ".symtab"; break; case ARGP_no_parm_names: conf.no_parm_names = 1; break; case ARGP_compile: expand_types = true; type_emissions__init(&emissions); compilable_output = true; conf.no_semicolon = true; conf.strip_inline = true; if (arg) function_name = arg; break; default: return ARGP_ERR_UNKNOWN; } return 0; } static const char pfunct__args_doc[] = "FILE"; static struct argp pfunct__argp = { .options = pfunct__options, .parser = pfunct__options_parser, .args_doc = pfunct__args_doc, }; int main(int argc, char *argv[]) { int err, remaining, rc = EXIT_FAILURE; if (argp_parse(&pfunct__argp, argc, argv, 0, &remaining, NULL) || (remaining == argc && class_name == NULL && function_name == NULL)) { argp_help(&pfunct__argp, stderr, ARGP_HELP_SEE, argv[0]); goto out; } if (symtab_name != NULL) return elf_symtabs__show(argv + remaining); if (dwarves__init(0)) { fputs("pfunct: insufficient memory\n", stderr); goto out; } struct cus *cus = cus__new(); if (cus == NULL) { fputs("pfunct: insufficient memory\n", stderr); goto out_dwarves_exit; } err = cus__load_files(cus, &conf_load, argv + remaining); if (err != 0) { cus__fprintf_load_files_err(cus, "pfunct", argv + remaining, err, stderr); goto out_cus_delete; } cus__for_each_cu(cus, cu_unique_iterator, NULL, NULL); if (addr) { struct cu *cu; struct function *f = cus__find_function_at_addr(cus, addr, &cu); if (f == NULL) { fprintf(stderr, "pfunct: No function found at %#llx!\n", (unsigned long long)addr); goto out_cus_delete; } function__show(f, cu); } else if (show_total_inline_expansion_stats) print_total_inline_stats(); else if (class_name != NULL) cus__for_each_cu(cus, cu_class_iterator, class_name, NULL); else if (function_name != NULL || expand_types) cus__for_each_cu(cus, cu_function_iterator, function_name, NULL); else print_fn_stats(formatter); rc = EXIT_SUCCESS; out_cus_delete: cus__delete(cus); fn_stats__delete_list(); out_dwarves_exit: dwarves__exit(); out: return rc; } dwarves-dfsg-1.15/pglobal.c000066400000000000000000000142671350511416500156330ustar00rootroot00000000000000/* * SPDX-License-Identifier: GPL-2.0-only * * Copyright (C) 2007 Davi E. M. 
Arnaut */ #include #include #include #include #include #include #include #include "dwarves.h" #include "dutil.h" static int verbose; struct extvar { struct extvar *next; const char *name; const struct variable *var; const struct cu *cu; }; struct extfun { struct extfun *next; const char *name; const struct function *fun; const struct cu *cu; }; static void *tree; static void oom(const char *msg) { fprintf(stderr, "pglobal: out of memory (%s)\n", msg); exit(EXIT_FAILURE); } static struct extvar *extvar__new(const struct variable *var, const struct cu *cu) { struct extvar *gvar = malloc(sizeof(*gvar)); if (gvar != NULL) { gvar->next = NULL; gvar->var = var; gvar->cu = cu; gvar->name = variable__name(var, cu); } return gvar; } static struct extfun *extfun__new(struct function *fun, const struct cu *cu) { struct extfun *gfun = malloc(sizeof(*gfun)); if (gfun != NULL) { gfun->next = NULL; gfun->fun = fun; gfun->cu = cu; gfun->name = function__name(fun, cu); } return gfun; } static int extvar__compare(const void *a, const void *b) { const struct extvar *ga = a, *gb = b; return strcmp(ga->name, gb->name); } static int extfun__compare(const void *a, const void *b) { const struct extfun *ga = a, *gb = b; return strcmp(ga->name, gb->name); } static void extvar__add(const struct variable *var, const struct cu *cu) { struct extvar **nodep, *gvar = extvar__new(var, cu); if (gvar != NULL) { nodep = tsearch(gvar, &tree, extvar__compare); if (nodep == NULL) oom("tsearch"); else if (*nodep != gvar) if (gvar->var->declaration) { gvar->next = (*nodep)->next; (*nodep)->next = gvar; } else { gvar->next = *nodep; *nodep = gvar; } } } static void extfun__add(struct function *fun, const struct cu *cu) { struct extfun **nodep, *gfun = extfun__new(fun, cu); if (gfun != NULL) { nodep = tsearch(gfun, &tree, extfun__compare); if (nodep == NULL) oom("tsearch"); else if (*nodep != gfun) { gfun->next = (*nodep)->next; (*nodep)->next = gfun; } } } static int cu_extvar_iterator(struct cu *cu, void *cookie __unused) { struct tag *pos; uint32_t id; cu__for_each_variable(cu, id, pos) { struct variable *var = tag__variable(pos); if (var->external) extvar__add(var, cu); } return 0; } static int cu_extfun_iterator(struct cu *cu, void *cookie __unused) { struct function *pos; uint32_t id; cu__for_each_function(cu, id, pos) if (pos->external) extfun__add(pos, cu); return 0; } static inline const struct extvar *node__variable(const void *nodep) { return *((const struct extvar **)nodep); } static inline const struct extfun *node__function(const void *nodep) { return *((const struct extfun **)nodep); } static inline struct tag *extvar__tag(const struct extvar *gvar) { return (struct tag *)gvar->var; } static inline struct tag *extfun__tag(const struct extfun *gfun) { return (struct tag *)gfun->fun; } static void declaration_action__walk(const void *nodep, const VISIT which, const int depth __unused) { uint32_t count = 0; struct tag *tag; const struct extvar *pos, *gvar = NULL; switch(which) { case preorder: break; case postorder: gvar = node__variable(nodep); break; case endorder: break; case leaf: gvar = node__variable(nodep); break; } if (gvar == NULL) return; tag = extvar__tag(gvar); tag__fprintf(tag, gvar->cu, NULL, stdout); for (pos = gvar->next; pos; pos = pos->next) count++; printf("; /* %u */\n\n", count); } static void function_action__walk(const void *nodep, const VISIT which, const int depth __unused) { struct tag *tag; const struct extfun *gfun = NULL; switch(which) { case preorder: break; case postorder: gfun = 
node__function(nodep); break; case endorder: break; case leaf: gfun = node__function(nodep); break; } if (gfun == NULL) return; tag = extfun__tag(gfun); tag__fprintf(tag, gfun->cu, NULL, stdout); fputs("\n\n", stdout); } static void free_node(void *nodep) { void **node = nodep; free(*node); } /* Name and version of program. */ ARGP_PROGRAM_VERSION_HOOK_DEF = dwarves_print_version; static const struct argp_option pglobal__options[] = { { .key = 'v', .name = "variables", .doc = "show global variables", }, { .key = 'f', .name = "functions", .doc = "show global functions", }, { .key = 'V', .name = "verbose", .doc = "be verbose", }, { .name = NULL, } }; static int walk_var, walk_fun; static error_t pglobal__options_parser(int key, char *arg __unused, struct argp_state *state) { switch (key) { case ARGP_KEY_INIT: if (state->child_inputs != NULL) state->child_inputs[0] = state->input; break; case 'v': walk_var = 1; break; case 'f': walk_fun = 1; break; case 'V': verbose = 1; break; default: return ARGP_ERR_UNKNOWN; } return 0; } static const char pglobal__args_doc[] = "FILE"; static struct argp pglobal__argp = { .options = pglobal__options, .parser = pglobal__options_parser, .args_doc = pglobal__args_doc, }; int main(int argc, char *argv[]) { int err, remaining, rc = EXIT_FAILURE; if (argp_parse(&pglobal__argp, argc, argv, 0, &remaining, NULL) || remaining == argc) { argp_help(&pglobal__argp, stderr, ARGP_HELP_SEE, argv[0]); goto out; } if (dwarves__init(0)) { fputs("pglobal: insufficient memory\n", stderr); goto out; } struct cus *cus = cus__new(); if (cus == NULL) { fputs("pglobal: insufficient memory\n", stderr); goto out_dwarves_exit; } err = cus__load_files(cus, NULL, argv + remaining); if (err != 0) { cus__fprintf_load_files_err(cus, "pglobal", argv + remaining, err, stderr); goto out_cus_delete; } if (walk_var) { cus__for_each_cu(cus, cu_extvar_iterator, NULL, NULL); twalk(tree, declaration_action__walk); } else if (walk_fun) { cus__for_each_cu(cus, cu_extfun_iterator, NULL, NULL); twalk(tree, function_action__walk); } tdestroy(tree, free_node); rc = EXIT_SUCCESS; out_cus_delete: cus__delete(cus); out_dwarves_exit: dwarves__exit(); out: return rc; } dwarves-dfsg-1.15/prefcnt.c000066400000000000000000000067531350511416500156550ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2006 Mandriva Conectiva S.A. 
Copyright (C) 2006 Arnaldo Carvalho de Melo */ #include #include #include #include #include #include "dwarves.h" #include "dutil.h" static void refcnt_tag(struct tag *tag, const struct cu *cu); static void refcnt_member(struct class_member *member, const struct cu *cu) { if (member->visited) return; member->visited = 1; if (member->tag.type != 0) { /* if not void */ struct tag *type = cu__type(cu, member->tag.type); if (type != NULL) refcnt_tag(type, cu); } } static void refcnt_parameter(const struct parameter *parameter, const struct cu *cu) { if (parameter->tag.type != 0) { /* if not void */ struct tag *type = cu__type(cu, parameter->tag.type); if (type != NULL) refcnt_tag(type, cu); } } static void refcnt_variable(const struct variable *variable, const struct cu *cu) { if (variable->ip.tag.type != 0) { /* if not void */ struct tag *type = cu__type(cu, variable->ip.tag.type); if (type != NULL) refcnt_tag(type, cu); } } static void refcnt_inline_expansion(const struct inline_expansion *exp, const struct cu *cu) { if (exp->ip.tag.type != 0) { /* if not void */ struct tag *type = cu__function(cu, exp->ip.tag.type); if (type != NULL) refcnt_tag(type, cu); } } static void refcnt_tag(struct tag *tag, const struct cu *cu) { struct class_member *member; tag->visited = 1; if (tag__is_struct(tag) || tag__is_union(tag)) type__for_each_member(tag__type(tag), member) refcnt_member(member, cu); } static void refcnt_lexblock(const struct lexblock *lexblock, const struct cu *cu) { struct tag *pos; list_for_each_entry(pos, &lexblock->tags, node) switch (pos->tag) { case DW_TAG_variable: refcnt_variable(tag__variable(pos), cu); break; case DW_TAG_inlined_subroutine: refcnt_inline_expansion(tag__inline_expansion(pos), cu); break; case DW_TAG_lexical_block: refcnt_lexblock(tag__lexblock(pos), cu); break; } } static void refcnt_function(struct function *function, const struct cu *cu) { struct parameter *parameter; function->proto.tag.visited = 1; if (function->proto.tag.type != 0) /* if not void */ { struct tag *type = cu__type(cu, function->proto.tag.type); if (type != NULL) refcnt_tag(type, cu); } list_for_each_entry(parameter, &function->proto.parms, tag.node) refcnt_parameter(parameter, cu); refcnt_lexblock(&function->lexblock, cu); } static int cu_refcnt_iterator(struct cu *cu, void *cookie __unused) { struct function *pos; uint32_t id; cu__for_each_function(cu, id, pos) refcnt_function(pos, cu); return 0; } static int lost_iterator(struct tag *tag, struct cu *cu, void *cookie __unused) { if (!tag->visited && tag__decl_file(tag, cu)) { tag__fprintf(tag, cu, NULL, stdout); puts(";\n"); } return 0; } static int cu_lost_iterator(struct cu *cu, void *cookie) { return cu__for_all_tags(cu, lost_iterator, cookie); } int main(int argc __unused, char *argv[]) { int err; struct cus *cus = cus__new(); if (dwarves__init(0) || cus == NULL) { fputs("prefcnt: insufficient memory\n", stderr); return EXIT_FAILURE; } err = cus__load_files(cus, NULL, argv + 1); if (err != 0) { cus__fprintf_load_files_err(cus, "prefcnt", argv + 1, err, stderr); return EXIT_FAILURE; } cus__for_each_cu(cus, cu_refcnt_iterator, NULL, NULL); cus__for_each_cu(cus, cu_lost_iterator, NULL, NULL); return EXIT_SUCCESS; } dwarves-dfsg-1.15/rbtree.c000066400000000000000000000172231350511416500154710ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-or-later Red Black Trees (C) 1999 Andrea Arcangeli (C) 2002 David Woodhouse linux/lib/rbtree.c */ #include "rbtree.h" static void __rb_rotate_left(struct rb_node *node, struct rb_root *root) 
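/*
 * Classic left rotation: node's right child takes node's place, node
 * becomes that child's left child, and the child's former left subtree
 * is reattached as node's new right subtree:
 *
 *	  N                R
 *	 / \              / \
 *	A   R     =>     N   C
 *	   / \          / \
 *	  B   C        A   B
 */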
{ struct rb_node *right = node->rb_right; struct rb_node *parent = rb_parent(node); if ((node->rb_right = right->rb_left)) rb_set_parent(right->rb_left, node); right->rb_left = node; rb_set_parent(right, parent); if (parent) { if (node == parent->rb_left) parent->rb_left = right; else parent->rb_right = right; } else root->rb_node = right; rb_set_parent(node, right); } static void __rb_rotate_right(struct rb_node *node, struct rb_root *root) { struct rb_node *left = node->rb_left; struct rb_node *parent = rb_parent(node); if ((node->rb_left = left->rb_right)) rb_set_parent(left->rb_right, node); left->rb_right = node; rb_set_parent(left, parent); if (parent) { if (node == parent->rb_right) parent->rb_right = left; else parent->rb_left = left; } else root->rb_node = left; rb_set_parent(node, left); } void rb_insert_color(struct rb_node *node, struct rb_root *root) { struct rb_node *parent, *gparent; while ((parent = rb_parent(node)) && rb_is_red(parent)) { gparent = rb_parent(parent); if (parent == gparent->rb_left) { { register struct rb_node *uncle = gparent->rb_right; if (uncle && rb_is_red(uncle)) { rb_set_black(uncle); rb_set_black(parent); rb_set_red(gparent); node = gparent; continue; } } if (parent->rb_right == node) { register struct rb_node *tmp; __rb_rotate_left(parent, root); tmp = parent; parent = node; node = tmp; } rb_set_black(parent); rb_set_red(gparent); __rb_rotate_right(gparent, root); } else { { register struct rb_node *uncle = gparent->rb_left; if (uncle && rb_is_red(uncle)) { rb_set_black(uncle); rb_set_black(parent); rb_set_red(gparent); node = gparent; continue; } } if (parent->rb_left == node) { register struct rb_node *tmp; __rb_rotate_right(parent, root); tmp = parent; parent = node; node = tmp; } rb_set_black(parent); rb_set_red(gparent); __rb_rotate_left(gparent, root); } } rb_set_black(root->rb_node); } static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, struct rb_root *root) { struct rb_node *other; while ((!node || rb_is_black(node)) && node != root->rb_node) { if (parent->rb_left == node) { other = parent->rb_right; if (rb_is_red(other)) { rb_set_black(other); rb_set_red(parent); __rb_rotate_left(parent, root); other = parent->rb_right; } if ((!other->rb_left || rb_is_black(other->rb_left)) && (!other->rb_right || rb_is_black(other->rb_right))) { rb_set_red(other); node = parent; parent = rb_parent(node); } else { if (!other->rb_right || rb_is_black(other->rb_right)) { rb_set_black(other->rb_left); rb_set_red(other); __rb_rotate_right(other, root); other = parent->rb_right; } rb_set_color(other, rb_color(parent)); rb_set_black(parent); rb_set_black(other->rb_right); __rb_rotate_left(parent, root); node = root->rb_node; break; } } else { other = parent->rb_left; if (rb_is_red(other)) { rb_set_black(other); rb_set_red(parent); __rb_rotate_right(parent, root); other = parent->rb_left; } if ((!other->rb_left || rb_is_black(other->rb_left)) && (!other->rb_right || rb_is_black(other->rb_right))) { rb_set_red(other); node = parent; parent = rb_parent(node); } else { if (!other->rb_left || rb_is_black(other->rb_left)) { rb_set_black(other->rb_right); rb_set_red(other); __rb_rotate_left(other, root); other = parent->rb_left; } rb_set_color(other, rb_color(parent)); rb_set_black(parent); rb_set_black(other->rb_left); __rb_rotate_right(parent, root); node = root->rb_node; break; } } } if (node) rb_set_black(node); } void rb_erase(struct rb_node *node, struct rb_root *root) { struct rb_node *child, *parent; int color; if (!node->rb_left) child = 
node->rb_right; else if (!node->rb_right) child = node->rb_left; else { struct rb_node *old = node, *left; node = node->rb_right; while ((left = node->rb_left) != NULL) node = left; child = node->rb_right; parent = rb_parent(node); color = rb_color(node); if (child) rb_set_parent(child, parent); if (parent == old) { parent->rb_right = child; parent = node; } else parent->rb_left = child; node->rb_parent_color = old->rb_parent_color; node->rb_right = old->rb_right; node->rb_left = old->rb_left; if (rb_parent(old)) { if (rb_parent(old)->rb_left == old) rb_parent(old)->rb_left = node; else rb_parent(old)->rb_right = node; } else root->rb_node = node; rb_set_parent(old->rb_left, node); if (old->rb_right) rb_set_parent(old->rb_right, node); goto color; } parent = rb_parent(node); color = rb_color(node); if (child) rb_set_parent(child, parent); if (parent) { if (parent->rb_left == node) parent->rb_left = child; else parent->rb_right = child; } else root->rb_node = child; color: if (color == RB_BLACK) __rb_erase_color(child, parent, root); } /* * This function returns the first node (in sort order) of the tree. */ struct rb_node *rb_first(const struct rb_root *root) { struct rb_node *n; n = root->rb_node; if (!n) return NULL; while (n->rb_left) n = n->rb_left; return n; } struct rb_node *rb_last(const struct rb_root *root) { struct rb_node *n; n = root->rb_node; if (!n) return NULL; while (n->rb_right) n = n->rb_right; return n; } struct rb_node *rb_next(const struct rb_node *node) { struct rb_node *parent; if (rb_parent(node) == node) return NULL; /* If we have a right-hand child, go down and then left as far as we can. */ if (node->rb_right) { node = node->rb_right; while (node->rb_left) node=node->rb_left; return (struct rb_node *)node; } /* No right-hand children. Everything down and left is smaller than us, so any 'next' node must be in the general direction of our parent. Go up the tree; any time the ancestor is a right-hand child of its parent, keep going up. First time it's a left-hand child of its parent, said parent is our 'next' node. */ while ((parent = rb_parent(node)) && node == parent->rb_right) node = parent; return parent; } struct rb_node *rb_prev(const struct rb_node *node) { struct rb_node *parent; if (rb_parent(node) == node) return NULL; /* If we have a left-hand child, go down and then right as far as we can. */ if (node->rb_left) { node = node->rb_left; while (node->rb_right) node=node->rb_right; return (struct rb_node *)node; } /* No left-hand children. Go up till we find an ancestor which is a right-hand child of its parent */ while ((parent = rb_parent(node)) && node == parent->rb_left) node = parent; return parent; } void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root) { struct rb_node *parent = rb_parent(victim); /* Set the surrounding nodes to point to the replacement */ if (parent) { if (victim == parent->rb_left) parent->rb_left = new; else parent->rb_right = new; } else { root->rb_node = new; } if (victim->rb_left) rb_set_parent(victim->rb_left, new); if (victim->rb_right) rb_set_parent(victim->rb_right, new); /* Copy the pointers/colour from the victim to the replacement */ *new = *victim; } dwarves-dfsg-1.15/rbtree.h000066400000000000000000000110531350511416500154710ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-or-later Red Black Trees (C) 1999 Andrea Arcangeli linux/include/linux/rbtree.h To use rbtrees you'll have to implement your own insert and search cores. 
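
  Iteration, by contrast, needs no user-supplied core: once nodes are
  linked in, rb_first()/rb_next() visit them in sort order.  A minimal
  sketch (the "struct thing" / "node" / "key" names are hypothetical,
  "tree" is a struct rb_root):

	struct rb_node *n;

	for (n = rb_first(&tree); n != NULL; n = rb_next(n)) {
		struct thing *t = rb_entry(n, struct thing, node);
		printf("%d\n", t->key);
	}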
  Writing your own insert and search cores avoids callbacks, which would
  drop performance dramatically.  It is not the cleanest way, but in C
  (as opposed to C++) it is what it takes to get both performance and
  genericity...

  Some example of insert and search follows here.  The search is a plain
  normal search over an ordered tree.  The insert instead must be
  implemented in two steps: first, the code must insert the element in
  order as a red leaf in the tree, then the support library function
  rb_insert_color() must be called.  That function will do the
  not-so-trivial work of rebalancing the rbtree if necessary.

-----------------------------------------------------------------------
static inline struct page * rb_search_page_cache(struct inode * inode,
						 unsigned long offset)
{
	struct rb_node * n = inode->i_rb_page_cache.rb_node;
	struct page * page;

	while (n)
	{
		page = rb_entry(n, struct page, rb_page_cache);

		if (offset < page->offset)
			n = n->rb_left;
		else if (offset > page->offset)
			n = n->rb_right;
		else
			return page;
	}
	return NULL;
}

static inline struct page * __rb_insert_page_cache(struct inode * inode,
						   unsigned long offset,
						   struct rb_node * node)
{
	struct rb_node ** p = &inode->i_rb_page_cache.rb_node;
	struct rb_node * parent = NULL;
	struct page * page;

	while (*p)
	{
		parent = *p;
		page = rb_entry(parent, struct page, rb_page_cache);

		if (offset < page->offset)
			p = &(*p)->rb_left;
		else if (offset > page->offset)
			p = &(*p)->rb_right;
		else
			return page;
	}

	rb_link_node(node, parent, p);

	return NULL;
}

static inline struct page * rb_insert_page_cache(struct inode * inode,
						 unsigned long offset,
						 struct rb_node * node)
{
	struct page * ret;
	if ((ret = __rb_insert_page_cache(inode, offset, node)))
		goto out;
	rb_insert_color(node, &inode->i_rb_page_cache);
 out:
	return ret;
}
-----------------------------------------------------------------------
*/

#ifndef _LINUX_RBTREE_H
#define _LINUX_RBTREE_H

#include <stddef.h>

/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:	the pointer to the member.
 * @type:	the type of the container struct this is embedded in.
 * @member:	the name of the member within the struct.
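 *
 * A minimal usage sketch (the struct and member names are hypothetical;
 * nptr is a struct rb_node * pointing at thing.node):
 *
 *	struct thing { int key; struct rb_node node; };
 *	struct thing *t = container_of(nptr, struct thing, node);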
* */ #define container_of(ptr, type, member) ({ \ const typeof( ((type *)0)->member ) *__mptr = (ptr); \ (type *)( (char *)__mptr - offsetof(type,member) );}) struct rb_node { unsigned long rb_parent_color; #define RB_RED 0 #define RB_BLACK 1 struct rb_node *rb_right; struct rb_node *rb_left; } __attribute__((aligned(sizeof(long)))); /* The alignment might seem pointless, but allegedly CRIS needs it */ struct rb_root { struct rb_node *rb_node; }; #define rb_parent(r) ((struct rb_node *)((r)->rb_parent_color & ~3)) #define rb_color(r) ((r)->rb_parent_color & 1) #define rb_is_red(r) (!rb_color(r)) #define rb_is_black(r) rb_color(r) #define rb_set_red(r) do { (r)->rb_parent_color &= ~1; } while (0) #define rb_set_black(r) do { (r)->rb_parent_color |= 1; } while (0) static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) { rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p; } static inline void rb_set_color(struct rb_node *rb, int color) { rb->rb_parent_color = (rb->rb_parent_color & ~1) | color; } #define RB_ROOT (struct rb_root) { NULL, } #define rb_entry(ptr, type, member) container_of(ptr, type, member) #define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) #define RB_EMPTY_NODE(node) (rb_parent(node) == node) #define RB_CLEAR_NODE(node) (rb_set_parent(node, node)) extern void rb_insert_color(struct rb_node *, struct rb_root *); extern void rb_erase(struct rb_node *, struct rb_root *); /* Find logical next and previous nodes in a tree */ extern struct rb_node *rb_next(const struct rb_node *); extern struct rb_node *rb_prev(const struct rb_node *); extern struct rb_node *rb_first(const struct rb_root *); extern struct rb_node *rb_last(const struct rb_root *); /* Fast replacement of a single node without remove/rebalance/add/rebalance */ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); static inline void rb_link_node(struct rb_node * node, struct rb_node * parent, struct rb_node ** rb_link) { node->rb_parent_color = (unsigned long )parent; node->rb_left = node->rb_right = NULL; *rb_link = node; } #endif /* _LINUX_RBTREE_H */ dwarves-dfsg-1.15/regtest000077500000000000000000000201021350511416500154330ustar00rootroot00000000000000#! /usr/bin/python # -*- python -*- # -*- coding: utf-8 -*- # tuna - Application Tuning GUI # Copyright (C) 2009 Arnaldo Carvalho de Melo # Arnaldo Carvalho de Melo # # This application is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2. # # This application is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
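#
# Typical runs -- a sketch only: the -b/-a/-d options are documented in
# usage() below, and the hardcoded regtest_* directories set right after
# the imports must first be adapted to the local site:
#
#   regtest -b   # record "before" output for every tool and format
#   regtest -a   # record "after" output once the tools were rebuilt
#   regtest -d   # diff the before/after pahole DWARF output trees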
import filecmp, getopt, os, posix, signal, sys, tempfile regtest_output_dir = "/media/tb/pahole/regtest/" regtest_obj_dir = "/media/tb/debuginfo/usr/lib/debug/" tools = {"pahole": { "dwarf": "--flat_arrays --show_private_classes --fixup_silly_bitfields --first_obj_only --classes_as_structs" }} all_formats = ("ctf", "dwarf") formats = all_formats len_debug_dir = len(regtest_obj_dir) verbose = 1 # Turn this on when testing CTF generated files use_options = False def diff_file(from_filename, to_filename): fd, diff_filename = tempfile.mkstemp() command = 'diff -up "%s" "%s" > %s' % (from_filename, to_filename, diff_filename) if verbose > 1: print command try: os.system(command) os.system("vim %s" % diff_filename) finally: os.unlink(diff_filename) def dir_has_no_diffs(dirname): return os.access(os.path.join(dirname, ".no_diffs"), os.F_OK) def set_dir_has_no_diffs(dirname): f = file(os.path.join(dirname, ".no_diffs"), "w") f.close() def reset_dir_has_no_diffs(dirname): os.unlink(os.path.join(dirname, ".no_diffs")) def diff_dir(from_dir, to_dir, dir = None, recursive = True): if dir: from_dir = os.path.join(from_dir, dir) to_dir = os.path.join(to_dir, dir) print "\r%-130s" % from_dir sys.stdout.flush() diff = filecmp.dircmp(from_dir, to_dir) if not dir_has_no_diffs(to_dir): diff_files = diff.diff_files if diff_files: diff_files.sort() print "\n %s" % from_dir sys.stdout.flush() for f in diff_files: diff_file(os.path.join(from_dir, f), os.path.join(to_dir, f)) else: set_dir_has_no_diffs(to_dir) if not recursive: return common_dirs = diff.common_dirs if not common_dirs: return common_dirs.sort() for dir in common_dirs: diff_dir(from_dir, to_dir, dir) def do_diff_dwarfs2ctfs(): diff_dir(os.path.join(regtest_output_dir, "after", "pahole", "dwarf"), os.path.join(regtest_output_dir, "after", "pahole", "ctf")) def do_diff_dwarfs(): diff_dir(os.path.join(regtest_output_dir, "before", "pahole", "dwarf"), os.path.join(regtest_output_dir, "after", "pahole", "dwarf")) def do_tool(tool, before_after, format, dirname, fname, prepend_obj_dir = False): if prepend_obj_dir: fname += ".debug" fixed_dirname = dirname else: fixed_dirname = dirname[len_debug_dir:] tool_output_dir = os.path.join(regtest_output_dir, before_after, tool, format, fixed_dirname) obj_path = os.path.join(dirname, fname) if prepend_obj_dir: obj_path = os.path.join(regtest_obj_dir, obj_path) if os.path.islink(obj_path) or os.path.isdir(obj_path): return try: os.makedirs(tool_output_dir) except: pass if dir_has_no_diffs(tool_output_dir): reset_dir_has_no_diffs(tool_output_dir) output_file = os.path.join(tool_output_dir, fname[:-6]) if use_options and tools[tool].has_key(format): options = tools[tool][format] else: options = "" command = '%s -F %s %s %s > "%s"' % (tool, format, options, obj_path, output_file) if verbose > 1: print command sys.stdout.flush() elif verbose > 0: print "%s: %s" % (format, os.path.join(fixed_dirname, fname[:-6])) os.system(command) def do_tool_on_files(arg, dirname, fnames, prepend_obj_dir = False): if dirname.find("/.") >= 0: return tool, before_after = arg for fname in fnames: if not prepend_obj_dir and fname[-6:] != ".debug": continue for format in formats: do_tool(tool, before_after, format, dirname, fname, prepend_obj_dir) def do_tools(before_after): for tool in tools.keys(): os.path.walk(regtest_obj_dir, do_tool_on_files, (tool, before_after)) def do_ctf(dirname, fname, prepend_obj_dir = False): if prepend_obj_dir: fname += ".debug" fixed_dirname = dirname else: fixed_dirname = dirname[len_debug_dir:] obj_path = 
os.path.join(dirname, fname) if prepend_obj_dir: obj_path = os.path.join(regtest_obj_dir, obj_path) if os.path.islink(obj_path) or os.path.isdir(obj_path): return command = 'pahole -Z "%s" 2> /dev/null' % obj_path if verbose > 1: print command elif verbose > 0: print os.path.join(fixed_dirname, fname[:-6]) os.system(command) def do_ctf_on_files(arg, dirname, fnames, prepend_obj_dir = False): if dirname.find("/.") >= 0: return for fname in fnames: if not prepend_obj_dir and fname[-6:] != ".debug": continue do_ctf(dirname, fname, prepend_obj_dir) def do_ctfs(): os.path.walk(regtest_obj_dir, do_ctf_on_files, None) def sig_exit(sig_number, stack_frame): sys.exit(1) def listdebugs(dirname): fnames = [] for fname in os.listdir(os.path.join(regtest_obj_dir, dirname)): if fname[-6:] != ".debug": continue obj_path = os.path.join(regtest_obj_dir, dirname, fname) if os.path.islink(obj_path) or os.path.isdir(obj_path): continue fnames.append(fname[:-6]) return fnames def usage(): print 'Usage: regtest [OPTIONS]' fmt = '\t%-20s %s' print fmt % ('-h, --help', 'Give this help list') print fmt % ('-a, --after', 'Generate new output') print fmt % ('-b, --before', 'Generate old output') print fmt % ('-c, --ctf_diff', 'Diff between DWARF and CTF for new output') print fmt % ('-C, --ctf_encode', 'Encode CTF into object files') print fmt % ('-d, --diff', 'Diff between old and new output') print fmt % ('-f, --formats', 'formats used (default: %s)' ','.join(formats)) def main(argv): global formats for sig in (signal.SIGHUP, signal.SIGINT, signal.SIGTERM): signal.signal(sig, sig_exit) try: short = "habcCdf:" long = ("help", "after", "before", "ctf_diff", "ctf_encode", "diff", "formats") opts, args = getopt.getopt(sys.argv[1:], short, long) except getopt.GetoptError, err: usage() print str(err) sys.exit(2) for o, a in opts: if o in ("-h", "--help"): usage() return elif o in ("-f", "--formats"): formats = a.split(',') elif o in ("-a", "--after", "-b", "--before", "-c", "--ctf_diff", "-C", "--ctf_encode", "-d", "--diff"): if len(args) > 0: dirname = args[0] if len(args) > 1: fnames = args[1:] elif o in ('-a', '--after', '-b', '--before', '-C', '--ctf_encode'): fnames = listdebugs(dirname) if o in ('-b', '--before', '-a', '--after'): if o in ('-b', '--before'): when = 'before' else: when = 'after' if len(args) > 0: for tool in tools.keys(): arg = (tool, when) do_tool_on_files(arg, dirname, fnames, True) else: do_tools(when) elif o in ('-d', '--diff'): if len(args) > 0: from_dir = os.path.join(regtest_output_dir, "before", "pahole", "dwarf", dirname) to_dir = os.path.join(regtest_output_dir, "after", "pahole", "dwarf", dirname) if len(args) > 1: for fname in fnames: diff_file(os.path.join(from_dir, fname), os.path.join(to_dir, fname)) else: diff_dir(from_dir, to_dir, recursive = False) else: do_diff_dwarfs() elif o in ('-C', 'ctf'): if len(args) > 0: do_ctf_on_files(None, dirname, fnames, True) else: do_ctfs() elif o in ('-c', 'ctf_diff'): if len(args) > 0: from_dir = os.path.join(regtest_output_dir, "after", "pahole", "dwarf", dirname) to_dir = os.path.join(regtest_output_dir, "after", "pahole", "ctf", dirname) if len(args) > 1: for fname in fnames: diff_file(os.path.join(from_dir, fname), os.path.join(to_dir, fname)) else: diff_dir(from_dir, to_dir, recursive = False) else: do_diff_dwarfs2ctfs() if __name__ == '__main__': main(sys.argv) 
dwarves-dfsg-1.15/rpm/000077500000000000000000000000001350511416500146335ustar00rootroot00000000000000dwarves-dfsg-1.15/rpm/SPECS/000077500000000000000000000000001350511416500155105ustar00rootroot00000000000000dwarves-dfsg-1.15/rpm/SPECS/dwarves.spec000066400000000000000000000267671350511416500200610ustar00rootroot00000000000000%define libname libdwarves %define libver 1 Name: dwarves Version: 1.15 Release: 1%{?dist} License: GPLv2 Summary: Debugging Information Manipulation Tools (pahole & friends) URL: http://acmel.wordpress.com Source: http://fedorapeople.org/~acme/dwarves/%{name}-%{version}.tar.xz BuildRequires: gcc BuildRequires: cmake BuildRequires: zlib-devel BuildRequires: elfutils-devel >= 0.130 %description dwarves is a set of tools that use the debugging information inserted in ELF binaries by compilers such as GCC, used by well known debuggers such as GDB, and more recent ones such as systemtap. Utilities in the dwarves suite include pahole, that can be used to find alignment holes in structs and classes in languages such as C, C++, but not limited to these. It also extracts other information such as CPU cacheline alignment, helping pack those structures to achieve more cache hits. These tools can also be used to encode and read the BTF type information format used with the Linux kernel bpf syscall, using 'pahole -J' and 'pahole -F btf'. A diff like tool, codiff can be used to compare the effects changes in source code generate on the resulting binaries. Another tool is pfunct, that can be used to find all sorts of information about functions, inlines, decisions made by the compiler about inlining, etc. %package -n %{libname}%{libver} Summary: Debugging information processing library %description -n %{libname}%{libver} Debugging information processing library. %package -n %{libname}%{libver}-devel Summary: Debugging information library development files Requires: %{libname}%{libver} = %{version}-%{release} %description -n %{libname}%{libver}-devel Debugging information processing library development files. %prep %setup -q -c -n %{name}-%{version} %build %cmake . 
make VERBOSE=1 %{?_smp_mflags} %install rm -Rf %{buildroot} make install DESTDIR=%{buildroot} %ldconfig_scriptlets -n %{libname}%{libver} %files %doc README.ctracer %doc README.btf %doc NEWS %{_bindir}/btfdiff %{_bindir}/codiff %{_bindir}/ctracer %{_bindir}/dtagnames %{_bindir}/fullcircle %{_bindir}/pahole %{_bindir}/pdwtags %{_bindir}/pfunct %{_bindir}/pglobal %{_bindir}/prefcnt %{_bindir}/scncopy %{_bindir}/syscse %{_bindir}/ostra-cg %dir %{_datadir}/dwarves/ %dir %{_datadir}/dwarves/runtime/ %dir %{_datadir}/dwarves/runtime/python/ %defattr(0644,root,root,0755) %{_mandir}/man1/pahole.1* %{_datadir}/dwarves/runtime/Makefile %{_datadir}/dwarves/runtime/linux.blacklist.cu %{_datadir}/dwarves/runtime/ctracer_relay.c %{_datadir}/dwarves/runtime/ctracer_relay.h %attr(0755,root,root) %{_datadir}/dwarves/runtime/python/ostra.py* %files -n %{libname}%{libver} %{_libdir}/%{libname}.so.* %{_libdir}/%{libname}_emit.so.* %{_libdir}/%{libname}_reorganize.so.* %files -n %{libname}%{libver}-devel %doc MANIFEST README %{_includedir}/dwarves/btf_encoder.h %{_includedir}/dwarves/config.h %{_includedir}/dwarves/ctf_encoder.h %{_includedir}/dwarves/ctf.h %{_includedir}/dwarves/dutil.h %{_includedir}/dwarves/dwarves.h %{_includedir}/dwarves/dwarves_emit.h %{_includedir}/dwarves/dwarves_reorganize.h %{_includedir}/dwarves/elfcreator.h %{_includedir}/dwarves/elf_symtab.h %{_includedir}/dwarves/gobuffer.h %{_includedir}/dwarves/hash.h %{_includedir}/dwarves/libbtf.h %{_includedir}/dwarves/libctf.h %{_includedir}/dwarves/list.h %{_includedir}/dwarves/rbtree.h %{_includedir}/dwarves/strings.h %{_libdir}/%{libname}.so %{_libdir}/%{libname}_emit.so %{_libdir}/%{libname}_reorganize.so %changelog * Fri May 27 2019 Arnaldo Carvalho de Melo - 1.15-1 - New release: 1.15 - Fix --expand_types/-E segfault - Fixup endless printing named structs inside structs in --expand_types - Avoid NULL deref with num config in __class__fprintf() * Tue Apr 23 2019 Arnaldo Carvalho de Melo - 1.13-1 - New release: 1.13 - Infer __packed__ attributes, i.e. __attribute__((__packed__)) - Support DW_AT_alignment, i.e. __attribute__((__aligned__(N))) - Decode BTF type format and pretty print it - BTF encoding fixes - Use libbpf's BTF deduplication - Support unions as arguments to -C/--class - New 'pfunct --compile' generates compilable output with type definitions * Thu Aug 16 2018 Arnaldo Carvalho de Melo - 1.12-1 - New release: - union member cacheline boundaries for all inner structs - print union member offsets - Document 'pahole --hex' - Encode BTF type format for use with eBPF * Wed Jun 28 2017 Arnaldo Carvalho de Melo - 1.11-1 - New release * Wed May 30 2012 Arnaldo Carvalho de Melo - 1.10-1 - New release * Sat Nov 20 2010 Arnaldo Carvalho de Melo - 1.9-1 - New release * Fri Dec 4 2009 Arnaldo Carvalho de Melo - 1.8-1 - New release * Fri Feb 13 2009 Arnaldo Carvalho de Melo - 1.7-2 - Own /usr/share/dwarves, fixes #473645 * Fri Feb 13 2009 Arnaldo Carvalho de Melo - 1.7-1 - A CTF decoder based on work done by David S. 
Miller - Handle DW_TAG_class_type, - Add support for showing classes with a prefix - Add support to DW_TAG_ptr_to_member_type - Handle typedef definitions in functions - Print the number of members in a struct/class - Handle the empty base optimization trick (Zero sized C++ class) - codiff detect changes in the prototype even when function size doesn't change - pfunct: Implement --expand_types - Reduce memory consumption by using a strings table - Speed up struct search by name - Several minor bug fixes and infrastructure improvements. - Initial man page for pahole * Mon Feb 11 2008 Arnaldo Carvalho de Melo - 1.6-1 - c83d935a4fd561a3807f520c126c2a61ae1f4d83 - [DWARVES]: Use a hash table for the tags in a CU * Thu Feb 7 2008 Arnaldo Carvalho de Melo - 1.5-1 - c4e49add9e48ff08a8ba4187ea43d795af995136 - PAHOLE: Introduce --defined_in - DWARVES: Another fix for DW_TAG_base_type entries without DW_AT_name - PAHOLE: Cope with DW_TAG_basic_type entries without DW_AT_name - CODIFF: Allow passing /dev/null as one of the files to compare - DWARVES: Allow passing NULL as self to cu__find_ - DWARVES: Fixup usage messages - DWARVES: Find holes in inner, nameless structs - DWARVES: Adopt tag__follow_typedef from pahole - DWARVES: Add some destructors: tag, cu, namespace - CODIFF: Check if the objects are the same when we have build-id - DWARVES: Introduce cu__same_build_id - DWARVES_REORGANIZE: Proper tail padding fixup - DWARVES: Don't search in empty structs - DWARVES: Follow const and volatile tags to its ultimate types - PAHOLE: Add a newline after the --class_dwarf_offset output - PAHOLE: Expose type__find_first_biggest_size_base_type_member - DWARVES: Introduce type__find_first_biggest_size_base_type_member - PAHOLE: Account arrays properly when changing word-size - PAHOLE: Follow typedefs too when resizing unions - PAHOLE: Follow typedefs to find if they are resized structs/unions - PAHOLE: Check if types of struct and union members were already resized - DWARVES_REORGANIZE: Fixup class__fixup_alingment - PAHOLE: Allow changing the architecture word-size - DWARVES_REORGANIZE: Adopt class__add_offsets_from and class__fixup_alignment from ctracer - DWARVES: build id support requires a recent elfutils package * Sat Jan 5 2008 Arnaldo Carvalho de Melo - 1.4-1 - 8e099cf5d1f204e9ea1a9c8c0f1a09a43458d9d3 - codiff fixes * Sun Dec 9 2007 Arnaldo Carvalho de Melo - 1.3-2 - c6c71398cd2481e219ea3ef63f32c6479ba4f08f - SPEC file adjustments to follow http://fedoraproject.org/wiki/Packaging/cmake * Sat Dec 8 2007 Arnaldo Carvalho de Melo - 1.3-1 - c4ee21aa122f51f2601893b2118b7f7902d2f410 - Fixed bitfield byte offset handling, now there are no more BRAIN FART alerts on a x86_64 linux kernel and on an old openbsd kernel image. * Thu Dec 6 2007 Arnaldo Carvalho de Melo - 1.2-1 - 07e0974f2c3798acb8e9a2d06f6b2ece7a01c508 - Fix a patological bitfield case * Thu Dec 6 2007 Arnaldo Carvalho de Melo - 1.1-1 - 2c01420b51e889196b42a204910b46811ab22f1a - ctracer now generates systemtap scripts - Lots of other fixes, see git changelog. * Tue May 8 2007 Arnaldo Carvalho de Melo - 1.0-1 - 161c6712f4ae1b7e2ea50df3a0d5c28310905cec - handle --help, -? 
--usage on with_executable_option() * Tue May 8 2007 Arnaldo Carvalho de Melo - b8eb5eb214f3897ea6faa3272879baa8bf2573c0 - Fix cus__loadfl detection of --executable * Sun May 6 2007 Arnaldo Carvalho de Melo - 05351ece16e5203717dd21a6fc1ad2e6ff87c203 - libdwarves_emit * Tue Apr 3 2007 Arnaldo Carvalho de Melo - f3c4f527f70053e39b402005107ead6cb10e0b4a - Fix some --reorganize bugs * Mon Apr 2 2007 Arnaldo Carvalho de Melo - 1ec66565a12ce7f197cd40e3901ed6be84935781 - --reorganize improvements - --packable uses --reorganize code to show structs that can be packed by reorganization done with --reorganize. * Fri Mar 30 2007 Arnaldo Carvalho de Melo - fd3542317508d04e8178c5d391385d2aa50d6fb7 - Use libdwfl in all tools that handle just one file, codiff and ctracer still need work and are still using plain libdw. * Sun Feb 25 2007 Arnaldo Carvalho de Melo - 3c148cd84b74b89663febdefab23356952906502 - _snprintf routines changed to _fprintf - codiff shows diffs in number and total size of inline expansions - codiff shows diffs in the number of lexblocks - better alignment in the --expand_types case - CMake improvements * Fri Feb 2 2007 Arnaldo Carvalho de Melo - d37f41df58c375412badf827e24dfc346cea2ff2 - ostra-cg - relay/debugfs - mini-structs - ctracer2ostra - All this in the Makefile * Fri Feb 2 2007 Arnaldo Carvalho de Melo - b7cad1782d683571ffb2601b429ab151bddad5d7 - pglobal, by Davi Arnaut - pahole --show_reorg_steps - Reorganize bitfields in pahole --reorganize * Tue Jan 30 2007 Arnaldo Carvalho de Melo - 8e236f4ca37b8a3d2057f4ede5a14ab1fa99f73c - x86-64 lib install fixes * Tue Jan 30 2007 Arnaldo Carvalho de Melo - 4a4b75e75a6d7f34215d320cc4a9f669b6ba4075 - pahole --reorganize * Mon Jan 29 2007 Arnaldo Carvalho de Melo - 2de67fcaf401ac1e20feca5fa88dfc63fbc4203e - Type expansion! * Sat Jan 27 2007 Arnaldo Carvalho de Melo - 6bf2d2d7707b65e7ca21a13706d8d07824cd6f2f - ctracer improvements, /usr/lib/ctracer/, etc * Fri Jan 26 2007 Arnaldo Carvalho de Melo - c49f2c963425d5c09c429370e10d9af3d7d7fe32 - Emit typedefs of typedef arrays - Detect typedef loops - Fix emission of arrays of structs, unions, etc - use sysconf for the default cacheline size * Wed Jan 18 2007 Arnaldo Carvalho de Melo - fab0db03ea9046893ca110bb2b7d71b764f61033 - pdwtags added * Wed Jan 17 2007 Arnaldo Carvalho de Melo - e3786105c007a39ff3dbfb36a3037e786021e0c6 - First Fedora native build - struct, enum, enum, void typedefs * Sat Jan 13 2007 Arnaldo Carvalho de Melo - 9a413e60a3875980d99817722bf019cba3a24573 - pahole --nr_methods, improvements in tag__print, better support for unions * Fri Jan 12 2007 Arnaldo Carvalho de Melo - a1f5422656a91568a8b4edbcebaae9c1837b5cbd - Support a DW_TAG_reference_type * Fri Jan 12 2007 Arnaldo Carvalho de Melo - 0ad467a32187e1929c14054a0fc7326bc4d235c8 - Added a description * Thu Jan 11 2007 Arnaldo Carvalho de Melo - new release with type not found asserts replaced by error messages * Thu Jan 11 2007 Arnaldo Carvalho de Melo - package created dwarves-dfsg-1.15/scncopy.c000066400000000000000000000053331350511416500156630ustar00rootroot00000000000000/* * SPDX-License-Identifier: GPL-2.0-only * * Copyright 2009 Red Hat, Inc. 
* * Author: Peter Jones */ #include #include #include #include #include #include #include #include "elfcreator.h" #include "dutil.h" static int should_copy_scn(Elf *elf, GElf_Shdr *shdr, struct strlist *scns) { char *name; size_t shstrndx; if (elf_getshdrstrndx(elf, &shstrndx) < 0) return 0; name = elf_strptr(elf, shstrndx, shdr->sh_name); if (name == NULL) return 0; if (strlist__has_entry(scns, name)) return 1; return 0; } int main(int argc, char *argv[]) { int n; struct strlist *sections; char *infile = NULL, *outfile = NULL; int fd; Elf *elf; Elf_Scn *scn; int copy_all_sections = 0; ElfCreator *ctor; sections = strlist__new(false); for (n = 1; n < argc; n++) { if (!strcmp(argv[n], "-a")) { copy_all_sections = 1; } else if (!strcmp(argv[n], "-s")) { if (n == argc-1) { fprintf(stderr, "Missing argument to -s\n"); return -1; } n++; strlist__add(sections, argv[n]); continue; } else if (!strcmp(argv[n], "-o")) { if (n == argc-1) { fprintf(stderr, "Missing argument to -o\n"); return -1; } n++; outfile = argv[n]; continue; } else if (!strcmp(argv[n], "-?") || !strcmp(argv[n], "--help") || !strcmp(argv[n], "--usage")) { printf("usage: scncopy [-s section0 [[-s section1] ... -s sectionN] | -a ] -o outfile infile\n"); return 0; } else if (n == argc-1) { infile = argv[n]; } else { fprintf(stderr, "usage: pjoc -s section 0 [[-s section1] ... -s sectionN] -o outfile infile\n"); return 1; } } if (!infile || !outfile) { fprintf(stderr, "usage: pjoc -s section 0 [[-s section1] ... -s sectionN] -o outfile infile\n"); return 1; } if (!(fd = open(infile, O_RDONLY))) { fprintf(stderr, "Could not open \"%s\" for reading: %m\n", infile); return 1; } elf_version(EV_CURRENT); if ((elf = elf_begin(fd, ELF_C_READ_MMAP_PRIVATE, NULL)) == NULL) { fprintf(stderr, "cannot get elf descriptor for \"%s\": %s\n", infile, elf_errmsg(-1)); close(fd); return 1; } if (elf_kind(elf) != ELF_K_ELF) { fprintf(stderr, "\"%s\" is not an ELF file\n", infile); err: elf_end(elf); close(fd); return 1; } if ((ctor = elfcreator_begin(outfile, elf)) == NULL) { fprintf(stderr, "could not initialize ELF creator\n"); goto err; } scn = NULL; while ((scn = elf_nextscn(elf, scn)) != NULL) { GElf_Shdr shdr_mem, *shdr; shdr = gelf_getshdr(scn, &shdr_mem); if (shdr == NULL) continue; if (!should_copy_scn(elf, shdr, sections) && !copy_all_sections) continue; elfcreator_copy_scn(ctor, elf, scn); } elfcreator_end(ctor); return 0; } dwarves-dfsg-1.15/strings.c000066400000000000000000000037671350511416500157070ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2008 Arnaldo Carvalho de Melo */ #include "strings.h" #include "gobuffer.h" #include #include #include #include #include #include #include "dutil.h" struct strings *strings__new(void) { struct strings *strs = malloc(sizeof(*strs)); if (strs != NULL) { strs->tree = NULL; gobuffer__init(&strs->gb); } return strs; } static void do_nothing(void *ptr __unused) { } void strings__delete(struct strings *strs) { if (strs == NULL) return; tdestroy(strs->tree, do_nothing); __gobuffer__delete(&strs->gb); free(strs); } static strings_t strings__insert(struct strings *strs, const char *s) { return gobuffer__add(&strs->gb, s, strlen(s) + 1); } struct search_key { struct strings *strs; const char *str; }; static int strings__compare(const void *a, const void *b) { const struct search_key *key = a; return strcmp(key->str, key->strs->gb.entries + (unsigned long)b); } strings_t strings__add(struct strings *strs, const char *str) { unsigned long *s; strings_t index; struct 
search_key key = { .strs = strs, .str = str, }; if (str == NULL) return 0; s = tsearch(&key, &strs->tree, strings__compare); if (s != NULL) { if (*(struct search_key **)s == (void *)&key) { /* Not found, replace with the right key */ index = strings__insert(strs, str); if (index != 0) *s = (unsigned long)index; else { tdelete(&key, &strs->tree, strings__compare); return 0; } } else /* Found! */ index = *s; } else return 0; return index; } strings_t strings__find(struct strings *strs, const char *str) { strings_t *s; struct search_key key = { .strs = strs, .str = str, }; if (str == NULL) return 0; s = tfind(&key, &strs->tree, strings__compare); return s ? *s : 0; } int strings__cmp(const struct strings *strs, strings_t a, strings_t b) { return a == b ? 0 : strcmp(strings__ptr(strs, a), strings__ptr(strs, b)); } dwarves-dfsg-1.15/strings.h000066400000000000000000000023101350511416500156730ustar00rootroot00000000000000#ifndef _STRINGS_H_ #define _STRINGS_H_ 1 /* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2008 Arnaldo Carvalho de Melo */ #include "gobuffer.h" typedef unsigned int strings_t; struct strings { void *tree; struct gobuffer gb; }; struct strings *strings__new(void); void strings__delete(struct strings *strings); strings_t strings__add(struct strings *strings, const char *str); strings_t strings__find(struct strings *strings, const char *str); int strings__cmp(const struct strings *strings, strings_t a, strings_t b); static inline const char *strings__ptr(const struct strings *strings, strings_t s) { return gobuffer__ptr(&strings->gb, s); } static inline const char *strings__entries(const struct strings *strings) { return gobuffer__entries(&strings->gb); } static inline unsigned int strings__nr_entries(const struct strings *strings) { return gobuffer__nr_entries(&strings->gb); } static inline strings_t strings__size(const struct strings *strings) { return gobuffer__size(&strings->gb); } static inline const char *strings__compress(struct strings *strings, unsigned int *size) { return gobuffer__compress(&strings->gb, size); } #endif /* _STRINGS_H_ */ dwarves-dfsg-1.15/syscse.c000066400000000000000000000067251350511416500155240ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only Copyright (C) 2007-2016 Arnaldo Carvalho de Melo System call sign extender */ #include #include #include #include #include #include "dwarves.h" #include "dutil.h" static const char *prefix = "sys_"; static size_t prefix_len = 4; static bool filter(struct function *f, struct cu *cu) { if (f->proto.nr_parms != 0) { const char *name = function__name(f, cu); if (strlen(name) > prefix_len && memcmp(name, prefix, prefix_len) == 0) return false; } return true; } static void zero_extend(const int regparm, const struct base_type *bt, struct cu *cu, const char *parm) { const char *instr = "INVALID"; switch (bt->bit_size) { case 32: instr = "sll"; break; case 16: instr = "slw"; break; case 8: instr = "slb"; break; } char bf[64]; printf("\t%s\t$a%d, $a%d, 0" "\t/* zero extend $a%d(%s %s) from %d to 64-bit */\n", instr, regparm, regparm, regparm, base_type__name(bt, cu, bf, sizeof(bf)), parm, bt->bit_size); } static void emit_wrapper(struct function *f, struct cu *cu) { struct parameter *parm; const char *name = function__name(f, cu); int regparm = 0, needs_wrapper = 0; function__for_each_parameter(f, parm) { const type_id_t type_id = parm->tag.type; struct tag *type = cu__type(cu, type_id); tag__assert_search_result(type); if (type->tag == DW_TAG_base_type) { struct base_type *bt = 
tag__base_type(type); char bf[64]; if (bt->bit_size < 64 && strncmp(base_type__name(bt, cu, bf, sizeof(bf)), "unsigned", 8) == 0) { if (!needs_wrapper) { printf("wrap_%s:\n", name); needs_wrapper = 1; } zero_extend(regparm, bt, cu, parameter__name(parm, cu)); } } ++regparm; } if (needs_wrapper) printf("\tj\t%s\n\n", name); } static int cu__emit_wrapper(struct cu *cu, void *cookie __unused) { struct function *pos; uint32_t id; cu__for_each_function(cu, id, pos) if (!filter(pos, cu)) emit_wrapper(pos, cu); return 0; } static void cus__emit_wrapper(struct cus *cu) { cus__for_each_cu(cu, cu__emit_wrapper, NULL, NULL); } /* Name and version of program. */ ARGP_PROGRAM_VERSION_HOOK_DEF = dwarves_print_version; static const struct argp_option options[] = { { .key = 'p', .name = "prefix", .arg = "PREFIX", .doc = "function prefix", }, { .name = NULL, } }; static error_t options_parser(int key, char *arg, struct argp_state *state) { switch (key) { case ARGP_KEY_INIT: if (state->child_inputs != NULL) state->child_inputs[0] = state->input; break; case 'p': prefix = arg; prefix_len = strlen(prefix); break; default: return ARGP_ERR_UNKNOWN; } return 0; } static const char args_doc[] = "FILE"; static struct argp argp = { .options = options, .parser = options_parser, .args_doc = args_doc, }; int main(int argc, char *argv[]) { int err, remaining; struct cus *cus = cus__new(); if (cus == NULL) { fprintf(stderr, "%s: insufficient memory\n", argv[0]); return EXIT_FAILURE; } if (argp_parse(&argp, argc, argv, 0, &remaining, NULL) || remaining == argc) { argp_help(&argp, stderr, ARGP_HELP_SEE, argv[0]); return EXIT_FAILURE; } err = cus__load_files(cus, NULL, argv + remaining); if (err != 0) { cus__fprintf_load_files_err(cus, "syscse", argv + remaining, err, stderr); return EXIT_FAILURE; } cus__emit_wrapper(cus); return EXIT_SUCCESS; } pax_global_header00006660000000000000000000000064134473041130014512gustar00rootroot0000000000000052 comment=27a5f60a8b6bcb2d460ac03772ae387eba36f2b6 dwarves-dfsg-1.15/lib/bpf/000077500000000000000000000000001344730411300153515ustar00rootroot00000000000000dwarves-dfsg-1.15/lib/bpf/CHECKPOINT-COMMIT000066400000000000000000000000511344730411300177050ustar00rootroot00000000000000dd399ac9e343c7573c47d6820e4a23013c54749d dwarves-dfsg-1.15/lib/bpf/README000066400000000000000000000024061344730411300162330ustar00rootroot00000000000000 This is a mirror of bpf-next linux tree (https://kernel.googlesource.com/pub/scm/linux/kernel/git/bpf/bpf-next) bpf-next/tools/lib/bpf directory plus its supporting header files. The following files will by sync'ed with bpf-next repo: src/ <-> bpf-next/tools/lib/bpf/ include/uapi/linux/bpf_common.h <-> bpf-next/tools/include/uapi/linux/bpf_common.h include/uapi/linux/bpf.h <-> bpf-next/tools/include/uapi/linux/bpf.h include/uapi/linux/btf.h <-> bpf-next/tools/include/uapi/linux/btf.h include/uapi/linux/if_link.h <-> bpf-next/tools/include/uapi/linux/if_link.h include/uapi/linux/if_xdp.h <-> bpf-next/tools/include/uapi/linux/if_xdp.h include/uapi/linux/netlink.h <-> bpf-next/tools/include/uapi/linux/netlink.h include/tools/libc_compat.h <-> bpf-next/tools/include/tools/libc_compat.h Other header files at this repo (include/linux/*.h) are reduced versions of their counterpart files at bpf-next/tools/include/linux/*.h to make compilation successful. 
Build ===== To build static library libbpf.a: cd src make To build both static libbpf.a and shared libbpf.so libraries in directory build/ and install them together with libbpf headers in a staging directory root/: cd src mkdir build root BUILD_SHARED=y OBJDIR=build DESTDIR=root make install dwarves-dfsg-1.15/lib/bpf/include/000077500000000000000000000000001344730411300167745ustar00rootroot00000000000000dwarves-dfsg-1.15/lib/bpf/include/asm/000077500000000000000000000000001344730411300175545ustar00rootroot00000000000000dwarves-dfsg-1.15/lib/bpf/include/asm/barrier.h000066400000000000000000000002171344730411300213530ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __ASM_BARRIER_H #define __ASM_BARRIER_H #include #endif dwarves-dfsg-1.15/lib/bpf/include/linux/000077500000000000000000000000001344730411300201335ustar00rootroot00000000000000dwarves-dfsg-1.15/lib/bpf/include/linux/compiler.h000066400000000000000000000020711344730411300221160ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_COMPILER_H #define __LINUX_COMPILER_H #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #define READ_ONCE(x) (*(volatile typeof(x) *)&x) #define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v) #define barrier() asm volatile("" ::: "memory") #if defined(__x86_64__) # define smp_rmb() asm volatile("lfence" ::: "memory") # define smp_wmb() asm volatile("sfence" ::: "memory") # define smp_store_release(p, v) \ do { \ barrier(); \ WRITE_ONCE(*p, v); \ } while (0) # define smp_load_acquire(p) \ ({ \ typeof(*p) ___p = READ_ONCE(*p); \ barrier(); \ ___p; \ }) #else # define smp_mb() __sync_synchronize() # define smp_rmb() smp_mb() # define smp_wmb() smp_mb() # define smp_store_release(p, v) \ do { \ smp_mb(); \ WRITE_ONCE(*p, v); \ } while (0) # define smp_load_acquire(p) \ ({ \ typeof(*p) ___p = READ_ONCE(*p); \ smp_mb(); \ ___p; \ }) #endif /* defined(__x86_64__) */ #endif dwarves-dfsg-1.15/lib/bpf/include/linux/err.h000066400000000000000000000011151344730411300210720ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_ERR_H #define __LINUX_ERR_H #include #include #define MAX_ERRNO 4095 #define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO) static inline void * ERR_PTR(long error_) { return (void *) error_; } static inline long PTR_ERR(const void *ptr) { return (long) ptr; } static inline bool IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); } static inline bool IS_ERR_OR_NULL(const void *ptr) { return (!ptr) || IS_ERR_VALUE((unsigned long)ptr); } #endif dwarves-dfsg-1.15/lib/bpf/include/linux/filter.h000066400000000000000000000045021344730411300215720ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_FILTER_H #define __LINUX_FILTER_H #include #define BPF_ALU64_IMM(OP, DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) #define BPF_MOV64_IMM(DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) #define BPF_EXIT_INSN() \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_EXIT, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = 0 }) #define BPF_EMIT_CALL(FUNC) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_CALL, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = ((FUNC) - BPF_FUNC_unspec) }) 
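
/* The macros above and below expand to struct bpf_insn initializers, so a
 * small program can be written as a plain array.  A minimal sketch -- the
 * canonical "r0 = 0; exit;" program, which just returns 0:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 */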
#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) #define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) #define BPF_MOV64_REG(DST, SRC) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = 0 }) #define BPF_MOV32_IMM(DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_MOV | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) #define BPF_LD_IMM64_RAW(DST, SRC, IMM) \ ((struct bpf_insn) { \ .code = BPF_LD | BPF_DW | BPF_IMM, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = (__u32) (IMM) }), \ ((struct bpf_insn) { \ .code = 0, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = ((__u64) (IMM)) >> 32 }) #define BPF_LD_MAP_FD(DST, MAP_FD) \ BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD) #define BPF_JMP_IMM(OP, DST, IMM, OFF) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = OFF, \ .imm = IMM }) #endif dwarves-dfsg-1.15/lib/bpf/include/linux/kernel.h000066400000000000000000000017271344730411300215730ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_KERNEL_H #define __LINUX_KERNEL_H #ifndef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #endif #ifndef container_of #define container_of(ptr, type, member) ({ \ const typeof(((type *)0)->member) * __mptr = (ptr); \ (type *)((char *)__mptr - offsetof(type, member)); }) #endif #ifndef max #define max(x, y) ({ \ typeof(x) _max1 = (x); \ typeof(y) _max2 = (y); \ (void) (&_max1 == &_max2); \ _max1 > _max2 ? _max1 : _max2; }) #endif #ifndef min #define min(x, y) ({ \ typeof(x) _min1 = (x); \ typeof(y) _min2 = (y); \ (void) (&_min1 == &_min2); \ _min1 < _min2 ? _min1 : _min2; }) #endif #ifndef roundup #define roundup(x, y) ( \ { \ const typeof(y) __y = y; \ (((x) + (__y - 1)) / __y) * __y; \ } \ ) #endif #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #endif dwarves-dfsg-1.15/lib/bpf/include/linux/list.h000066400000000000000000000042171344730411300212630ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_LIST_H #define __LINUX_LIST_H #define LIST_HEAD_INIT(name) { &(name), &(name) } #define LIST_HEAD(name) \ struct list_head name = LIST_HEAD_INIT(name) #define POISON_POINTER_DELTA 0 #define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) #define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA) static inline void INIT_LIST_HEAD(struct list_head *list) { list->next = list; list->prev = list; } static inline void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next) { next->prev = new; new->next = next; new->prev = prev; prev->next = new; } /** * list_add - add a new entry * @new: new entry to be added * @head: list head to add it after * * Insert a new entry after the specified head. * This is good for implementing stacks. */ static inline void list_add(struct list_head *new, struct list_head *head) { __list_add(new, head, head->next); } /* * Delete a list entry by making the prev/next entries * point to each other. * * This is only for internal list manipulation where we know * the prev/next entries already! 
*/ static inline void __list_del(struct list_head * prev, struct list_head * next) { next->prev = prev; prev->next = next; } /** * list_del - deletes entry from list. * @entry: the element to delete from the list. * Note: list_empty() on entry does not return true after this, the entry is * in an undefined state. */ static inline void __list_del_entry(struct list_head *entry) { __list_del(entry->prev, entry->next); } static inline void list_del(struct list_head *entry) { __list_del(entry->prev, entry->next); entry->next = LIST_POISON1; entry->prev = LIST_POISON2; } #define list_entry(ptr, type, member) \ container_of(ptr, type, member) #define list_first_entry(ptr, type, member) \ list_entry((ptr)->next, type, member) #define list_next_entry(pos, member) \ list_entry((pos)->member.next, typeof(*(pos)), member) #endif dwarves-dfsg-1.15/lib/bpf/include/linux/overflow.h000066400000000000000000000052611344730411300221530ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_OVERFLOW_H #define __LINUX_OVERFLOW_H #define is_signed_type(type) (((type)(-1)) < (type)1) #define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type))) #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) #define type_min(T) ((T)((T)-type_max(T)-(T)1)) #ifndef unlikely #define unlikely(x) __builtin_expect(!!(x), 0) #endif #ifdef __GNUC__ #define GCC_VERSION (__GNUC__ * 10000 \ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) #if GCC_VERSION >= 50100 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif #endif #ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW #define check_mul_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ __builtin_mul_overflow(__a, __b, __d); \ }) #else /* * If one of a or b is a compile-time constant, this avoids a division. */ #define __unsigned_mul_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = __a * __b; \ __builtin_constant_p(__b) ? \ __b > 0 && __a > type_max(typeof(__a)) / __b : \ __a > 0 && __b > type_max(typeof(__b)) / __a; \ }) /* * Signed multiplication is rather hard. gcc always follows C99, so * division is truncated towards 0. This means that we can write the * overflow check like this: * * (a > 0 && (b > MAX/a || b < MIN/a)) || * (a < -1 && (b > MIN/a || b < MAX/a) || * (a == -1 && b == MIN) * * The redundant casts of -1 are to silence an annoying -Wtype-limits * (included in -Wextra) warning: When the type is u8 or u16, the * __b_c_e in check_mul_overflow obviously selects * __unsigned_mul_overflow, but unfortunately gcc still parses this * code and warns about the limited range of __b. 
*/ #define __signed_mul_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ typeof(a) __tmax = type_max(typeof(a)); \ typeof(a) __tmin = type_min(typeof(a)); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = (__u64)__a * (__u64)__b; \ (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ (__b == (typeof(__b))-1 && __a == __tmin); \ }) #define check_mul_overflow(a, b, d) \ __builtin_choose_expr(is_signed_type(typeof(a)), \ __signed_mul_overflow(a, b, d), \ __unsigned_mul_overflow(a, b, d)) #endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ #endif dwarves-dfsg-1.15/lib/bpf/include/linux/ring_buffer.h000066400000000000000000000007301344730411300225740ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef _TOOLS_LINUX_RING_BUFFER_H_ #define _TOOLS_LINUX_RING_BUFFER_H_ #include static inline __u64 ring_buffer_read_head(struct perf_event_mmap_page *base) { return smp_load_acquire(&base->data_head); } static inline void ring_buffer_write_tail(struct perf_event_mmap_page *base, __u64 tail) { smp_store_release(&base->data_tail, tail); } #endif /* _TOOLS_LINUX_RING_BUFFER_H_ */ dwarves-dfsg-1.15/lib/bpf/include/linux/types.h000066400000000000000000000011501344730411300214450ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_TYPES_H #define __LINUX_TYPES_H #include #include #include #include #include #define __bitwise__ #define __bitwise __bitwise__ typedef __u16 __bitwise __le16; typedef __u16 __bitwise __be16; typedef __u32 __bitwise __le32; typedef __u32 __bitwise __be32; typedef __u64 __bitwise __le64; typedef __u64 __bitwise __be64; #ifndef __aligned_u64 # define __aligned_u64 __u64 __attribute__((aligned(8))) #endif struct list_head { struct list_head *next, *prev; }; #endif dwarves-dfsg-1.15/lib/bpf/include/tools/000077500000000000000000000000001344730411300201345ustar00rootroot00000000000000dwarves-dfsg-1.15/lib/bpf/include/tools/libc_compat.h000066400000000000000000000007051344730411300225630ustar00rootroot00000000000000// SPDX-License-Identifier: (LGPL-2.0+ OR BSD-2-Clause) /* Copyright (C) 2018 Netronome Systems, Inc. */ #ifndef __TOOLS_LIBC_COMPAT_H #define __TOOLS_LIBC_COMPAT_H #include #include #ifdef COMPAT_NEED_REALLOCARRAY static inline void *reallocarray(void *ptr, size_t nmemb, size_t size) { size_t bytes; if (unlikely(check_mul_overflow(nmemb, size, &bytes))) return NULL; return realloc(ptr, bytes); } #endif #endif dwarves-dfsg-1.15/lib/bpf/include/uapi/000077500000000000000000000000001344730411300177325ustar00rootroot00000000000000dwarves-dfsg-1.15/lib/bpf/include/uapi/linux/000077500000000000000000000000001344730411300210715ustar00rootroot00000000000000dwarves-dfsg-1.15/lib/bpf/include/uapi/linux/bpf.h000066400000000000000000003661031344730411300220220ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. 
*/ #ifndef _UAPI__LINUX_BPF_H__ #define _UAPI__LINUX_BPF_H__ #include #include /* Extended instruction set based on top of classic BPF */ /* instruction classes */ #define BPF_JMP32 0x06 /* jmp mode in word width */ #define BPF_ALU64 0x07 /* alu mode in double word width */ /* ld/ldx fields */ #define BPF_DW 0x18 /* double word (64-bit) */ #define BPF_XADD 0xc0 /* exclusive add */ /* alu/jmp fields */ #define BPF_MOV 0xb0 /* mov reg to reg */ #define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ /* change endianness of a register */ #define BPF_END 0xd0 /* flags for endianness conversion: */ #define BPF_TO_LE 0x00 /* convert to little-endian */ #define BPF_TO_BE 0x08 /* convert to big-endian */ #define BPF_FROM_LE BPF_TO_LE #define BPF_FROM_BE BPF_TO_BE /* jmp encodings */ #define BPF_JNE 0x50 /* jump != */ #define BPF_JLT 0xa0 /* LT is unsigned, '<' */ #define BPF_JLE 0xb0 /* LE is unsigned, '<=' */ #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ #define BPF_JSLT 0xc0 /* SLT is signed, '<' */ #define BPF_JSLE 0xd0 /* SLE is signed, '<=' */ #define BPF_CALL 0x80 /* function call */ #define BPF_EXIT 0x90 /* function return */ /* Register numbers */ enum { BPF_REG_0 = 0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5, BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9, BPF_REG_10, __MAX_BPF_REG, }; /* BPF has 10 general purpose 64-bit registers and stack frame. */ #define MAX_BPF_REG __MAX_BPF_REG struct bpf_insn { __u8 code; /* opcode */ __u8 dst_reg:4; /* dest register */ __u8 src_reg:4; /* source register */ __s16 off; /* signed offset */ __s32 imm; /* signed immediate constant */ }; /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ struct bpf_lpm_trie_key { __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ __u8 data[0]; /* Arbitrary size */ }; struct bpf_cgroup_storage_key { __u64 cgroup_inode_id; /* cgroup inode id */ __u32 attach_type; /* program attach type */ }; /* BPF syscall commands, see bpf(2) man-page for details. */ enum bpf_cmd { BPF_MAP_CREATE, BPF_MAP_LOOKUP_ELEM, BPF_MAP_UPDATE_ELEM, BPF_MAP_DELETE_ELEM, BPF_MAP_GET_NEXT_KEY, BPF_PROG_LOAD, BPF_OBJ_PIN, BPF_OBJ_GET, BPF_PROG_ATTACH, BPF_PROG_DETACH, BPF_PROG_TEST_RUN, BPF_PROG_GET_NEXT_ID, BPF_MAP_GET_NEXT_ID, BPF_PROG_GET_FD_BY_ID, BPF_MAP_GET_FD_BY_ID, BPF_OBJ_GET_INFO_BY_FD, BPF_PROG_QUERY, BPF_RAW_TRACEPOINT_OPEN, BPF_BTF_LOAD, BPF_BTF_GET_FD_BY_ID, BPF_TASK_FD_QUERY, BPF_MAP_LOOKUP_AND_DELETE_ELEM, }; enum bpf_map_type { BPF_MAP_TYPE_UNSPEC, BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PROG_ARRAY, BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_MAP_TYPE_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_ARRAY, BPF_MAP_TYPE_STACK_TRACE, BPF_MAP_TYPE_CGROUP_ARRAY, BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_LPM_TRIE, BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_DEVMAP, BPF_MAP_TYPE_SOCKMAP, BPF_MAP_TYPE_CPUMAP, BPF_MAP_TYPE_XSKMAP, BPF_MAP_TYPE_SOCKHASH, BPF_MAP_TYPE_CGROUP_STORAGE, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, BPF_MAP_TYPE_QUEUE, BPF_MAP_TYPE_STACK, }; /* Note that tracing related programs such as * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT} * are not subject to a stable API since kernel internal data * structures can change from release to release and may * therefore break existing tracing BPF programs. Tracing BPF * programs correspond to /a/ specific kernel which is to be * analyzed, and not /a/ specific kernel /and/ all future ones. 
*/ enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC, BPF_PROG_TYPE_SOCKET_FILTER, BPF_PROG_TYPE_KPROBE, BPF_PROG_TYPE_SCHED_CLS, BPF_PROG_TYPE_SCHED_ACT, BPF_PROG_TYPE_TRACEPOINT, BPF_PROG_TYPE_XDP, BPF_PROG_TYPE_PERF_EVENT, BPF_PROG_TYPE_CGROUP_SKB, BPF_PROG_TYPE_CGROUP_SOCK, BPF_PROG_TYPE_LWT_IN, BPF_PROG_TYPE_LWT_OUT, BPF_PROG_TYPE_LWT_XMIT, BPF_PROG_TYPE_SOCK_OPS, BPF_PROG_TYPE_SK_SKB, BPF_PROG_TYPE_CGROUP_DEVICE, BPF_PROG_TYPE_SK_MSG, BPF_PROG_TYPE_RAW_TRACEPOINT, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_PROG_TYPE_LWT_SEG6LOCAL, BPF_PROG_TYPE_LIRC_MODE2, BPF_PROG_TYPE_SK_REUSEPORT, BPF_PROG_TYPE_FLOW_DISSECTOR, }; enum bpf_attach_type { BPF_CGROUP_INET_INGRESS, BPF_CGROUP_INET_EGRESS, BPF_CGROUP_INET_SOCK_CREATE, BPF_CGROUP_SOCK_OPS, BPF_SK_SKB_STREAM_PARSER, BPF_SK_SKB_STREAM_VERDICT, BPF_CGROUP_DEVICE, BPF_SK_MSG_VERDICT, BPF_CGROUP_INET4_BIND, BPF_CGROUP_INET6_BIND, BPF_CGROUP_INET4_CONNECT, BPF_CGROUP_INET6_CONNECT, BPF_CGROUP_INET4_POST_BIND, BPF_CGROUP_INET6_POST_BIND, BPF_CGROUP_UDP4_SENDMSG, BPF_CGROUP_UDP6_SENDMSG, BPF_LIRC_MODE2, BPF_FLOW_DISSECTOR, __MAX_BPF_ATTACH_TYPE }; #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command * * NONE(default): No further bpf programs allowed in the subtree. * * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program, * the program in this cgroup yields to sub-cgroup program. * * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program, * that cgroup program gets run in addition to the program in this cgroup. * * Only one program is allowed to be attached to a cgroup with * NONE or BPF_F_ALLOW_OVERRIDE flag. * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will * release old program and attach the new one. Attach flags has to match. * * Multiple programs are allowed to be attached to a cgroup with * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order * (those that were attached first, run first) * The programs of sub-cgroup are executed first, then programs of * this cgroup and then programs of parent cgroup. * When children program makes decision (like picking TCP CA or sock bind) * parent program has a chance to override it. * * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups. * A cgroup with NONE doesn't allow any programs in sub-cgroups. * Ex1: * cgrp1 (MULTI progs A, B) -> * cgrp2 (OVERRIDE prog C) -> * cgrp3 (MULTI prog D) -> * cgrp4 (OVERRIDE prog E) -> * cgrp5 (NONE prog F) * the event in cgrp5 triggers execution of F,D,A,B in that order. * if prog F is detached, the execution is E,D,A,B * if prog F and D are detached, the execution is E,A,B * if prog F, E and D are detached, the execution is C,A,B * * All eligible programs are executed regardless of return code from * earlier programs. */ #define BPF_F_ALLOW_OVERRIDE (1U << 0) #define BPF_F_ALLOW_MULTI (1U << 1) /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the * verifier will perform strict alignment checking as if the kernel * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, * and NET_IP_ALIGN defined to 2. */ #define BPF_F_STRICT_ALIGNMENT (1U << 0) /* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the * verifier will allow any alignment whatsoever. On platforms * with strict alignment requirements for loads ands stores (such * as sparc and mips) the verifier validates that all loads and * stores provably follow this requirement. This flag turns that * checking and enforcement off. 
* * It is mostly used for testing when we want to validate the * context and memory access aspects of the verifier, but because * of an unaligned access the alignment check would trigger before * the one we are interested in. */ #define BPF_F_ANY_ALIGNMENT (1U << 1) /* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */ #define BPF_PSEUDO_MAP_FD 1 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative * offset to another bpf function */ #define BPF_PSEUDO_CALL 1 /* flags for BPF_MAP_UPDATE_ELEM command */ #define BPF_ANY 0 /* create new element or update existing */ #define BPF_NOEXIST 1 /* create new element if it didn't exist */ #define BPF_EXIST 2 /* update existing element */ #define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */ /* flags for BPF_MAP_CREATE command */ #define BPF_F_NO_PREALLOC (1U << 0) /* Instead of having one common LRU list in the * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list * which can scale and perform better. * Note, the LRU nodes (including free nodes) cannot be moved * across different LRU lists. */ #define BPF_F_NO_COMMON_LRU (1U << 1) /* Specify numa node during map creation */ #define BPF_F_NUMA_NODE (1U << 2) #define BPF_OBJ_NAME_LEN 16U /* Flags for accessing BPF object */ #define BPF_F_RDONLY (1U << 3) #define BPF_F_WRONLY (1U << 4) /* Flag for stack_map, store build_id+offset instead of pointer */ #define BPF_F_STACK_BUILD_ID (1U << 5) /* Zero-initialize hash function seed. This should only be used for testing. */ #define BPF_F_ZERO_SEED (1U << 6) /* flags for BPF_PROG_QUERY */ #define BPF_F_QUERY_EFFECTIVE (1U << 0) enum bpf_stack_build_id_status { /* user space needs an empty entry to identify the end of a trace */ BPF_STACK_BUILD_ID_EMPTY = 0, /* with valid build_id and offset */ BPF_STACK_BUILD_ID_VALID = 1, /* couldn't get build_id, fallback to ip */ BPF_STACK_BUILD_ID_IP = 2, }; #define BPF_BUILD_ID_SIZE 20 struct bpf_stack_build_id { __s32 status; unsigned char build_id[BPF_BUILD_ID_SIZE]; union { __u64 offset; __u64 ip; }; }; union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ __u32 map_type; /* one of enum bpf_map_type */ __u32 key_size; /* size of key in bytes */ __u32 value_size; /* size of value in bytes */ __u32 max_entries; /* max number of entries in a map */ __u32 map_flags; /* BPF_MAP_CREATE related * flags defined above. */ __u32 inner_map_fd; /* fd pointing to the inner map */ __u32 numa_node; /* numa node (effective only if * BPF_F_NUMA_NODE is set). 
*/ char map_name[BPF_OBJ_NAME_LEN]; __u32 map_ifindex; /* ifindex of netdev to create on */ __u32 btf_fd; /* fd pointing to a BTF type data */ __u32 btf_key_type_id; /* BTF type_id of the key */ __u32 btf_value_type_id; /* BTF type_id of the value */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ __u32 map_fd; __aligned_u64 key; union { __aligned_u64 value; __aligned_u64 next_key; }; __u64 flags; }; struct { /* anonymous struct used by BPF_PROG_LOAD command */ __u32 prog_type; /* one of enum bpf_prog_type */ __u32 insn_cnt; __aligned_u64 insns; __aligned_u64 license; __u32 log_level; /* verbosity level of verifier */ __u32 log_size; /* size of user buffer */ __aligned_u64 log_buf; /* user supplied buffer */ __u32 kern_version; /* not used */ __u32 prog_flags; char prog_name[BPF_OBJ_NAME_LEN]; __u32 prog_ifindex; /* ifindex of netdev to prep for */ /* For some prog types expected attach type must be known at * load time to verify attach type specific parts of prog * (context accesses, allowed helpers, etc). */ __u32 expected_attach_type; __u32 prog_btf_fd; /* fd pointing to BTF type data */ __u32 func_info_rec_size; /* userspace bpf_func_info size */ __aligned_u64 func_info; /* func info */ __u32 func_info_cnt; /* number of bpf_func_info records */ __u32 line_info_rec_size; /* userspace bpf_line_info size */ __aligned_u64 line_info; /* line info */ __u32 line_info_cnt; /* number of bpf_line_info records */ }; struct { /* anonymous struct used by BPF_OBJ_* commands */ __aligned_u64 pathname; __u32 bpf_fd; __u32 file_flags; }; struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ __u32 target_fd; /* container object to attach to */ __u32 attach_bpf_fd; /* eBPF program to attach */ __u32 attach_type; __u32 attach_flags; }; struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ __u32 prog_fd; __u32 retval; __u32 data_size_in; /* input: len of data_in */ __u32 data_size_out; /* input/output: len of data_out * returns ENOSPC if data_out * is too small. */ __aligned_u64 data_in; __aligned_u64 data_out; __u32 repeat; __u32 duration; } test; struct { /* anonymous struct used by BPF_*_GET_*_ID */ union { __u32 start_id; __u32 prog_id; __u32 map_id; __u32 btf_id; }; __u32 next_id; __u32 open_flags; }; struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ __u32 bpf_fd; __u32 info_len; __aligned_u64 info; } info; struct { /* anonymous struct used by BPF_PROG_QUERY command */ __u32 target_fd; /* container object to query */ __u32 attach_type; __u32 query_flags; __u32 attach_flags; __aligned_u64 prog_ids; __u32 prog_cnt; } query; struct { __u64 name; __u32 prog_fd; } raw_tracepoint; struct { /* anonymous struct for BPF_BTF_LOAD */ __aligned_u64 btf; __aligned_u64 btf_log_buf; __u32 btf_size; __u32 btf_log_size; __u32 btf_log_level; }; struct { __u32 pid; /* input: pid */ __u32 fd; /* input: fd */ __u32 flags; /* input: flags */ __u32 buf_len; /* input/output: buf len */ __aligned_u64 buf; /* input/output: * tp_name for tracepoint * symbol for kprobe * filename for uprobe */ __u32 prog_id; /* output: prog_id */ __u32 fd_type; /* output: BPF_FD_TYPE_* */ __u64 probe_offset; /* output: probe_offset */ __u64 probe_addr; /* output: probe_addr */ } task_fd_query; } __attribute__((aligned(8))); /* The description below is an attempt at providing documentation to eBPF * developers about the multiple available eBPF helper functions. It can be * parsed and used to produce a manual page. 
The workflow is the following, * and requires the rst2man utility: * * $ ./scripts/bpf_helpers_doc.py \ * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7 * $ man /tmp/bpf-helpers.7 * * Note that in order to produce this external documentation, some RST * formatting is used in the descriptions to get "bold" and "italics" in * manual pages. Also note that the few trailing white spaces are * intentional, removing them would break paragraphs for rst2man. * * Start of BPF helper function descriptions: * * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key) * Description * Perform a lookup in *map* for an entry associated to *key*. * Return * Map value associated to *key*, or **NULL** if no entry was * found. * * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) * Description * Add or update the value of the entry associated to *key* in * *map* with *value*. *flags* is one of: * * **BPF_NOEXIST** * The entry for *key* must not exist in the map. * **BPF_EXIST** * The entry for *key* must already exist in the map. * **BPF_ANY** * No condition on the existence of the entry for *key*. * * Flag value **BPF_NOEXIST** cannot be used for maps of types * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all * elements always exist), the helper would return an error. * Return * 0 on success, or a negative error in case of failure. * * int bpf_map_delete_elem(struct bpf_map *map, const void *key) * Description * Delete entry with *key* from *map*. * Return * 0 on success, or a negative error in case of failure. * * int bpf_probe_read(void *dst, u32 size, const void *src) * Description * For tracing programs, safely attempt to read *size* bytes from * address *src* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_ktime_get_ns(void) * Description * Return the time elapsed since system boot, in nanoseconds. * Return * Current *ktime*. * * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...) * Description * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if * available. It can take up to three additional **u64** * arguments (as for eBPF helpers, the total number of arguments is * limited to five). * * Each time the helper is called, it appends a line to the trace. * The format of the trace is customizable, and the exact output * one will get depends on the options set in * *\/sys/kernel/debug/tracing/trace_options* (see also the * *README* file under the same directory). However, it usually * defaults to something like: * * :: * * telnet-470 [001] .N.. 419421.045894: 0x00000001: <formatted msg> * * In the above: * * * ``telnet`` is the name of the current task. * * ``470`` is the PID of the current task. * * ``001`` is the CPU number on which the task is * running. * * In ``.N..``, each character refers to a set of * options (whether irqs are enabled, scheduling * options, whether hard/softirqs are running, level of * preempt_disabled respectively). **N** means that * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED** * are set. * * ``419421.045894`` is a timestamp. * * ``0x00000001`` is a fake value used by BPF for the * instruction pointer register. * * ``<formatted msg>`` is the message formatted with * *fmt*. * * The conversion specifiers supported by *fmt* are similar, but * more limited than for printk(). 
They are **%d**, **%i**, * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**, * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size * of field, padding with zeroes, etc.) is available, and the * helper will return **-EINVAL** (but print nothing) if it * encounters an unknown specifier. * * Also, note that **bpf_trace_printk**\ () is slow, and should * only be used for debugging purposes. For this reason, a notice * block (spanning several lines) is printed to kernel logs and * states that the helper should not be used "for production use" * the first time this helper is used (or more precisely, when * **trace_printk**\ () buffers are allocated). For passing values * to user space, perf events should be preferred. * Return * The number of bytes written to the buffer, or a negative error * in case of failure. * * u32 bpf_get_prandom_u32(void) * Description * Get a pseudo-random number. * * From a security point of view, this helper uses its own * pseudo-random internal state, and cannot be used to infer the * seed of other random functions in the kernel. However, it is * essential to note that the generator used by the helper is not * cryptographically secure. * Return * A random 32-bit unsigned value. * * u32 bpf_get_smp_processor_id(void) * Description * Get the SMP (symmetric multiprocessing) processor id. Note that * all programs run with preemption disabled, which means that the * SMP processor id is stable during all the execution of the * program. * Return * The SMP id of the processor running the program. * * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. *flags* are a combination of * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the * checksum for the packet after storing the bytes) and * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ * **->swhash** and *skb*\ **->l4hash** to 0). * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) * Description * Recompute the layer 3 (e.g. IP) checksum for the packet * associated to *skb*. Computation is incremental, so the helper * must know the former value of the header field that was * modified (*from*), the new value of this field (*to*), and the * number of bytes (2 or 4) for this field, stored in *size*. * Alternatively, it is possible to store the difference between * the previous and the new values of the header field in *to*, by * setting *from* and *size* to 0. For both methods, *offset* * indicates the location of the IP checksum within the packet. * * This helper works in combination with **bpf_csum_diff**\ (), * which does not update the checksum in-place, but offers more * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. 
* Return * 0 on success, or a negative error in case of failure. * * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) * Description * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the * packet associated to *skb*. Computation is incremental, so the * helper must know the former value of the header field that was * modified (*from*), the new value of this field (*to*), and the * number of bytes (2 or 4) for this field, stored in the lowest * four bits of *flags*. Alternatively, it is possible to store * the difference between the previous and the new values of the * header field in *to*, by setting *from* and the four lowest * bits of *flags* to 0. For both methods, *offset* indicates the * location of the IP checksum within the packet. In addition to * the size of the field, actual flags can be added to (bitwise * OR-ed into) *flags*. With **BPF_F_MARK_MANGLED_0**, a null checksum is left * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and * for updates resulting in a null checksum the value is set to * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates * the checksum is to be computed against a pseudo-header. * * This helper works in combination with **bpf_csum_diff**\ (), * which does not update the checksum in-place, but offers more * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) * Description * This special helper is used to trigger a "tail call", or in * other words, to jump into another eBPF program. The same stack * frame is used (but values on stack and in registers for the * caller are not accessible to the callee). This mechanism allows * for program chaining, either to raise the maximum number of * available eBPF instructions, or to execute given programs in * conditional blocks. For security reasons, there is an upper * limit to the number of successive tail calls that can be * performed. * * Upon call of this helper, the program attempts to jump into a * program referenced at index *index* in *prog_array_map*, a * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes * *ctx*, a pointer to the context. * * If the call succeeds, the kernel immediately runs the first * instruction of the new program. This is not a function call, * and it never returns to the previous program. If the call * fails, then the helper has no effect, and the caller continues * to run its subsequent instructions. A call can fail if the * destination program for the jump does not exist (i.e. *index* * is greater than or equal to the number of entries in *prog_array_map*), or * if the maximum number of tail calls has been reached for this * chain of programs. This limit is defined in the kernel by the * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), * which is currently set to 32. * Return * 0 on success, or a negative error in case of failure. * * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) * Description * Clone and redirect the packet associated to *skb* to another * net device of index *ifindex*. 
Both ingress and egress * interfaces can be used for redirection. The **BPF_F_INGRESS** * value in *flags* is used to make the distinction (ingress path * is selected if the flag is present, egress path otherwise). * This is the only flag supported for now. * * In comparison with **bpf_redirect**\ () helper, * **bpf_clone_redirect**\ () has the associated cost of * duplicating the packet buffer, but this can be executed out of * the eBPF program. Conversely, **bpf_redirect**\ () is more * efficient, but it is handled through an action code where the * redirection happens only after the eBPF program has returned. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_get_current_pid_tgid(void) * Return * A 64-bit integer containing the current tgid and pid, and * created as such: * *current_task*\ **->tgid << 32 \|** * *current_task*\ **->pid**. * * u64 bpf_get_current_uid_gid(void) * Return * A 64-bit integer containing the current GID and UID, and * created as such: *current_gid* **<< 32 \|** *current_uid*. * * int bpf_get_current_comm(char *buf, u32 size_of_buf) * Description * Copy the **comm** attribute of the current task into *buf* of * *size_of_buf*. The **comm** attribute contains the name of * the executable (excluding the path) for the current task. The * *size_of_buf* must be strictly positive. On success, the * helper makes sure that the *buf* is NUL-terminated. On failure, * it is filled with zeroes. * Return * 0 on success, or a negative error in case of failure. * * u32 bpf_get_cgroup_classid(struct sk_buff *skb) * Description * Retrieve the classid for the current task, i.e. for the net_cls * cgroup to which *skb* belongs. * * This helper can be used on TC egress path, but not on ingress. * * The net_cls cgroup provides an interface to tag network packets * based on a user-provided identifier for all traffic coming from * the tasks belonging to the related cgroup. See also the related * kernel documentation, available from the Linux sources in file * *Documentation/cgroup-v1/net_cls.txt*. * * The Linux kernel has two versions for cgroups: there are * cgroups v1 and cgroups v2. Both are available to users, who can * use a mixture of them, but note that the net_cls cgroup is for * cgroup v1 only. This makes it incompatible with BPF programs * run on cgroups, which is a cgroup-v2-only feature (a socket can * only hold data for one version of cgroups at a time). * * This helper is only available if the kernel was compiled with * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to * "**y**" or to "**m**". * Return * The classid, or 0 for the default unconfigured classid. * * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) * Description * Push a *vlan_tci* (VLAN tag control information) of protocol * *vlan_proto* to the packet associated to *skb*, then update * the checksum. Note that if *vlan_proto* is different from * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to * be **ETH_P_8021Q**. * * A call to this helper is susceptible to change the underlying * packet buffer. 
Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_vlan_pop(struct sk_buff *skb) * Description * Pop a VLAN header from the packet associated to *skb*. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Get tunnel metadata. This helper takes a pointer *key* to an * empty **struct bpf_tunnel_key** of **size**, that will be * filled with tunnel metadata for the packet associated to *skb*. * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which * indicates that the tunnel is based on IPv6 protocol instead of * IPv4. * * The **struct bpf_tunnel_key** is an object that generalizes the * principal parameters used by various tunneling protocols into a * single struct. This way, it can be used to easily make a * decision based on the contents of the encapsulation header, * "summarized" in this struct. In particular, it holds the IP * address of the remote end (IPv4 or IPv6, depending on the case) * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also, * this struct exposes the *key*\ **->tunnel_id**, which is * generally mapped to a VNI (Virtual Network Identifier), making * it programmable together with the **bpf_skb_set_tunnel_key**\ * () helper. * * Let's imagine that the following code is part of a program * attached to the TC ingress interface, on one end of a GRE * tunnel, and is supposed to filter out all messages coming from * remote ends with IPv4 address other than 10.0.0.1: * * :: * * int ret; * struct bpf_tunnel_key key = {}; * * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); * if (ret < 0) * return TC_ACT_SHOT; // drop packet * * if (key.remote_ipv4 != 0x0a000001) * return TC_ACT_SHOT; // drop packet * * return TC_ACT_OK; // accept packet * * This interface can also be used with all encapsulation devices * that can operate in "collect metadata" mode: instead of having * one network device per specific configuration, the "collect * metadata" mode only requires a single device where the * configuration can be extracted from this helper. * * This can be used together with various tunnels such as VXLAN, * Geneve, GRE or IP in IP (IPIP). * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Populate tunnel metadata for packet associated to *skb*. The * tunnel metadata is set to the contents of *key*, of *size*. The * *flags* can be set to a combination of the following values: * * **BPF_F_TUNINFO_IPV6** * Indicate that the tunnel is based on IPv6 protocol * instead of IPv4. * **BPF_F_ZERO_CSUM_TX** * For IPv4 packets, add a flag to tunnel metadata * indicating that checksum computation should be skipped * and checksum set to zeroes. * **BPF_F_DONT_FRAGMENT** * Add a flag to tunnel metadata indicating that the * packet should not be fragmented. 
* **BPF_F_SEQ_NUMBER** * Add a flag to tunnel metadata indicating that a * sequence number should be added to tunnel header before * sending the packet. This flag was added for GRE * encapsulation, but might be used with other protocols * as well in the future. * * Here is a typical usage on the transmit path: * * :: * * struct bpf_tunnel_key key; * populate key ... * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0); * * See also the description of the **bpf_skb_get_tunnel_key**\ () * helper for additional information. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags) * Description * Read the value of a perf event counter. This helper relies on a * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of * the perf event counter is selected when *map* is updated with * perf event file descriptors. The *map* is an array whose size * is the number of available CPUs, and each cell contains a value * relative to one CPU. The value to retrieve is indicated by * *flags*, that contains the index of the CPU to look up, masked * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to * **BPF_F_CURRENT_CPU** to indicate that the value for the * current CPU should be retrieved. * * Note that before Linux 4.13, only hardware perf events can be * retrieved. * * Also, be aware that the newer helper * **bpf_perf_event_read_value**\ () is recommended over * **bpf_perf_event_read**\ () in general. The latter has some ABI * quirks where error and counter value are used as a return code * (which is wrong to do since ranges may overlap). This issue is * fixed with **bpf_perf_event_read_value**\ (), which at the same * time provides more features over the **bpf_perf_event_read**\ * () interface. Please refer to the description of * **bpf_perf_event_read_value**\ () for details. * Return * The value of the perf event counter read from the map, or a * negative error code in case of failure. * * int bpf_redirect(u32 ifindex, u64 flags) * Description * Redirect the packet to another net device of index *ifindex*. * This helper is somewhat similar to **bpf_clone_redirect**\ * (), except that the packet is not cloned, which provides * increased performance. * * Except for XDP, both ingress and egress interfaces can be used * for redirection. The **BPF_F_INGRESS** value in *flags* is used * to make the distinction (ingress path is selected if the flag * is present, egress path otherwise). Currently, XDP only * supports redirection to the egress interface, and accepts no * flag at all. * * The same effect can be attained with the more generic * **bpf_redirect_map**\ (), which requires specific maps to be * used but offers better performance. * Return * For XDP, the helper returns **XDP_REDIRECT** on success or * **XDP_ABORTED** on error. For other program types, the values * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on * error. * * u32 bpf_get_route_realm(struct sk_buff *skb) * Description * Retrieve the realm of the route, that is to say the * **tclassid** field of the destination for the *skb*. The * identifier retrieved is a user-provided tag, similar to the * one used with the net_cls cgroup (see description for * **bpf_get_cgroup_classid**\ () helper), but here this tag is * held by a route (a destination entry), not by a task. 
* * Retrieving this identifier works with the clsact TC egress hook * (see also **tc-bpf(8)**), or alternatively on conventional * classful egress qdiscs, but not on TC ingress path. In case of * clsact TC egress hook, this has the advantage that, internally, * the destination entry has not been dropped yet in the transmit * path. Therefore, the destination entry does not need to be * artificially held via **netif_keep_dst**\ () for a classful * qdisc until the *skb* is freed. * * This helper is available only if the kernel was compiled with * the **CONFIG_IP_ROUTE_CLASSID** configuration option. * Return * The realm of the route for the packet associated to *skb*, or 0 * if none was found. * * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf * event must have the following attributes: **PERF_SAMPLE_RAW** * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. * * The *flags* are used to indicate the index in *map* for which * the value must be put, masked with **BPF_F_INDEX_MASK**. * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** * to indicate that the index of the current CPU core should be * used. * * The value to write, of *size*, is passed through the eBPF stack and * pointed by *data*. * * The context of the program *ctx* also needs to be passed to the * helper. * * In user space, a program willing to read the values needs to * call **perf_event_open**\ () on the perf event (either for * one or for all CPUs) and to store the file descriptor into the * *map*. This must be done before the eBPF program can send data * into it. An example is available in file * *samples/bpf/trace_output_user.c* in the Linux kernel source * tree (the eBPF program counterpart is in * *samples/bpf/trace_output_kern.c*). * * **bpf_perf_event_output**\ () achieves better performance * than **bpf_trace_printk**\ () for sharing data with user * space, and is much better suited to streaming data from eBPF * programs. * * Note that this helper is not restricted to tracing use cases * and can be used with programs attached to TC or XDP as well, * where it allows for passing data to user space listeners. Data * can be: * * * Only custom structs, * * Only the packet payload, or * * A combination of both. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len) * Description * This helper was provided as an easy way to load data from a * packet. It can be used to load *len* bytes from *offset* from * the packet associated to *skb*, into the buffer pointed by * *to*. * * Since Linux 4.7, usage of this helper has mostly been replaced * by "direct packet access", enabling packet data to be * manipulated with *skb*\ **->data** and *skb*\ **->data_end** * pointing respectively to the first byte of packet data and to * the byte after the last byte of packet data. However, it * remains useful if one wishes to read large quantities of data * at once from a packet into the eBPF stack. * Return * 0 on success, or a negative error in case of failure. * * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags) * Description * Walk a user or a kernel stack and return its id. 
To achieve * this, the helper needs *ctx*, which is a pointer to the context * on which the tracing program is executed, and a pointer to a * *map* of type **BPF_MAP_TYPE_STACK_TRACE**. * * The last argument, *flags*, holds the number of stack frames to * skip (from 0 to 255), masked with * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set * a combination of the following flags: * * **BPF_F_USER_STACK** * Collect a user space stack instead of a kernel stack. * **BPF_F_FAST_STACK_CMP** * Compare stacks by hash only. * **BPF_F_REUSE_STACKID** * If two different stacks hash into the same *stackid*, * discard the old one. * * The stack id retrieved is a 32-bit integer handle which * can be further combined with other data (including other stack * ids) and used as a key into maps. This can be useful for * generating a variety of graphs (such as flame graphs or off-cpu * graphs). * * For walking a stack, this helper is an improvement over * **bpf_probe_read**\ (), which can be used with unrolled loops * but is not efficient and consumes a lot of eBPF instructions. * Instead, **bpf_get_stackid**\ () can collect up to * **PERF_MAX_STACK_DEPTH** kernel and user frames. Note that * this limit can be controlled with the **sysctl** program, and * that it should be manually increased in order to profile long * user stacks (such as stacks for Java programs). To do so, use: * * :: * * # sysctl kernel.perf_event_max_stack=<new value> * Return * The positive or null stack id on success, or a negative error * in case of failure. * * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed) * Description * Compute a checksum difference, from the raw buffer pointed by * *from*, of length *from_size* (that must be a multiple of 4), * towards the raw buffer pointed by *to*, of size *to_size* * (same remark). An optional *seed* can be added to the value * (this can be cascaded, the seed may come from a previous call * to the helper). * * This is flexible enough to be used in several ways: * * * With *from_size* == 0, *to_size* > 0 and *seed* set to * checksum, it can be used when pushing new data. * * With *from_size* > 0, *to_size* == 0 and *seed* set to * checksum, it can be used when removing data from a packet. * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it * can be used to compute a diff. Note that *from_size* and * *to_size* do not need to be equal. * * This helper can be used in combination with * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to * which one can feed in the difference computed with * **bpf_csum_diff**\ (). * Return * The checksum result, or a negative error code in case of * failure. * * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size) * Description * Retrieve tunnel options metadata for the packet associated to * *skb*, and store the raw tunnel option data to the buffer *opt* * of *size*. * * This helper can be used with encapsulation devices that can * operate in "collect metadata" mode (please refer to the related * note in the description of **bpf_skb_get_tunnel_key**\ () for * more details). A particular example where this can be used is * in combination with the Geneve encapsulation protocol, where it * allows for pushing (with **bpf_skb_set_tunnel_opt**\ () helper) * and retrieving arbitrary TLVs (Type-Length-Value headers) from * the eBPF program. This allows for full customization of these * headers. * Return * The size of the option data retrieved. 
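* * For illustration only (an editor's sketch, not part of the original * header), the two tunnel helpers above could be combined on TC ingress * as follows; the 64-byte option buffer is an arbitrary assumption: * * :: * * struct bpf_tunnel_key key = {}; * __u8 opt[64]; * int ret; * * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); * if (ret < 0) * return TC_ACT_SHOT; // drop packet * * ret = bpf_skb_get_tunnel_opt(skb, opt, sizeof(opt)); * if (ret < 0) * return TC_ACT_OK; // no option data to inspect * // ret holds the size of the option data now in opt 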
* * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size) * Description * Set tunnel options metadata for the packet associated to *skb* * to the option data contained in the raw buffer *opt* of *size*. * * See also the description of the **bpf_skb_get_tunnel_opt**\ () * helper for additional information. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) * Description * Change the protocol of the *skb* to *proto*. Currently * supported are transitions from IPv4 to IPv6, and from IPv6 to * IPv4. The helper takes care of the groundwork for the * transition, including resizing the socket buffer. The eBPF * program is expected to fill the new headers, if any, via * **bpf_skb_store_bytes**\ () and to recompute the checksums with * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ * (). The main case for this helper is to perform NAT64 * operations out of an eBPF program. * * Internally, the GSO type is marked as dodgy so that headers are * checked and segments are recalculated by the GSO/GRO engine. * The size for GSO target is adapted as well. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_change_type(struct sk_buff *skb, u32 type) * Description * Change the packet type for the packet associated to *skb*. This * comes down to setting *skb*\ **->pkt_type** to *type*, except * the eBPF program does not have write access to *skb*\ * **->pkt_type** besides this helper. Using a helper here allows * for graceful handling of errors. * * The major use case is to change incoming *skb*s to * **PACKET_HOST** in a programmatic way instead of having to * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for * example. * * Note that *type* only allows certain values. At this time, they * are: * * **PACKET_HOST** * Packet is for us. * **PACKET_BROADCAST** * Send packet to all. * **PACKET_MULTICAST** * Send packet to group. * **PACKET_OTHERHOST** * Send packet to someone else. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) * Description * Check whether *skb* is a descendant of the cgroup2 held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. * Return * The return value depends on the result of the test, and can be: * * * 0, if the *skb* failed the cgroup2 descendant test. * * 1, if the *skb* succeeded the cgroup2 descendant test. * * A negative error code, if an error occurred. * * u32 bpf_get_hash_recalc(struct sk_buff *skb) * Description * Retrieve the hash of the packet, *skb*\ **->hash**. If it is * not set, in particular if the hash was cleared due to mangling, * recompute this hash. Later accesses to the hash can be done * directly with *skb*\ **->hash**. * * Calling **bpf_set_hash_invalid**\ (), changing a packet * protocol with **bpf_skb_change_proto**\ (), or calling * **bpf_skb_store_bytes**\ () with the * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear * the hash and to trigger a new computation for the next call to * **bpf_get_hash_recalc**\ (). * Return * The 32-bit hash. 
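* * As a minimal sketch (an editor's addition, not in the original * header), a TC program mangling headers through direct packet access * could refresh the hash as follows, using **bpf_set_hash_invalid**\ () * described further below: * * :: * * __u32 hash; * * bpf_set_hash_invalid(skb); // mark the hash as outdated * hash = bpf_get_hash_recalc(skb); // triggers recomputation * // hash can now be used, e.g. as a map key for flow accounting 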
* * u64 bpf_get_current_task(void) * Return * A pointer to the current task struct. * * int bpf_probe_write_user(void *dst, const void *src, u32 len) * Description * Attempt in a safe way to write *len* bytes from the buffer * *src* to *dst* in memory. It only works for threads that are in * user context, and *dst* must be a valid user space address. * * This helper should not be used to implement any kind of * security mechanism because of TOC-TOU attacks, but rather to * debug, divert, and manipulate execution of semi-cooperative * processes. * * Keep in mind that this feature is meant for experiments, and it * has a risk of crashing the system and running programs. * Therefore, when an eBPF program using this helper is attached, * a warning including PID and process name is printed to kernel * logs. * Return * 0 on success, or a negative error in case of failure. * * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) * Description * Check whether the probe is being run in the context of a given * subset of the cgroup2 hierarchy. The cgroup2 to test is held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. * Return * The return value depends on the result of the test, and can be: * * * 0, if the current task belongs to the cgroup2. * * 1, if the current task does not belong to the cgroup2. * * A negative error code, if an error occurred. * * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) * Description * Resize (trim or grow) the packet associated to *skb* to the * new *len*. The *flags* are reserved for future usage, and must * be left at zero. * * The basic idea is that the helper performs the needed work to * change the size of the packet, then the eBPF program rewrites * the rest via helpers like **bpf_skb_store_bytes**\ (), * **bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ () * and others. This helper is a slow path utility intended for * replies with control messages. And because it is targeted for * slow path, the helper itself can afford to be slow: it * implicitly linearizes, unclones and drops offloads from the * *skb*. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_pull_data(struct sk_buff *skb, u32 len) * Description * Pull in non-linear data in case the *skb* is non-linear and not * all of *len* are part of the linear section. Make *len* bytes * from *skb* readable and writable. If a zero value is passed for * *len*, then the whole length of the *skb* is pulled. * * This helper is only needed for reading and writing with direct * packet access. * * For direct packet access, testing that offsets to access * are within packet boundaries (test on *skb*\ **->data_end**) is * susceptible to fail if offsets are invalid, or if the requested * data is in non-linear parts of the *skb*. On failure the * program can just bail out, or in the case of a non-linear * buffer, use a helper to make the data available. The * **bpf_skb_load_bytes**\ () helper is a first solution to access * the data. Another one consists in using **bpf_skb_pull_data**\ () * to pull in the non-linear parts once, then retesting and * eventually accessing the data. 
* * At the same time, this also makes sure the *skb* is uncloned, * which is a necessary condition for direct write. As this needs * to be an invariant for the write part only, the verifier * detects writes and adds a prologue that is calling * **bpf_skb_pull_data()** to effectively unclone the *skb* from * the very beginning in case it is indeed cloned. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum) * Description * Add the checksum *csum* into *skb*\ **->csum** in case the * driver has supplied a checksum for the entire packet into that * field. Return an error otherwise. This helper is intended to be * used in combination with **bpf_csum_diff**\ (), in particular * when the checksum needs to be updated after data has been * written into the packet through direct packet access. * Return * The checksum on success, or a negative error code in case of * failure. * * void bpf_set_hash_invalid(struct sk_buff *skb) * Description * Invalidate the current *skb*\ **->hash**. It can be used after * mangling on headers through direct packet access, in order to * indicate that the hash is outdated and to trigger a * recalculation the next time the kernel tries to access this * hash or when the **bpf_get_hash_recalc**\ () helper is called. * * int bpf_get_numa_node_id(void) * Description * Return the id of the current NUMA node. The primary use case * for this helper is the selection of sockets for the local NUMA * node, when the program is attached to sockets using the * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**), * but the helper is also available to other eBPF program types, * similarly to **bpf_get_smp_processor_id**\ (). * Return * The id of current NUMA node. * * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) * Description * Grow the headroom of the packet associated to *skb* and adjust the * offset of the MAC header accordingly, adding *len* bytes of * space. It automatically extends and reallocates memory as * required. * * This helper can be used on a layer 3 *skb* to push a MAC header * for redirection into a layer 2 device. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that * it is possible to use a negative value for *delta*. This helper * can be used to prepare the packet for pushing or popping * headers. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. 
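* * A hypothetical sketch (an editor's addition) of the re-validation * this implies, here using **bpf_xdp_adjust_head**\ () to make room * for an extra Ethernet header in an XDP program with context *xdp_md*: * * :: * * void *data, *data_end; * * if (bpf_xdp_adjust_head(xdp_md, -(int)sizeof(struct ethhdr))) * return XDP_ABORTED; * // all previous pointer checks are now invalid: reload, retest * data = (void *)(long)xdp_md->data; * data_end = (void *)(long)xdp_md->data_end; * if (data + sizeof(struct ethhdr) > data_end) * return XDP_DROP; * // the new outer header can now be written at data 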
* * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) * Description * Copy a NUL-terminated string from an unsafe address * *unsafe_ptr* to *dst*. The *size* should include the * terminating NUL byte. In case the string length is smaller than * *size*, the target is not padded with further NUL bytes. If the * string length is larger than *size*, just *size*-1 bytes are * copied and the last byte is set to NUL. * * On success, the length of the copied string is returned. This * makes this helper useful in tracing programs for reading * strings, and more importantly to get their length at runtime. See * the following snippet: * * :: * * SEC("kprobe/sys_open") * void bpf_sys_open(struct pt_regs *ctx) * { * char buf[PATHLEN]; // PATHLEN is defined to 256 * int res = bpf_probe_read_str(buf, sizeof(buf), * ctx->di); * * // Consume buf, for example push it to * // userspace via bpf_perf_event_output(); we * // can use res (the string length) as event * // size, after checking its boundaries. * } * * In comparison, using **bpf_probe_read()** helper here instead * to read the string would require estimating the length at * compile time, and would often result in copying more memory * than necessary. * * Another useful use case is when parsing individual process * arguments or individual environment variables navigating * *current*\ **->mm->arg_start** and *current*\ * **->mm->env_start**: using this helper and the return value, * one can quickly iterate at the right offset of the memory area. * Return * On success, the strictly positive length of the string, * including the trailing NUL character. On error, a negative * value. * * u64 bpf_get_socket_cookie(struct sk_buff *skb) * Description * If the **struct sk_buff** pointed by *skb* has a known socket, * retrieve the cookie (generated by the kernel) of this socket. * If no cookie has been set yet, generate a new cookie. Once * generated, the socket cookie remains stable for the life of the * socket. This helper can be useful for monitoring per socket * networking traffic statistics as it provides a unique socket * identifier per namespace. * Return * An 8-byte long non-decreasing number on success, or 0 if the * socket field is missing inside *skb*. * * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) * Description * Equivalent to bpf_get_socket_cookie() helper that accepts * *skb*, but gets socket from **struct bpf_sock_addr** context. * Return * An 8-byte long non-decreasing number. * * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) * Description * Equivalent to bpf_get_socket_cookie() helper that accepts * *skb*, but gets socket from **struct bpf_sock_ops** context. * Return * An 8-byte long non-decreasing number. * * u32 bpf_get_socket_uid(struct sk_buff *skb) * Return * The owner UID of the socket associated to *skb*. If the socket * is **NULL**, or if it is not a full socket (i.e. if it is a * time-wait or a request socket instead), **overflowuid** value * is returned (note that **overflowuid** might also be the actual * UID value for the socket). * * u32 bpf_set_hash(struct sk_buff *skb, u32 hash) * Description * Set the full hash for *skb* (set the field *skb*\ **->hash**) * to value *hash*. * Return * 0 * * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen) * Description * Emulate a call to **setsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. 
The *level* at * which the option resides and the name *optname* of the option * must be specified, see **setsockopt(2)** for more information. * The option value of length *optlen* is pointed by *optval*. * * This helper actually implements a subset of **setsockopt()**. * It supports the following *level*\ s: * * * **SOL_SOCKET**, which supports the following *optname*\ s: * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**. * * **IPPROTO_TCP**, which supports the following *optname*\ s: * **TCP_CONGESTION**, **TCP_BPF_IW**, * **TCP_BPF_SNDCWND_CLAMP**. * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. * * (A short usage sketch is given below, after the * **bpf_sk_redirect_map**\ () entry.) * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) * Description * Grow or shrink the room for data in the packet associated to * *skb* by *len_diff*, and according to the selected *mode*. * * There are two supported modes at this time: * * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer * (room space is added or removed below the layer 2 header). * * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer * (room space is added or removed below the layer 3 header). * * The following flags are supported at this time: * * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. * Adjusting mss in this way is not allowed for datagrams. * * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**: * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: * Any new space is reserved to hold a tunnel header. * Configure skb offsets and other fields accordingly. * * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**: * * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: * Use with ENCAP_L3 flags to further specify the tunnel type. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the endpoint referenced by *map* at * index *key*. Depending on its type, this *map* can contain * references to net devices (for forwarding packets through other * ports), or to CPUs (for redirecting XDP frames to another CPU; * but this is only implemented for native XDP (with driver * support) as of this writing). * * All values for *flags* are reserved for future usage, and must * be left at zero. * * When used to redirect packets to net devices, this helper * provides a high performance increase over **bpf_redirect**\ (). * This is due to various implementation details of the underlying * mechanisms, one of which is the fact that **bpf_redirect_map**\ * () tries to send packets as a "bulk" to the device. * Return * **XDP_REDIRECT** on success, or **XDP_ABORTED** on error. * * int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress path otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. 
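* * To illustrate **bpf_setsockopt**\ () documented above (an editor's * sketch; it assumes the kernel provides the "bbr" congestion control * module): * * :: * * char cc[] = "bbr"; * * if (bpf_setsockopt(bpf_socket, IPPROTO_TCP, TCP_CONGESTION, * cc, sizeof(cc))) * return 1; // option could not be applied 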
* * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a *map* referencing sockets. The * *skops* is used as a new value for the entry associated to * *key*. *flags* is one of: * * **BPF_NOEXIST** * The entry for *key* must not exist in the map. * **BPF_EXIST** * The entry for *key* must already exist in the map. * **BPF_ANY** * No condition on the existence of the entry for *key*. * * If the *map* has eBPF programs (parser and verdict), those will * be inherited by the socket being added. If the socket is * already attached to eBPF programs, this results in an error. * Return * 0 on success, or a negative error in case of failure. * * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) * Description * Adjust the address pointed by *xdp_md*\ **->data_meta** by * *delta* (which can be positive or negative). Note that this * operation modifies the address stored in *xdp_md*\ **->data**, * so the latter must be loaded only after the helper has been * called. * * The use of *xdp_md*\ **->data_meta** is optional and programs * are not required to use it. The rationale is that when the * packet is processed with XDP (e.g. as DoS filter), it is * possible to push further meta data along with it before passing * to the stack, and to give the guarantee that an ingress eBPF * program attached as a TC classifier on the same device can pick * this up for further post-processing. Since TC works with socket * buffers, it remains possible to set from XDP the **mark** or * **priority** pointers, or other pointers for the socket buffer. * Having this scratch space generic and programmable allows for * more flexibility as the user is free to store whatever meta * data they need. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) * Description * Read the value of a perf event counter, and store it into *buf* * of size *buf_size*. This helper relies on a *map* of type * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event * counter is selected when *map* is updated with perf event file * descriptors. The *map* is an array whose size is the number of * available CPUs, and each cell contains a value relative to one * CPU. The value to retrieve is indicated by *flags*, that * contains the index of the CPU to look up, masked with * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to * **BPF_F_CURRENT_CPU** to indicate that the value for the * current CPU should be retrieved. * * This helper behaves in a way close to the * **bpf_perf_event_read**\ () helper, save that instead of * just returning the value observed, it fills the *buf* * structure. This allows for additional data to be retrieved: in * particular, the enabled and running times (in *buf*\ * **->enabled** and *buf*\ **->running**, respectively) are * copied. In general, **bpf_perf_event_read_value**\ () is * recommended over **bpf_perf_event_read**\ (), which has some * ABI issues and provides fewer functionalities. * * These values are interesting, because hardware PMU (Performance * Monitoring Unit) counters are limited resources. 
When there are * more PMU-based perf events opened than available counters, the * kernel will multiplex these events so that each event gets a * certain percentage (but not all) of the PMU time. When * multiplexing happens, the number of samples or the counter * value will not reflect what they would be without multiplexing. * This makes comparison between different runs difficult. * Typically, the counter value should be normalized before * comparing to other experiments. The usual normalization is done * as follows. * * :: * * normalized_counter = counter * t_enabled / t_running * * Where t_enabled is the time enabled for the event and t_running * is the time the event has been running since the last * normalization. The enabled and running times are accumulated * since the perf event open. To compute the scaling factor between * two invocations of an eBPF program, users can use the CPU id as * the key (which is typical for the perf array usage model) to * remember the previous value and do the calculation inside the * eBPF program. * Return * 0 on success, or a negative error in case of failure. * * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) * Description * For an eBPF program attached to a perf event, retrieve the * value of the event counter associated to *ctx* and store it in * the structure pointed by *buf* and of size *buf_size*. Enabled * and running times are also stored in the structure (see * description of helper **bpf_perf_event_read_value**\ () for * more details). * Return * 0 on success, or a negative error in case of failure. * * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen) * Description * Emulate a call to **getsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at * which the option resides and the name *optname* of the option * must be specified, see **getsockopt(2)** for more information. * The retrieved value is stored in the structure pointed by * *optval* and of length *optlen*. * * This helper actually implements a subset of **getsockopt()**. * It supports the following *level*\ s: * * * **IPPROTO_TCP**, which supports *optname* * **TCP_CONGESTION**. * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. * Return * 0 on success, or a negative error in case of failure. * * int bpf_override_return(struct pt_regs *regs, u64 rc) * Description * Used for error injection, this helper uses kprobes to override * the return value of the probed function, and to set it to *rc*. * The first argument is the context *regs* on which the kprobe * works. * * This helper works by setting the PC (program counter) * to an override function which is run in place of the original * probed function. This means the probed function is not run at * all. The replacement function just returns with the required * value. * * This helper has security implications, and thus is subject to * restrictions. It is only available if the kernel was compiled * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration * option, and in this case it only works on functions tagged with * **ALLOW_ERROR_INJECTION** in the kernel code. * * Also, the helper is only available for the architectures having * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing, * the x86 architecture is the only one to support this feature. 
* Return * 0 * * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) * Description * Attempt to set the value of the **bpf_sock_ops_cb_flags** field * for the full TCP socket associated to *bpf_sock* to * *argval*. * * The primary use of this field is to determine if there should * be calls to eBPF programs of type * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP * code. A program of the same type can change its value, per * connection and as necessary, when the connection is * established. This field is directly accessible for reading, but * this helper must be used for updates in order to return an * error if an eBPF program tries to set a callback that is not * supported in the current kernel. * * The supported callback values that *argval* can combine are: * * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) * * Here are some examples of where one could call such an eBPF * program: * * * When RTO fires. * * When a packet is retransmitted. * * When the connection terminates. * * When a packet is sent. * * When a packet is received. * Return * Code **-EINVAL** if the socket is not a full TCP socket; * otherwise, a positive number containing the bits that could not * be set is returned (which comes down to 0 if all bits were set * as required). * * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if * the verdict eBPF program returns **SK_PASS**), redirect it to * the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress path otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. * * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, apply the verdict of the eBPF program to * the next *bytes* (number of bytes) of message *msg*. * * For example, this helper can be used in the following cases: * * * A single **sendmsg**\ () or **sendfile**\ () system call * contains multiple logical messages that the eBPF program is * supposed to read and for which it should apply a verdict. * * An eBPF program only needs to read the first *bytes* of a * *msg*. If the message has a large payload, then setting up * and calling the eBPF program repeatedly for all bytes, even * though the verdict is already known, would create unnecessary * overhead. * * When called from within an eBPF program, the helper sets a * counter internal to the BPF infrastructure, which is used to * apply the last verdict to the next *bytes*. If *bytes* is * smaller than the current data being processed from a * **sendmsg**\ () or **sendfile**\ () system call, the first * *bytes* will be sent and the eBPF program will be re-run with * the pointer for the start of data pointing to byte number *bytes* * **+ 1**. If *bytes* is larger than the current data being * processed, then the eBPF verdict will be applied to multiple * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are * consumed.
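 *
 *		For instance (editorial sketch), a verdict program that wants
 *		its decision applied to the next 4096 bytes of *msg* without
 *		being re-run for each call could end with:
 *
 *		::
 *
 *			bpf_msg_apply_bytes(msg, 4096);
 *			return SK_PASS;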
* * Note that if a socket closes with the internal counter holding * a non-zero value, this is not a problem because data is not * being buffered for *bytes* and is sent as it is received. * Return * 0 * * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, prevent the execution of the verdict eBPF * program for message *msg* until *bytes* (number of bytes) have * been accumulated. * * This can be used when one needs a specific number of bytes * before a verdict can be assigned, even if the data spans * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme * case would be a user calling **sendmsg**\ () repeatedly with * 1-byte long message segments. Obviously, this is bad for * performance, but it is still valid. If the eBPF program needs * *bytes* bytes to validate a header, this helper can be used to * prevent the eBPF program from being called again until *bytes* * have been accumulated. * Return * 0 * * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) * Description * For socket policies, pull in non-linear data from user space * for *msg* and set pointers *msg*\ **->data** and *msg*\ * **->data_end** to *start* and *end* byte offsets into *msg*, * respectively. * * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a * *msg* it can only parse data that the (**data**, **data_end**) * pointers have already consumed. For **sendmsg**\ () hooks this * is likely the first scatterlist element. But for calls relying * on the **sendpage** handler (e.g. **sendfile**\ ()) this will * be the range (**0**, **0**) because the data is shared with * user space and by default the objective is to avoid allowing * user space to modify data while (or after) the eBPF verdict is * being decided. This helper can be used to pull in data and to * set the start and end pointer to given values. Data will be * copied if necessary (i.e. if data was not linear and if start * and end pointers do not point to the same chunk). * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * All values for *flags* are reserved for future usage, and must * be left at zero. * Return * 0 on success, or a negative error in case of failure. * * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) * Description * Bind the socket associated to *ctx* to the address pointed to by * *addr*, of length *addr_len*. This allows for making outgoing * connections from the desired IP address, which can be useful for * example when all processes inside a cgroup should use one * single IP address on a host that has multiple IPs configured. * * This helper works for IPv4 and IPv6, TCP and UDP sockets. The * domain (*addr*\ **->sa_family**) must be **AF_INET** (or * **AF_INET6**). Looking for a free port to bind to can be * expensive, therefore binding to a port is not permitted by the * helper: *addr*\ **->sin_port** (or **sin6_port**, respectively) * must be set to zero. * Return * 0 on success, or a negative error in case of failure. * * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is * only possible to shrink the packet as of this writing, * therefore *delta* must be a negative integer.
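 *
 *		For instance (editorial sketch), an XDP program known to
 *		receive frames carrying a 4-byte trailer could trim it with:
 *
 *		::
 *
 *			if (bpf_xdp_adjust_tail(xdp, -4))
 *				return XDP_ABORTED;
 *
 *		where *xdp* is assumed to be the program's **struct xdp_md**
 *		context.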
* * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) * Description * Retrieve the XFRM state (IP transform framework, see also * **ip-xfrm(8)**) at *index* in the XFRM "security path" for *skb*. * * The retrieved value is stored in the **struct bpf_xfrm_state** * pointed to by *xfrm_state* and of length *size*. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * This helper is available only if the kernel was compiled with * the **CONFIG_XFRM** configuration option. * Return * 0 on success, or a negative error in case of failure. * * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags) * Description * Return a user or a kernel stack in the bpf program-provided * buffer. To achieve this, the helper needs *regs*, which is a * pointer to the context on which the tracing program is executed. * To store the stacktrace, the bpf program provides *buf* with * a nonnegative *size*. * * The last argument, *flags*, holds the number of stack frames to * skip (from 0 to 255), masked with * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set * the following flags: * * **BPF_F_USER_STACK** * Collect a user space stack instead of a kernel stack. * **BPF_F_USER_BUILD_ID** * Collect buildid+offset instead of ips for user stack, * only valid if **BPF_F_USER_STACK** is also specified. * * **bpf_get_stack**\ () can collect up to * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject * to a sufficiently large buffer size. Note that * this limit can be controlled with the **sysctl** program, and * that it should be manually increased in order to profile long * user stacks (such as stacks for Java programs). To do so, use: * * :: * * # sysctl kernel.perf_event_max_stack=<new value> * Return * A non-negative value equal to or less than *size* on success, * or a negative error in case of failure. * * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) * Description * This helper is similar to **bpf_skb_load_bytes**\ () in that * it provides an easy way to load *len* bytes from *offset* * from the packet associated to *skb*, into the buffer pointed * to by *to*. The difference to **bpf_skb_load_bytes**\ () is that * a fifth argument *start_header* exists in order to select a * base offset to start from. *start_header* can be one of: * * **BPF_HDR_START_MAC** * Base offset to load data from is *skb*'s mac header. * **BPF_HDR_START_NET** * Base offset to load data from is *skb*'s network header. * * In general, "direct packet access" is the preferred method to * access packet data, however, this helper is particularly useful * in socket filters where *skb*\ **->data** does not always point * to the start of the mac header and where "direct packet access" * is not available. * Return * 0 on success, or a negative error in case of failure. * * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) * Description * Do a FIB lookup in kernel tables using parameters in *params*. * If the lookup is successful and the result shows the packet is to * be forwarded, the neighbor tables are searched for the nexthop.
* If successful (i.e., FIB lookup shows forwarding and nexthop * is resolved), the nexthop address is returned in ipv4_dst * or ipv6_dst based on family, smac is set to the mac address of * the egress device, dmac is set to the nexthop mac address, * rt_metric is set to the metric from the route (IPv4/IPv6 only), * and ifindex is set to the device index of the nexthop from the * FIB lookup. * * The *plen* argument is the size of the passed-in struct. * The *flags* argument can be a combination of one or more of the * following values: * * **BPF_FIB_LOOKUP_DIRECT** * Do a direct table lookup vs a full lookup using FIB * rules. * **BPF_FIB_LOOKUP_OUTPUT** * Perform lookup from an egress perspective (default is * ingress). * * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** for tc cls_act programs. * Return * * < 0 if any input argument is invalid * * 0 on success (packet is forwarded, nexthop neighbor exists) * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the * packet is not forwarded or needs assistance from the full stack * * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a sockhash *map* referencing sockets. * The *skops* is used as a new value for the entry associated to * *key*. *flags* is one of: * * **BPF_NOEXIST** * The entry for *key* must not exist in the map. * **BPF_EXIST** * The entry for *key* must already exist in the map. * **BPF_ANY** * No condition on the existence of the entry for *key*. * * If the *map* has eBPF programs (parser and verdict), those will * be inherited by the socket being added. If the socket is * already attached to eBPF programs, this results in an error. * Return * 0 on success, or a negative error in case of failure. * * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if * the verdict eBPF program returns **SK_PASS**), redirect it to * the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress path otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. * * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. * if the verdict eBPF program returns **SK_PASS**), redirect it * to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. * * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) * Description * Encapsulate the packet associated to *skb* within a Layer 3 * protocol header. This header is provided in the buffer at * address *hdr*, with *len* its size in bytes.
*type* indicates * the protocol of the header and can be one of: * * **BPF_LWT_ENCAP_SEG6** * IPv6 encapsulation with Segment Routing Header * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH, * the IPv6 header is computed by the kernel. * **BPF_LWT_ENCAP_SEG6_INLINE** * Only works if *skb* contains an IPv6 packet. Insert a * Segment Routing Header (**struct ipv6_sr_hdr**) inside * the IPv6 header. * **BPF_LWT_ENCAP_IP** * IP encapsulation (GRE/GUE/IPIP/etc). The outer header * must be IPv4 or IPv6, followed by zero or more * additional headers, up to LWT_BPF_MAX_HEADROOM total * bytes in all prepended headers. Please note that * if skb_is_gso(skb) is true, no more than two headers * can be prepended, and the inner header, if present, * should be either GRE or UDP/GUE. * * **BPF_LWT_ENCAP_SEG6**\ * types can be called by bpf programs * of type BPF_PROG_TYPE_LWT_IN; the BPF_LWT_ENCAP_IP type can be * called by bpf programs of types BPF_PROG_TYPE_LWT_IN and * BPF_PROG_TYPE_LWT_XMIT. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. Only the flags, tag and TLVs * inside the outermost IPv6 Segment Routing Header can be * modified through this helper. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) * Description * Adjust the size allocated to TLVs in the outermost IPv6 * Segment Routing Header contained in the packet associated to * *skb*, at position *offset* by *delta* bytes. Only offsets * after the segments are accepted. *delta* can be positive * (growing) as well as negative (shrinking). * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) * Description * Apply an IPv6 Segment Routing action of type *action* to the * packet associated to *skb*. Each action takes a parameter * contained at address *param*, and of length *param_len* bytes. * *action* can be one of: * * **SEG6_LOCAL_ACTION_END_X** * End.X action: Endpoint with Layer-3 cross-connect. * Type of *param*: **struct in6_addr**. * **SEG6_LOCAL_ACTION_END_T** * End.T action: Endpoint with specific IPv6 table lookup. * Type of *param*: **int**. * **SEG6_LOCAL_ACTION_END_B6** * End.B6 action: Endpoint bound to an SRv6 policy. * Type of *param*: **struct ipv6_sr_hdr**. * **SEG6_LOCAL_ACTION_END_B6_ENCAP** * End.B6.Encap action: Endpoint bound to an SRv6 * encapsulation policy. * Type of *param*: **struct ipv6_sr_hdr**.
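 *
 *		For example (editorial sketch, with a hypothetical table
 *		number), an End.T action towards IPv6 table 100 could be
 *		requested with:
 *
 *		::
 *
 *			int table = 100;
 *
 *			if (bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_T,
 *						&table, sizeof(table)))
 *				return BPF_DROP;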
* * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_rc_repeat(void *ctx) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded repeat key message. This delays * the generation of a key up event for the previously generated * key down event. * * Some IR protocols like NEC have a special IR message for * repeating the last button, for when a button is held down. * * The *ctx* should point to the lirc sample as passed into * the program. * * This helper is only available if the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". * Return * 0 * * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded key press with *scancode* and * *toggle* value in the given *protocol*. The scancode will be * translated to a keycode using the rc keymap, and reported as * an input key down event. After a period a key up event is * generated. This period can be extended by calling either * **bpf_rc_keydown**\ () again with the same values, or calling * **bpf_rc_repeat**\ (). * * Some protocols include a toggle bit, in case the button was * released and pressed again between consecutive scancodes. * * The *ctx* should point to the lirc sample as passed into * the program. * * The *protocol* is the decoded protocol number (see * **enum rc_proto** for some predefined values). * * This helper is only available if the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". * Return * 0 * * u64 bpf_skb_cgroup_id(struct sk_buff *skb) * Description * Return the cgroup v2 id of the socket associated with the *skb*. * This is roughly similar to the **bpf_get_cgroup_classid**\ () * helper for cgroup v1 by providing a tag or identifier that * can be matched on or used for map lookups e.g. to implement * policy. The cgroup v2 id of a given path in the hierarchy is * exposed in user space through the f_handle API in order to get * to the same 64-bit id. * * This helper can be used on the TC egress path, but not on * ingress, and is available only if the kernel was compiled with * the **CONFIG_SOCK_CGROUP_DATA** configuration option. * Return * The id is returned or 0 in case the id could not be retrieved. * * u64 bpf_get_current_cgroup_id(void) * Return * A 64-bit integer containing the current cgroup id based * on the cgroup within which the current task is running. * * void *bpf_get_local_storage(void *map, u64 flags) * Description * Get the pointer to the local storage area. * The type and the size of the local storage is defined * by the *map* argument. * The *flags* meaning is specific to each map type, * and has to be 0 for cgroup local storage. * * Depending on the BPF program type, a local storage area * can be shared between multiple instances of the BPF program, * running simultaneously. * * Users are responsible for handling the synchronization * themselves, for example by using the **BPF_STX_XADD** * instruction to alter the shared data. * Return * A pointer to the local storage area.
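 *
 *		A minimal usage sketch (editorial; assumes a
 *		**BPF_MAP_TYPE_CGROUP_STORAGE** map named *cg_storage* whose
 *		value is a single **__u64** counter):
 *
 *		::
 *
 *			__u64 *counter = bpf_get_local_storage(&cg_storage, 0);
 *
 *			__sync_fetch_and_add(counter, 1);
 *
 *		The atomic add compiles down to the **BPF_STX_XADD**
 *		instruction mentioned above.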
* * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) * Description * Select a **SO_REUSEPORT** socket from a * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*. * It checks that the selected socket matches the incoming * request in the socket buffer. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) * Description * Return the id of the cgroup v2 that is an ancestor of the cgroup * associated with the *skb* at the *ancestor_level*. The root * cgroup is at *ancestor_level* zero and each step down the * hierarchy increments the level. If *ancestor_level* == level of * the cgroup associated with *skb*, then the return value will be * the same as that of **bpf_skb_cgroup_id**\ (). * * The helper is useful to implement policies based on cgroups * that are higher in the hierarchy than the immediate cgroup * associated with *skb*. * * The format of the returned id and the helper limitations are the * same as in **bpf_skb_cgroup_id**\ (). * Return * The id is returned or 0 in case the id could not be retrieved. * * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for a TCP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * * The *ctx* should point to the context of the program, such as * the skb or socket (depending on the hook in use). This is used * to determine the base network namespace for the lookup. * * *tuple_size* must be one of: * * **sizeof**\ (*tuple*\ **->ipv4**) * Look for an IPv4 socket. * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or * equal to zero then it specifies the ID of the netns relative to * the netns associated with the *ctx*. *netns* values beyond the * range of 32-bit integers are reserved for future use. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * This helper is available only if the kernel was compiled with * the **CONFIG_NET** configuration option. * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** * result is from **reuse->socks**\ [] using the hash of the tuple. * * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for a UDP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * * The *ctx* should point to the context of the program, such as * the skb or socket (depending on the hook in use). This is used * to determine the base network namespace for the lookup. * * *tuple_size* must be one of: * * **sizeof**\ (*tuple*\ **->ipv4**) * Look for an IPv4 socket. * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb.
For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or * equal to zero then it specifies the ID of the netns relative to * the netns associated with the *ctx*. *netns* values beyond the * range of 32-bit integers are reserved for future use. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * This helper is available only if the kernel was compiled with * the **CONFIG_NET** configuration option. * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** * result is from **reuse->socks**\ [] using the hash of the tuple. * * int bpf_sk_release(struct bpf_sock *sock) * Description * Release the reference held by *sock*. *sock* must be a * non-**NULL** pointer that was returned from * **bpf_sk_lookup_xxx**\ (). * Return * 0 on success, or a negative error in case of failure. * * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) * Description * Push an element *value* into *map*. *flags* is one of: * * **BPF_EXIST** * If the queue/stack is full, the oldest element is * removed to make room for this. * Return * 0 on success, or a negative error in case of failure. * * int bpf_map_pop_elem(struct bpf_map *map, void *value) * Description * Pop an element from *map*. * Return * 0 on success, or a negative error in case of failure. * * int bpf_map_peek_elem(struct bpf_map *map, void *value) * Description * Get an element from *map* without removing it. * Return * 0 on success, or a negative error in case of failure. * * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * For socket policies, insert *len* bytes into *msg* at offset * *start*. * * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a * *msg* it may want to insert metadata or options into the *msg*. * This can later be read and used by any of the lower layer BPF * hooks. * * This helper may fail under memory pressure (if a malloc fails); * in that case the BPF program gets an appropriate error and * needs to handle it. * Return * 0 on success, or a negative error in case of failure. * * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags) * Description * Remove *pop* bytes from *msg* starting at byte *start*. * This may result in **ENOMEM** errors under certain situations if * an allocation and copy are required due to a full ring buffer. * However, the helper will try to avoid doing the allocation * if possible. Other errors can occur if input parameters are * invalid either due to the *start* byte not being a valid part of * the *msg* payload and/or the *pop* value being too large. * Return * 0 on success, or a negative error in case of failure. * * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded pointer movement. * * The *ctx* should point to the lirc sample as passed into * the program. * * This helper is only available if the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". * Return * 0 * * int bpf_spin_lock(struct bpf_spin_lock *lock) * Description * Acquire a spinlock represented by the pointer *lock*, which is * stored as part of a value of a map. Taking the lock makes it * possible to safely update the rest of the fields in that value.
The * spinlock can (and must) later be released with a call to * **bpf_spin_unlock**\ (\ *lock*\ ). * * Spinlocks in BPF programs come with a number of restrictions * and constraints: * * * **bpf_spin_lock** objects are only allowed inside maps of * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this * list could be extended in the future). * * BTF description of the map is mandatory. * * The BPF program can take ONE lock at a time, since taking two * or more could cause deadlocks. * * Only one **struct bpf_spin_lock** is allowed per map element. * * When the lock is taken, calls (either BPF to BPF or helpers) * are not allowed. * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not * allowed inside a spinlock-ed region. * * The BPF program MUST call **bpf_spin_unlock**\ () to release * the lock, on all execution paths, before it returns. * * The BPF program can access **struct bpf_spin_lock** only via * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ () * helpers. Loading or storing data into the **struct * bpf_spin_lock** *lock*\ **;** field of a map is not allowed. * * To use the **bpf_spin_lock**\ () helper, the BTF description * of the map value must be a struct and have **struct * bpf_spin_lock** *anyname*\ **;** field at the top level. * A nested lock inside another struct is not allowed. * * The **struct bpf_spin_lock** *lock* field in a map value must * be aligned on a multiple of 4 bytes in that value. * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy * the **bpf_spin_lock** field to user space. * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from * a BPF program, does not update the **bpf_spin_lock** field. * * **bpf_spin_lock** cannot be on the stack or inside a * networking packet (it can only be inside a map value). * * **bpf_spin_lock** is available to root only. * * Tracing programs and socket filter programs cannot use * **bpf_spin_lock**\ () due to insufficient preemption checks * (but this may change in the future). * * **bpf_spin_lock** is not allowed in inner maps of map-in-map. * Return * 0 * * int bpf_spin_unlock(struct bpf_spin_lock *lock) * Description * Release the *lock* previously locked by a call to * **bpf_spin_lock**\ (\ *lock*\ ). * Return * 0 * * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk) * Description * This helper gets a **struct bpf_sock** pointer such * that all the fields in this **bpf_sock** can be accessed. * Return * A **struct bpf_sock** pointer on success, or **NULL** in * case of failure. * * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk) * Description * This helper gets a **struct bpf_tcp_sock** pointer from a * **struct bpf_sock** pointer. * Return * A **struct bpf_tcp_sock** pointer on success, or **NULL** in * case of failure. * * int bpf_skb_ecn_set_ce(struct sk_buff *skb) * Description * Set the ECN (Explicit Congestion Notification) field of the IP * header to **CE** (Congestion Encountered) if the current value * is **ECT** (ECN Capable Transport). Otherwise, do nothing. Works * with IPv6 and IPv4. * Return * 1 if the **CE** flag is set (either by the current helper call * or because it was already present), 0 if it is not set. * * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk) * Description * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state. * **bpf_sk_release**\ () is unnecessary and not allowed. * Return * A **struct bpf_sock** pointer on success, or **NULL** in * case of failure.
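 *
 *		An illustrative sketch (editorial; assumes a TC program with
 *		*skb* and an already populated IPv4 *tuple*; note that *lsk*
 *		needs no **bpf_sk_release**\ (), while *sk* does):
 *
 *		::
 *
 *			struct bpf_sock *sk, *lsk;
 *			__u32 lport = 0;
 *
 *			sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *					       BPF_F_CURRENT_NETNS, 0);
 *			if (sk) {
 *				lsk = bpf_get_listener_sock(sk);
 *				if (lsk)
 *					lport = lsk->src_port;
 *				bpf_sk_release(sk);
 *			}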
* * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for a TCP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * * This function is identical to **bpf_sk_lookup_tcp**\ (), except * that it also returns timewait or request sockets. Use * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the * full structure. * * This helper is available only if the kernel was compiled with * the **CONFIG_NET** configuration option. * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** * result is from **reuse->socks**\ [] using the hash of the tuple. * * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * Description * Check whether *iph* and *th* contain a valid SYN cookie ACK for * the listening socket in *sk*. * * *iph* points to the start of the IPv4 or IPv6 header, while * *iph_len* contains sizeof(struct iphdr) or sizeof(struct ipv6hdr). * * *th* points to the start of the TCP header, while *th_len* * contains sizeof(struct tcphdr). * Return * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative * error otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ FN(map_lookup_elem), \ FN(map_update_elem), \ FN(map_delete_elem), \ FN(probe_read), \ FN(ktime_get_ns), \ FN(trace_printk), \ FN(get_prandom_u32), \ FN(get_smp_processor_id), \ FN(skb_store_bytes), \ FN(l3_csum_replace), \ FN(l4_csum_replace), \ FN(tail_call), \ FN(clone_redirect), \ FN(get_current_pid_tgid), \ FN(get_current_uid_gid), \ FN(get_current_comm), \ FN(get_cgroup_classid), \ FN(skb_vlan_push), \ FN(skb_vlan_pop), \ FN(skb_get_tunnel_key), \ FN(skb_set_tunnel_key), \ FN(perf_event_read), \ FN(redirect), \ FN(get_route_realm), \ FN(perf_event_output), \ FN(skb_load_bytes), \ FN(get_stackid), \ FN(csum_diff), \ FN(skb_get_tunnel_opt), \ FN(skb_set_tunnel_opt), \ FN(skb_change_proto), \ FN(skb_change_type), \ FN(skb_under_cgroup), \ FN(get_hash_recalc), \ FN(get_current_task), \ FN(probe_write_user), \ FN(current_task_under_cgroup), \ FN(skb_change_tail), \ FN(skb_pull_data), \ FN(csum_update), \ FN(set_hash_invalid), \ FN(get_numa_node_id), \ FN(skb_change_head), \ FN(xdp_adjust_head), \ FN(probe_read_str), \ FN(get_socket_cookie), \ FN(get_socket_uid), \ FN(set_hash), \ FN(setsockopt), \ FN(skb_adjust_room), \ FN(redirect_map), \ FN(sk_redirect_map), \ FN(sock_map_update), \ FN(xdp_adjust_meta), \ FN(perf_event_read_value), \ FN(perf_prog_read_value), \ FN(getsockopt), \ FN(override_return), \ FN(sock_ops_cb_flags_set), \ FN(msg_redirect_map), \ FN(msg_apply_bytes), \ FN(msg_cork_bytes), \ FN(msg_pull_data), \ FN(bind), \ FN(xdp_adjust_tail), \ FN(skb_get_xfrm_state), \ FN(get_stack), \ FN(skb_load_bytes_relative), \ FN(fib_lookup), \ FN(sock_hash_update), \ FN(msg_redirect_hash), \ FN(sk_redirect_hash), \ FN(lwt_push_encap), \ FN(lwt_seg6_store_bytes), \ FN(lwt_seg6_adjust_srh), \ FN(lwt_seg6_action), \ FN(rc_repeat), \ FN(rc_keydown), \ FN(skb_cgroup_id), \ FN(get_current_cgroup_id), \ FN(get_local_storage), \ FN(sk_select_reuseport), \ FN(skb_ancestor_cgroup_id), \ FN(sk_lookup_tcp), \ FN(sk_lookup_udp), \ FN(sk_release), \ FN(map_push_elem), \ FN(map_pop_elem), \ FN(map_peek_elem), \ FN(msg_push_data), \ FN(msg_pop_data), \ FN(rc_pointer_rel), \ FN(spin_lock), \ FN(spin_unlock), \ FN(sk_fullsock), \ FN(tcp_sock), \
FN(skb_ecn_set_ce), \ FN(get_listener_sock), \ FN(skc_lookup_tcp), \ FN(tcp_check_syncookie), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call */ #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x enum bpf_func_id { __BPF_FUNC_MAPPER(__BPF_ENUM_FN) __BPF_FUNC_MAX_ID, }; #undef __BPF_ENUM_FN /* All flags used by eBPF helper functions, placed here. */ /* BPF_FUNC_skb_store_bytes flags. */ #define BPF_F_RECOMPUTE_CSUM (1ULL << 0) #define BPF_F_INVALIDATE_HASH (1ULL << 1) /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. * First 4 bits are for passing the header field size. */ #define BPF_F_HDR_FIELD_MASK 0xfULL /* BPF_FUNC_l4_csum_replace flags. */ #define BPF_F_PSEUDO_HDR (1ULL << 4) #define BPF_F_MARK_MANGLED_0 (1ULL << 5) #define BPF_F_MARK_ENFORCE (1ULL << 6) /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ #define BPF_F_INGRESS (1ULL << 0) /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ #define BPF_F_TUNINFO_IPV6 (1ULL << 0) /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ #define BPF_F_SKIP_FIELD_MASK 0xffULL #define BPF_F_USER_STACK (1ULL << 8) /* flags used by BPF_FUNC_get_stackid only. */ #define BPF_F_FAST_STACK_CMP (1ULL << 9) #define BPF_F_REUSE_STACKID (1ULL << 10) /* flags used by BPF_FUNC_get_stack only. */ #define BPF_F_USER_BUILD_ID (1ULL << 11) /* BPF_FUNC_skb_set_tunnel_key flags. */ #define BPF_F_ZERO_CSUM_TX (1ULL << 1) #define BPF_F_DONT_FRAGMENT (1ULL << 2) #define BPF_F_SEQ_NUMBER (1ULL << 3) /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and * BPF_FUNC_perf_event_read_value flags. */ #define BPF_F_INDEX_MASK 0xffffffffULL #define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK /* BPF_FUNC_perf_event_output for sk_buff input context. */ #define BPF_F_CTXLEN_MASK (0xfffffULL << 32) /* Current network namespace */ #define BPF_F_CURRENT_NETNS (-1L) /* BPF_FUNC_skb_adjust_room flags. */ #define BPF_F_ADJ_ROOM_FIXED_GSO (1ULL << 0) #define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 (1ULL << 1) #define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 (1ULL << 2) #define BPF_F_ADJ_ROOM_ENCAP_L4_GRE (1ULL << 3) #define BPF_F_ADJ_ROOM_ENCAP_L4_UDP (1ULL << 4) /* Mode for BPF_FUNC_skb_adjust_room helper. */ enum bpf_adj_room_mode { BPF_ADJ_ROOM_NET, BPF_ADJ_ROOM_MAC, }; /* Mode for BPF_FUNC_skb_load_bytes_relative helper. */ enum bpf_hdr_start_off { BPF_HDR_START_MAC, BPF_HDR_START_NET, }; /* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */ enum bpf_lwt_encap_mode { BPF_LWT_ENCAP_SEG6, BPF_LWT_ENCAP_SEG6_INLINE, BPF_LWT_ENCAP_IP, }; #define __bpf_md_ptr(type, name) \ union { \ type name; \ __u64 :64; \ } __attribute__((aligned(8))) /* user accessible mirror of in-kernel sk_buff. * new fields can only be added to the end of this structure */ struct __sk_buff { __u32 len; __u32 pkt_type; __u32 mark; __u32 queue_mapping; __u32 protocol; __u32 vlan_present; __u32 vlan_tci; __u32 vlan_proto; __u32 priority; __u32 ingress_ifindex; __u32 ifindex; __u32 tc_index; __u32 cb[5]; __u32 hash; __u32 tc_classid; __u32 data; __u32 data_end; __u32 napi_id; /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */ __u32 family; __u32 remote_ip4; /* Stored in network byte order */ __u32 local_ip4; /* Stored in network byte order */ __u32 remote_ip6[4]; /* Stored in network byte order */ __u32 local_ip6[4]; /* Stored in network byte order */ __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ /* ... here. 
*/ __u32 data_meta; __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); __u64 tstamp; __u32 wire_len; __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); }; struct bpf_tunnel_key { __u32 tunnel_id; union { __u32 remote_ipv4; __u32 remote_ipv6[4]; }; __u8 tunnel_tos; __u8 tunnel_ttl; __u16 tunnel_ext; /* Padding, future use. */ __u32 tunnel_label; }; /* user accessible mirror of in-kernel xfrm_state. * new fields can only be added to the end of this structure */ struct bpf_xfrm_state { __u32 reqid; __u32 spi; /* Stored in network byte order */ __u16 family; __u16 ext; /* Padding, future use. */ union { __u32 remote_ipv4; /* Stored in network byte order */ __u32 remote_ipv6[4]; /* Stored in network byte order */ }; }; /* Generic BPF return codes which all BPF program types may support. * The values are binary compatible with their TC_ACT_* counter-part to * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT * programs. * * XDP is handled separately, see XDP_*. */ enum bpf_ret_code { BPF_OK = 0, /* 1 reserved */ BPF_DROP = 2, /* 3-6 reserved */ BPF_REDIRECT = 7, /* >127 are reserved for prog type specific return codes. * * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been * changed and should be routed based on its new L3 header. * (This is an L3 redirect, as opposed to L2 redirect * represented by BPF_REDIRECT above). */ BPF_LWT_REROUTE = 128, }; struct bpf_sock { __u32 bound_dev_if; __u32 family; __u32 type; __u32 protocol; __u32 mark; __u32 priority; /* IP address fields also allow 1- and 2-byte access */ __u32 src_ip4; __u32 src_ip6[4]; __u32 src_port; /* host byte order */ __u32 dst_port; /* network byte order */ __u32 dst_ip4; __u32 dst_ip6[4]; __u32 state; }; struct bpf_tcp_sock { __u32 snd_cwnd; /* Sending congestion window */ __u32 srtt_us; /* smoothed round trip time << 3 in usecs */ __u32 rtt_min; __u32 snd_ssthresh; /* Slow start size threshold */ __u32 rcv_nxt; /* What we want to receive next */ __u32 snd_nxt; /* Next sequence we send */ __u32 snd_una; /* First byte we want an ack for */ __u32 mss_cache; /* Cached effective mss, not including SACKS */ __u32 ecn_flags; /* ECN status bits. */ __u32 rate_delivered; /* saved rate sample: packets delivered */ __u32 rate_interval_us; /* saved rate sample: time elapsed */ __u32 packets_out; /* Packets which are "in flight" */ __u32 retrans_out; /* Retransmitted packets out */ __u32 total_retrans; /* Total retransmits for entire connection */ __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn * total number of segments in. */ __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn * total number of data segments in. */ __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut * The total number of segments sent. */ __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut * total number of data segments sent. */ __u32 lost_out; /* Lost packets */ __u32 sacked_out; /* SACK'd packets */ __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived * sum(delta(rcv_nxt)), or how many bytes * were received. */ __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked * sum(delta(snd_una)), or how many bytes * were acked. */ }; struct bpf_sock_tuple { union { struct { __be32 saddr; __be32 daddr; __be16 sport; __be16 dport; } ipv4; struct { __be32 saddr[4]; __be32 daddr[4]; __be16 sport; __be16 dport; } ipv6; }; }; #define XDP_PACKET_HEADROOM 256 /* User return codes for XDP prog type. * A valid XDP program must return one of these defined values.
All other * return codes are reserved for future use. Unknown return codes will * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). */ enum xdp_action { XDP_ABORTED = 0, XDP_DROP, XDP_PASS, XDP_TX, XDP_REDIRECT, }; /* user accessible metadata for XDP packet hook * new fields must be added to the end of this structure */ struct xdp_md { __u32 data; __u32 data_end; __u32 data_meta; /* Accesses below go through struct xdp_rxq_info */ __u32 ingress_ifindex; /* rxq->dev->ifindex */ __u32 rx_queue_index; /* rxq->queue_index */ }; enum sk_action { SK_DROP = 0, SK_PASS, }; /* user accessible metadata for SK_MSG packet hook, new fields must * be added to the end of this structure */ struct sk_msg_md { __bpf_md_ptr(void *, data); __bpf_md_ptr(void *, data_end); __u32 family; __u32 remote_ip4; /* Stored in network byte order */ __u32 local_ip4; /* Stored in network byte order */ __u32 remote_ip6[4]; /* Stored in network byte order */ __u32 local_ip6[4]; /* Stored in network byte order */ __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ __u32 size; /* Total size of sk_msg */ }; struct sk_reuseport_md { /* * Start of directly accessible data. It begins from * the tcp/udp header. */ __bpf_md_ptr(void *, data); /* End of directly accessible data */ __bpf_md_ptr(void *, data_end); /* * Total length of packet (starting from the tcp/udp header). * Note that the directly accessible bytes (data_end - data) * could be less than this "len". Those bytes could be * indirectly read by a helper "bpf_skb_load_bytes()". */ __u32 len; /* * Eth protocol in the mac header (network byte order). e.g. * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD) */ __u32 eth_protocol; __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ __u32 bind_inany; /* Is sock bound to an INANY address? */ __u32 hash; /* A hash of the packet 4 tuples */ }; #define BPF_TAG_SIZE 8 struct bpf_prog_info { __u32 type; __u32 id; __u8 tag[BPF_TAG_SIZE]; __u32 jited_prog_len; __u32 xlated_prog_len; __aligned_u64 jited_prog_insns; __aligned_u64 xlated_prog_insns; __u64 load_time; /* ns since boottime */ __u32 created_by_uid; __u32 nr_map_ids; __aligned_u64 map_ids; char name[BPF_OBJ_NAME_LEN]; __u32 ifindex; __u32 gpl_compatible:1; __u64 netns_dev; __u64 netns_ino; __u32 nr_jited_ksyms; __u32 nr_jited_func_lens; __aligned_u64 jited_ksyms; __aligned_u64 jited_func_lens; __u32 btf_id; __u32 func_info_rec_size; __aligned_u64 func_info; __u32 nr_func_info; __u32 nr_line_info; __aligned_u64 line_info; __aligned_u64 jited_line_info; __u32 nr_jited_line_info; __u32 line_info_rec_size; __u32 jited_line_info_rec_size; __u32 nr_prog_tags; __aligned_u64 prog_tags; __u64 run_time_ns; __u64 run_cnt; } __attribute__((aligned(8))); struct bpf_map_info { __u32 type; __u32 id; __u32 key_size; __u32 value_size; __u32 max_entries; __u32 map_flags; char name[BPF_OBJ_NAME_LEN]; __u32 ifindex; __u32 :32; __u64 netns_dev; __u64 netns_ino; __u32 btf_id; __u32 btf_key_type_id; __u32 btf_value_type_id; } __attribute__((aligned(8))); struct bpf_btf_info { __aligned_u64 btf; __u32 btf_size; __u32 id; } __attribute__((aligned(8))); /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed * by user and intended to be used by socket (e.g. to bind to, depends on * attach type). */ struct bpf_sock_addr { __u32 user_family; /* Allows 4-byte read, but no write. */ __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order.
*/ __u32 user_ip6[4]; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ __u32 user_port; /* Allows 4-byte read and write. * Stored in network byte order */ __u32 family; /* Allows 4-byte read, but no write */ __u32 type; /* Allows 4-byte read, but no write */ __u32 protocol; /* Allows 4-byte read, but no write */ __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ __u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ }; /* User bpf_sock_ops struct to access socket values and specify request ops * and their replies. * Some of these fields are in network (big-endian) byte order and may need * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h). * New fields can only be added at the end of this structure */ struct bpf_sock_ops { __u32 op; union { __u32 args[4]; /* Optionally passed to bpf program */ __u32 reply; /* Returned by bpf program */ __u32 replylong[4]; /* Optionally returned by bpf prog */ }; __u32 family; __u32 remote_ip4; /* Stored in network byte order */ __u32 local_ip4; /* Stored in network byte order */ __u32 remote_ip6[4]; /* Stored in network byte order */ __u32 local_ip6[4]; /* Stored in network byte order */ __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ __u32 is_fullsock; /* Some TCP fields are only valid if * there is a full socket. If not, the * fields read as zero. */ __u32 snd_cwnd; __u32 srtt_us; /* Averaged RTT << 3 in usecs */ __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */ __u32 state; __u32 rtt_min; __u32 snd_ssthresh; __u32 rcv_nxt; __u32 snd_nxt; __u32 snd_una; __u32 mss_cache; __u32 ecn_flags; __u32 rate_delivered; __u32 rate_interval_us; __u32 packets_out; __u32 retrans_out; __u32 total_retrans; __u32 segs_in; __u32 data_segs_in; __u32 segs_out; __u32 data_segs_out; __u32 lost_out; __u32 sacked_out; __u32 sk_txhash; __u64 bytes_received; __u64 bytes_acked; }; /* Definitions for bpf_sock_ops_cb_flags */ #define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0) #define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1) #define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2) #define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently * supported cb flags */ /* List of known BPF sock_ops operators. * New entries can only be added at the end */ enum { BPF_SOCK_OPS_VOID, BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or * -1 if default value should be used */ BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertised * window (in packets) or -1 if default * value should be used */ BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an * active connection is initialized */ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an * active connection is * established */ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a * passive connection is * established */ BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control * needs ECN */ BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is * based on the path and may be * dependent on the congestion control * algorithm. In general it indicates * a congestion threshold. RTTs above * this indicate congestion */ BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered. * Arg1: value of icsk_retransmits * Arg2: value of icsk_rto * Arg3: whether RTO has expired */ BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted.
* Arg1: sequence number of 1st byte * Arg2: # segments * Arg3: return value of * tcp_transmit_skb (0 => success) */ BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state. * Arg1: old_state * Arg2: new_state */ BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after * socket transition to LISTEN state. */ }; /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect * changes between the TCP and BPF versions. Ideally this should never happen. * If it does, we need to add code to convert them before calling * the BPF sock_ops function. */ enum { BPF_TCP_ESTABLISHED = 1, BPF_TCP_SYN_SENT, BPF_TCP_SYN_RECV, BPF_TCP_FIN_WAIT1, BPF_TCP_FIN_WAIT2, BPF_TCP_TIME_WAIT, BPF_TCP_CLOSE, BPF_TCP_CLOSE_WAIT, BPF_TCP_LAST_ACK, BPF_TCP_LISTEN, BPF_TCP_CLOSING, /* Now a valid state */ BPF_TCP_NEW_SYN_RECV, BPF_TCP_MAX_STATES /* Leave at the end! */ }; #define TCP_BPF_IW 1001 /* Set TCP initial congestion window */ #define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */ struct bpf_perf_event_value { __u64 counter; __u64 enabled; __u64 running; }; #define BPF_DEVCG_ACC_MKNOD (1ULL << 0) #define BPF_DEVCG_ACC_READ (1ULL << 1) #define BPF_DEVCG_ACC_WRITE (1ULL << 2) #define BPF_DEVCG_DEV_BLOCK (1ULL << 0) #define BPF_DEVCG_DEV_CHAR (1ULL << 1) struct bpf_cgroup_dev_ctx { /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ __u32 access_type; __u32 major; __u32 minor; }; struct bpf_raw_tracepoint_args { __u64 args[0]; }; /* DIRECT: Skip the FIB rules and go to FIB table associated with device * OUTPUT: Do lookup from egress perspective; default is ingress */ #define BPF_FIB_LOOKUP_DIRECT BIT(0) #define BPF_FIB_LOOKUP_OUTPUT BIT(1) enum { BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ }; struct bpf_fib_lookup { /* input: network family for lookup (AF_INET, AF_INET6) * output: network family of egress nexthop */ __u8 family; /* set if lookup is to consider L4 data - e.g., FIB rules */ __u8 l4_protocol; __be16 sport; __be16 dport; /* total length of packet from network header - used for MTU check */ __u16 tot_len; /* input: L3 device index for lookup * output: device index from FIB lookup */ __u32 ifindex; union { /* inputs to lookup */ __u8 tos; /* AF_INET */ __be32 flowinfo; /* AF_INET6, flow_label + priority */ /* output: metric of fib result (IPv4/IPv6 only) */ __u32 rt_metric; }; union { __be32 ipv4_src; __u32 ipv6_src[4]; /* in6_addr; network order */ }; /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in * network header. 
output: bpf_fib_lookup sets it to the gateway address * if the FIB lookup returns a gateway route */ union { __be32 ipv4_dst; __u32 ipv6_dst[4]; /* in6_addr; network order */ }; /* output */ __be16 h_vlan_proto; __be16 h_vlan_TCI; __u8 smac[6]; /* ETH_ALEN */ __u8 dmac[6]; /* ETH_ALEN */ }; enum bpf_task_fd_type { BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ BPF_FD_TYPE_TRACEPOINT, /* tp name */ BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */ BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */ BPF_FD_TYPE_UPROBE, /* filename + offset */ BPF_FD_TYPE_URETPROBE, /* filename + offset */ }; struct bpf_flow_keys { __u16 nhoff; __u16 thoff; __u16 addr_proto; /* ETH_P_* of valid addrs */ __u8 is_frag; __u8 is_first_frag; __u8 is_encap; __u8 ip_proto; __be16 n_proto; __be16 sport; __be16 dport; union { struct { __be32 ipv4_src; __be32 ipv4_dst; }; struct { __u32 ipv6_src[4]; /* in6_addr; network order */ __u32 ipv6_dst[4]; /* in6_addr; network order */ }; }; }; struct bpf_func_info { __u32 insn_off; __u32 type_id; }; #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10) #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff) struct bpf_line_info { __u32 insn_off; __u32 file_name_off; __u32 line_off; __u32 line_col; }; struct bpf_spin_lock { __u32 val; }; #endif /* _UAPI__LINUX_BPF_H__ */ dwarves-dfsg-1.15/lib/bpf/include/uapi/linux/bpf_common.h000066400000000000000000000025461344730411300233700ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI__LINUX_BPF_COMMON_H__ #define _UAPI__LINUX_BPF_COMMON_H__ /* Instruction classes */ #define BPF_CLASS(code) ((code) & 0x07) #define BPF_LD 0x00 #define BPF_LDX 0x01 #define BPF_ST 0x02 #define BPF_STX 0x03 #define BPF_ALU 0x04 #define BPF_JMP 0x05 #define BPF_RET 0x06 #define BPF_MISC 0x07 /* ld/ldx fields */ #define BPF_SIZE(code) ((code) & 0x18) #define BPF_W 0x00 /* 32-bit */ #define BPF_H 0x08 /* 16-bit */ #define BPF_B 0x10 /* 8-bit */ /* eBPF BPF_DW 0x18 64-bit */ #define BPF_MODE(code) ((code) & 0xe0) #define BPF_IMM 0x00 #define BPF_ABS 0x20 #define BPF_IND 0x40 #define BPF_MEM 0x60 #define BPF_LEN 0x80 #define BPF_MSH 0xa0 /* alu/jmp fields */ #define BPF_OP(code) ((code) & 0xf0) #define BPF_ADD 0x00 #define BPF_SUB 0x10 #define BPF_MUL 0x20 #define BPF_DIV 0x30 #define BPF_OR 0x40 #define BPF_AND 0x50 #define BPF_LSH 0x60 #define BPF_RSH 0x70 #define BPF_NEG 0x80 #define BPF_MOD 0x90 #define BPF_XOR 0xa0 #define BPF_JA 0x00 #define BPF_JEQ 0x10 #define BPF_JGT 0x20 #define BPF_JGE 0x30 #define BPF_JSET 0x40 #define BPF_SRC(code) ((code) & 0x08) #define BPF_K 0x00 #define BPF_X 0x08 #ifndef BPF_MAXINSNS #define BPF_MAXINSNS 4096 #endif #endif /* _UAPI__LINUX_BPF_COMMON_H__ */ dwarves-dfsg-1.15/lib/bpf/include/uapi/linux/btf.h000066400000000000000000000076731344730411300220300ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* Copyright (c) 2018 Facebook */ #ifndef _UAPI__LINUX_BTF_H__ #define _UAPI__LINUX_BTF_H__ #include <linux/types.h> #define BTF_MAGIC 0xeB9F #define BTF_VERSION 1 struct btf_header { __u16 magic; __u8 version; __u8 flags; __u32 hdr_len; /* All offsets are in bytes relative to the end of this header */ __u32 type_off; /* offset of type section */ __u32 type_len; /* length of type section */ __u32 str_off; /* offset of string section */ __u32 str_len; /* length of string section */ }; /* Max # of type identifiers */ #define BTF_MAX_TYPE 0x0000ffff /* Max offset into the string section */ #define BTF_MAX_NAME_OFFSET 0x0000ffff /* Max # of
struct/union/enum members or func args */ #define BTF_MAX_VLEN 0xffff struct btf_type { __u32 name_off; /* "info" bits arrangement * bits 0-15: vlen (e.g. # of struct's members) * bits 16-23: unused * bits 24-27: kind (e.g. int, ptr, array, etc.) * bits 28-30: unused * bit 31: kind_flag, currently used by * struct, union and fwd */ __u32 info; /* "size" is used by INT, ENUM, STRUCT and UNION. * "size" tells the size of the type it is describing. * * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, * FUNC and FUNC_PROTO. * "type" is a type_id referring to another type. */ union { __u32 size; __u32 type; }; }; #define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f) #define BTF_INFO_VLEN(info) ((info) & 0xffff) #define BTF_INFO_KFLAG(info) ((info) >> 31) #define BTF_KIND_UNKN 0 /* Unknown */ #define BTF_KIND_INT 1 /* Integer */ #define BTF_KIND_PTR 2 /* Pointer */ #define BTF_KIND_ARRAY 3 /* Array */ #define BTF_KIND_STRUCT 4 /* Struct */ #define BTF_KIND_UNION 5 /* Union */ #define BTF_KIND_ENUM 6 /* Enumeration */ #define BTF_KIND_FWD 7 /* Forward */ #define BTF_KIND_TYPEDEF 8 /* Typedef */ #define BTF_KIND_VOLATILE 9 /* Volatile */ #define BTF_KIND_CONST 10 /* Const */ #define BTF_KIND_RESTRICT 11 /* Restrict */ #define BTF_KIND_FUNC 12 /* Function */ #define BTF_KIND_FUNC_PROTO 13 /* Function Proto */ #define BTF_KIND_MAX 13 #define NR_BTF_KINDS 14 /* For some specific BTF_KIND, "struct btf_type" is immediately * followed by extra data. */ /* BTF_KIND_INT is followed by a u32 and the following * is the 32-bit arrangement: */ #define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24) #define BTF_INT_OFFSET(VAL) (((VAL) & 0x00ff0000) >> 16) #define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff) /* Attributes stored in the BTF_INT_ENCODING */ #define BTF_INT_SIGNED (1 << 0) #define BTF_INT_CHAR (1 << 1) #define BTF_INT_BOOL (1 << 2) /* BTF_KIND_ENUM is followed by multiple "struct btf_enum". * The exact number of btf_enum is stored in the vlen (of the * info in "struct btf_type"). */ struct btf_enum { __u32 name_off; __s32 val; }; /* BTF_KIND_ARRAY is followed by one "struct btf_array" */ struct btf_array { __u32 type; __u32 index_type; __u32 nelems; }; /* BTF_KIND_STRUCT and BTF_KIND_UNION are followed * by multiple "struct btf_member". The exact number * of btf_member is stored in the vlen (of the info in * "struct btf_type"). */ struct btf_member { __u32 name_off; __u32 type; /* If the type info kind_flag is set, the btf_member offset * contains both member bitfield size and bit offset. The * bitfield size is set for bitfield members. If the type * info kind_flag is not set, the offset contains only bit * offset. */ __u32 offset; }; /* If the struct/union type info kind_flag is set, the * following two macros are used to access bitfield_size * and bit_offset from btf_member.offset. */ #define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24) #define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff) /* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param". * The exact number of btf_param is stored in the vlen (of the * info in "struct btf_type").
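 *
 * As an editorial illustration (not part of the original header), the
 * btf_param entries that follow a BTF_KIND_FUNC_PROTO type "t" could be
 * walked as below, assuming "t" points into a validated type section and
 * "use_param" is a hypothetical consumer:
 *
 *	const struct btf_param *p = (const struct btf_param *)(t + 1);
 *	__u16 i, vlen = BTF_INFO_VLEN(t->info);
 *
 *	for (i = 0; i < vlen; i++, p++)
 *		use_param(p->name_off, p->type);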
*/ struct btf_param { __u32 name_off; __u32 type; }; #endif /* _UAPI__LINUX_BTF_H__ */ dwarves-dfsg-1.15/lib/bpf/include/uapi/linux/if_link.h000066400000000000000000000560221344730411300226620ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_LINUX_IF_LINK_H #define _UAPI_LINUX_IF_LINK_H #include #include /* This struct should be in sync with struct rtnl_link_stats64 */ struct rtnl_link_stats { __u32 rx_packets; /* total packets received */ __u32 tx_packets; /* total packets transmitted */ __u32 rx_bytes; /* total bytes received */ __u32 tx_bytes; /* total bytes transmitted */ __u32 rx_errors; /* bad packets received */ __u32 tx_errors; /* packet transmit problems */ __u32 rx_dropped; /* no space in linux buffers */ __u32 tx_dropped; /* no space available in linux */ __u32 multicast; /* multicast packets received */ __u32 collisions; /* detailed rx_errors: */ __u32 rx_length_errors; __u32 rx_over_errors; /* receiver ring buff overflow */ __u32 rx_crc_errors; /* recved pkt with crc error */ __u32 rx_frame_errors; /* recv'd frame alignment error */ __u32 rx_fifo_errors; /* recv'r fifo overrun */ __u32 rx_missed_errors; /* receiver missed packet */ /* detailed tx_errors */ __u32 tx_aborted_errors; __u32 tx_carrier_errors; __u32 tx_fifo_errors; __u32 tx_heartbeat_errors; __u32 tx_window_errors; /* for cslip etc */ __u32 rx_compressed; __u32 tx_compressed; __u32 rx_nohandler; /* dropped, no handler found */ }; /* The main device statistics structure */ struct rtnl_link_stats64 { __u64 rx_packets; /* total packets received */ __u64 tx_packets; /* total packets transmitted */ __u64 rx_bytes; /* total bytes received */ __u64 tx_bytes; /* total bytes transmitted */ __u64 rx_errors; /* bad packets received */ __u64 tx_errors; /* packet transmit problems */ __u64 rx_dropped; /* no space in linux buffers */ __u64 tx_dropped; /* no space available in linux */ __u64 multicast; /* multicast packets received */ __u64 collisions; /* detailed rx_errors: */ __u64 rx_length_errors; __u64 rx_over_errors; /* receiver ring buff overflow */ __u64 rx_crc_errors; /* recved pkt with crc error */ __u64 rx_frame_errors; /* recv'd frame alignment error */ __u64 rx_fifo_errors; /* recv'r fifo overrun */ __u64 rx_missed_errors; /* receiver missed packet */ /* detailed tx_errors */ __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; /* for cslip etc */ __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; /* dropped, no handler found */ }; /* The struct should be in sync with struct ifmap */ struct rtnl_link_ifmap { __u64 mem_start; __u64 mem_end; __u64 base_addr; __u16 irq; __u8 dma; __u8 port; }; /* * IFLA_AF_SPEC * Contains nested attributes for address family specific attributes. * Each address family may create a attribute with the address family * number as type and create its own attribute structure in it. 
* * Example: * [IFLA_AF_SPEC] = { * [AF_INET] = { * [IFLA_INET_CONF] = ..., * }, * [AF_INET6] = { * [IFLA_INET6_FLAGS] = ..., * [IFLA_INET6_CONF] = ..., * } * } */ enum { IFLA_UNSPEC, IFLA_ADDRESS, IFLA_BROADCAST, IFLA_IFNAME, IFLA_MTU, IFLA_LINK, IFLA_QDISC, IFLA_STATS, IFLA_COST, #define IFLA_COST IFLA_COST IFLA_PRIORITY, #define IFLA_PRIORITY IFLA_PRIORITY IFLA_MASTER, #define IFLA_MASTER IFLA_MASTER IFLA_WIRELESS, /* Wireless Extension event - see wireless.h */ #define IFLA_WIRELESS IFLA_WIRELESS IFLA_PROTINFO, /* Protocol specific information for a link */ #define IFLA_PROTINFO IFLA_PROTINFO IFLA_TXQLEN, #define IFLA_TXQLEN IFLA_TXQLEN IFLA_MAP, #define IFLA_MAP IFLA_MAP IFLA_WEIGHT, #define IFLA_WEIGHT IFLA_WEIGHT IFLA_OPERSTATE, IFLA_LINKMODE, IFLA_LINKINFO, #define IFLA_LINKINFO IFLA_LINKINFO IFLA_NET_NS_PID, IFLA_IFALIAS, IFLA_NUM_VF, /* Number of VFs if device is SR-IOV PF */ IFLA_VFINFO_LIST, IFLA_STATS64, IFLA_VF_PORTS, IFLA_PORT_SELF, IFLA_AF_SPEC, IFLA_GROUP, /* Group the device belongs to */ IFLA_NET_NS_FD, IFLA_EXT_MASK, /* Extended info mask, VFs, etc */ IFLA_PROMISCUITY, /* Promiscuity count: > 0 means acts PROMISC */ #define IFLA_PROMISCUITY IFLA_PROMISCUITY IFLA_NUM_TX_QUEUES, IFLA_NUM_RX_QUEUES, IFLA_CARRIER, IFLA_PHYS_PORT_ID, IFLA_CARRIER_CHANGES, IFLA_PHYS_SWITCH_ID, IFLA_LINK_NETNSID, IFLA_PHYS_PORT_NAME, IFLA_PROTO_DOWN, IFLA_GSO_MAX_SEGS, IFLA_GSO_MAX_SIZE, IFLA_PAD, IFLA_XDP, IFLA_EVENT, IFLA_NEW_NETNSID, IFLA_IF_NETNSID, IFLA_TARGET_NETNSID = IFLA_IF_NETNSID, /* new alias */ IFLA_CARRIER_UP_COUNT, IFLA_CARRIER_DOWN_COUNT, IFLA_NEW_IFINDEX, IFLA_MIN_MTU, IFLA_MAX_MTU, __IFLA_MAX }; #define IFLA_MAX (__IFLA_MAX - 1) /* backwards compatibility for userspace */ #ifndef __KERNEL__ #define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg)))) #define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg)) #endif enum { IFLA_INET_UNSPEC, IFLA_INET_CONF, __IFLA_INET_MAX, }; #define IFLA_INET_MAX (__IFLA_INET_MAX - 1) /* ifi_flags. IFF_* flags. The only change is: IFF_LOOPBACK, IFF_BROADCAST and IFF_POINTOPOINT are more not changeable by user. They describe link media characteristics and set by device driver. Comments: - Combination IFF_BROADCAST|IFF_POINTOPOINT is invalid - If neither of these three flags are set; the interface is NBMA. - IFF_MULTICAST does not mean anything special: multicasts can be used on all not-NBMA links. IFF_MULTICAST means that this media uses special encapsulation for multicast frames. Apparently, all IFF_POINTOPOINT and IFF_BROADCAST devices are able to use multicasts too. */ /* IFLA_LINK. For usual devices it is equal ifi_index. If it is a "virtual interface" (f.e. tunnel), ifi_link can point to real physical interface (f.e. for bandwidth calculations), or maybe 0, what means, that real media is unknown (usual for IPIP tunnels, when route to endpoint is allowed to change) */ /* Subtype attributes for IFLA_PROTINFO */ enum { IFLA_INET6_UNSPEC, IFLA_INET6_FLAGS, /* link flags */ IFLA_INET6_CONF, /* sysctl parameters */ IFLA_INET6_STATS, /* statistics */ IFLA_INET6_MCAST, /* MC things. What of them? 
*/ IFLA_INET6_CACHEINFO, /* time values and max reasm size */ IFLA_INET6_ICMP6STATS, /* statistics (icmpv6) */ IFLA_INET6_TOKEN, /* device token */ IFLA_INET6_ADDR_GEN_MODE, /* implicit address generator mode */ __IFLA_INET6_MAX }; #define IFLA_INET6_MAX (__IFLA_INET6_MAX - 1) enum in6_addr_gen_mode { IN6_ADDR_GEN_MODE_EUI64, IN6_ADDR_GEN_MODE_NONE, IN6_ADDR_GEN_MODE_STABLE_PRIVACY, IN6_ADDR_GEN_MODE_RANDOM, }; /* Bridge section */ enum { IFLA_BR_UNSPEC, IFLA_BR_FORWARD_DELAY, IFLA_BR_HELLO_TIME, IFLA_BR_MAX_AGE, IFLA_BR_AGEING_TIME, IFLA_BR_STP_STATE, IFLA_BR_PRIORITY, IFLA_BR_VLAN_FILTERING, IFLA_BR_VLAN_PROTOCOL, IFLA_BR_GROUP_FWD_MASK, IFLA_BR_ROOT_ID, IFLA_BR_BRIDGE_ID, IFLA_BR_ROOT_PORT, IFLA_BR_ROOT_PATH_COST, IFLA_BR_TOPOLOGY_CHANGE, IFLA_BR_TOPOLOGY_CHANGE_DETECTED, IFLA_BR_HELLO_TIMER, IFLA_BR_TCN_TIMER, IFLA_BR_TOPOLOGY_CHANGE_TIMER, IFLA_BR_GC_TIMER, IFLA_BR_GROUP_ADDR, IFLA_BR_FDB_FLUSH, IFLA_BR_MCAST_ROUTER, IFLA_BR_MCAST_SNOOPING, IFLA_BR_MCAST_QUERY_USE_IFADDR, IFLA_BR_MCAST_QUERIER, IFLA_BR_MCAST_HASH_ELASTICITY, IFLA_BR_MCAST_HASH_MAX, IFLA_BR_MCAST_LAST_MEMBER_CNT, IFLA_BR_MCAST_STARTUP_QUERY_CNT, IFLA_BR_MCAST_LAST_MEMBER_INTVL, IFLA_BR_MCAST_MEMBERSHIP_INTVL, IFLA_BR_MCAST_QUERIER_INTVL, IFLA_BR_MCAST_QUERY_INTVL, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, IFLA_BR_NF_CALL_IPTABLES, IFLA_BR_NF_CALL_IP6TABLES, IFLA_BR_NF_CALL_ARPTABLES, IFLA_BR_VLAN_DEFAULT_PVID, IFLA_BR_PAD, IFLA_BR_VLAN_STATS_ENABLED, IFLA_BR_MCAST_STATS_ENABLED, IFLA_BR_MCAST_IGMP_VERSION, IFLA_BR_MCAST_MLD_VERSION, IFLA_BR_VLAN_STATS_PER_PORT, IFLA_BR_MULTI_BOOLOPT, __IFLA_BR_MAX, }; #define IFLA_BR_MAX (__IFLA_BR_MAX - 1) struct ifla_bridge_id { __u8 prio[2]; __u8 addr[6]; /* ETH_ALEN */ }; enum { BRIDGE_MODE_UNSPEC, BRIDGE_MODE_HAIRPIN, }; enum { IFLA_BRPORT_UNSPEC, IFLA_BRPORT_STATE, /* Spanning tree state */ IFLA_BRPORT_PRIORITY, /* " priority */ IFLA_BRPORT_COST, /* " cost */ IFLA_BRPORT_MODE, /* mode (hairpin) */ IFLA_BRPORT_GUARD, /* bpdu guard */ IFLA_BRPORT_PROTECT, /* root port protection */ IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave */ IFLA_BRPORT_LEARNING, /* mac learning */ IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */ IFLA_BRPORT_PROXYARP, /* proxy ARP */ IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */ IFLA_BRPORT_PROXYARP_WIFI, /* proxy ARP for Wi-Fi */ IFLA_BRPORT_ROOT_ID, /* designated root */ IFLA_BRPORT_BRIDGE_ID, /* designated bridge */ IFLA_BRPORT_DESIGNATED_PORT, IFLA_BRPORT_DESIGNATED_COST, IFLA_BRPORT_ID, IFLA_BRPORT_NO, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK, IFLA_BRPORT_CONFIG_PENDING, IFLA_BRPORT_MESSAGE_AGE_TIMER, IFLA_BRPORT_FORWARD_DELAY_TIMER, IFLA_BRPORT_HOLD_TIMER, IFLA_BRPORT_FLUSH, IFLA_BRPORT_MULTICAST_ROUTER, IFLA_BRPORT_PAD, IFLA_BRPORT_MCAST_FLOOD, IFLA_BRPORT_MCAST_TO_UCAST, IFLA_BRPORT_VLAN_TUNNEL, IFLA_BRPORT_BCAST_FLOOD, IFLA_BRPORT_GROUP_FWD_MASK, IFLA_BRPORT_NEIGH_SUPPRESS, IFLA_BRPORT_ISOLATED, IFLA_BRPORT_BACKUP_PORT, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) struct ifla_cacheinfo { __u32 max_reasm_len; __u32 tstamp; /* ipv6InterfaceTable updated timestamp */ __u32 reachable_time; __u32 retrans_time; }; enum { IFLA_INFO_UNSPEC, IFLA_INFO_KIND, IFLA_INFO_DATA, IFLA_INFO_XSTATS, IFLA_INFO_SLAVE_KIND, IFLA_INFO_SLAVE_DATA, __IFLA_INFO_MAX, }; #define IFLA_INFO_MAX (__IFLA_INFO_MAX - 1) /* VLAN section */ enum { IFLA_VLAN_UNSPEC, IFLA_VLAN_ID, IFLA_VLAN_FLAGS, IFLA_VLAN_EGRESS_QOS, IFLA_VLAN_INGRESS_QOS, IFLA_VLAN_PROTOCOL, __IFLA_VLAN_MAX, }; #define IFLA_VLAN_MAX (__IFLA_VLAN_MAX 
- 1) struct ifla_vlan_flags { __u32 flags; __u32 mask; }; enum { IFLA_VLAN_QOS_UNSPEC, IFLA_VLAN_QOS_MAPPING, __IFLA_VLAN_QOS_MAX }; #define IFLA_VLAN_QOS_MAX (__IFLA_VLAN_QOS_MAX - 1) struct ifla_vlan_qos_mapping { __u32 from; __u32 to; }; /* MACVLAN section */ enum { IFLA_MACVLAN_UNSPEC, IFLA_MACVLAN_MODE, IFLA_MACVLAN_FLAGS, IFLA_MACVLAN_MACADDR_MODE, IFLA_MACVLAN_MACADDR, IFLA_MACVLAN_MACADDR_DATA, IFLA_MACVLAN_MACADDR_COUNT, __IFLA_MACVLAN_MAX, }; #define IFLA_MACVLAN_MAX (__IFLA_MACVLAN_MAX - 1) enum macvlan_mode { MACVLAN_MODE_PRIVATE = 1, /* don't talk to other macvlans */ MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */ MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */ MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */ MACVLAN_MODE_SOURCE = 16,/* use source MAC address list to assign */ }; enum macvlan_macaddr_mode { MACVLAN_MACADDR_ADD, MACVLAN_MACADDR_DEL, MACVLAN_MACADDR_FLUSH, MACVLAN_MACADDR_SET, }; #define MACVLAN_FLAG_NOPROMISC 1 /* VRF section */ enum { IFLA_VRF_UNSPEC, IFLA_VRF_TABLE, __IFLA_VRF_MAX }; #define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1) enum { IFLA_VRF_PORT_UNSPEC, IFLA_VRF_PORT_TABLE, __IFLA_VRF_PORT_MAX }; #define IFLA_VRF_PORT_MAX (__IFLA_VRF_PORT_MAX - 1) /* MACSEC section */ enum { IFLA_MACSEC_UNSPEC, IFLA_MACSEC_SCI, IFLA_MACSEC_PORT, IFLA_MACSEC_ICV_LEN, IFLA_MACSEC_CIPHER_SUITE, IFLA_MACSEC_WINDOW, IFLA_MACSEC_ENCODING_SA, IFLA_MACSEC_ENCRYPT, IFLA_MACSEC_PROTECT, IFLA_MACSEC_INC_SCI, IFLA_MACSEC_ES, IFLA_MACSEC_SCB, IFLA_MACSEC_REPLAY_PROTECT, IFLA_MACSEC_VALIDATION, IFLA_MACSEC_PAD, __IFLA_MACSEC_MAX, }; #define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1) /* XFRM section */ enum { IFLA_XFRM_UNSPEC, IFLA_XFRM_LINK, IFLA_XFRM_IF_ID, __IFLA_XFRM_MAX }; #define IFLA_XFRM_MAX (__IFLA_XFRM_MAX - 1) enum macsec_validation_type { MACSEC_VALIDATE_DISABLED = 0, MACSEC_VALIDATE_CHECK = 1, MACSEC_VALIDATE_STRICT = 2, __MACSEC_VALIDATE_END, MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1, }; /* IPVLAN section */ enum { IFLA_IPVLAN_UNSPEC, IFLA_IPVLAN_MODE, IFLA_IPVLAN_FLAGS, __IFLA_IPVLAN_MAX }; #define IFLA_IPVLAN_MAX (__IFLA_IPVLAN_MAX - 1) enum ipvlan_mode { IPVLAN_MODE_L2 = 0, IPVLAN_MODE_L3, IPVLAN_MODE_L3S, IPVLAN_MODE_MAX }; #define IPVLAN_F_PRIVATE 0x01 #define IPVLAN_F_VEPA 0x02 /* VXLAN section */ enum { IFLA_VXLAN_UNSPEC, IFLA_VXLAN_ID, IFLA_VXLAN_GROUP, /* group or remote address */ IFLA_VXLAN_LINK, IFLA_VXLAN_LOCAL, IFLA_VXLAN_TTL, IFLA_VXLAN_TOS, IFLA_VXLAN_LEARNING, IFLA_VXLAN_AGEING, IFLA_VXLAN_LIMIT, IFLA_VXLAN_PORT_RANGE, /* source port */ IFLA_VXLAN_PROXY, IFLA_VXLAN_RSC, IFLA_VXLAN_L2MISS, IFLA_VXLAN_L3MISS, IFLA_VXLAN_PORT, /* destination port */ IFLA_VXLAN_GROUP6, IFLA_VXLAN_LOCAL6, IFLA_VXLAN_UDP_CSUM, IFLA_VXLAN_UDP_ZERO_CSUM6_TX, IFLA_VXLAN_UDP_ZERO_CSUM6_RX, IFLA_VXLAN_REMCSUM_TX, IFLA_VXLAN_REMCSUM_RX, IFLA_VXLAN_GBP, IFLA_VXLAN_REMCSUM_NOPARTIAL, IFLA_VXLAN_COLLECT_METADATA, IFLA_VXLAN_LABEL, IFLA_VXLAN_GPE, IFLA_VXLAN_TTL_INHERIT, IFLA_VXLAN_DF, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) struct ifla_vxlan_port_range { __be16 low; __be16 high; }; enum ifla_vxlan_df { VXLAN_DF_UNSET = 0, VXLAN_DF_SET, VXLAN_DF_INHERIT, __VXLAN_DF_END, VXLAN_DF_MAX = __VXLAN_DF_END - 1, }; /* GENEVE section */ enum { IFLA_GENEVE_UNSPEC, IFLA_GENEVE_ID, IFLA_GENEVE_REMOTE, IFLA_GENEVE_TTL, IFLA_GENEVE_TOS, IFLA_GENEVE_PORT, /* destination port */ IFLA_GENEVE_COLLECT_METADATA, IFLA_GENEVE_REMOTE6, IFLA_GENEVE_UDP_CSUM, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, IFLA_GENEVE_LABEL, 
IFLA_GENEVE_TTL_INHERIT, IFLA_GENEVE_DF, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) enum ifla_geneve_df { GENEVE_DF_UNSET = 0, GENEVE_DF_SET, GENEVE_DF_INHERIT, __GENEVE_DF_END, GENEVE_DF_MAX = __GENEVE_DF_END - 1, }; /* PPP section */ enum { IFLA_PPP_UNSPEC, IFLA_PPP_DEV_FD, __IFLA_PPP_MAX }; #define IFLA_PPP_MAX (__IFLA_PPP_MAX - 1) /* GTP section */ enum ifla_gtp_role { GTP_ROLE_GGSN = 0, GTP_ROLE_SGSN, }; enum { IFLA_GTP_UNSPEC, IFLA_GTP_FD0, IFLA_GTP_FD1, IFLA_GTP_PDP_HASHSIZE, IFLA_GTP_ROLE, __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) /* Bonding section */ enum { IFLA_BOND_UNSPEC, IFLA_BOND_MODE, IFLA_BOND_ACTIVE_SLAVE, IFLA_BOND_MIIMON, IFLA_BOND_UPDELAY, IFLA_BOND_DOWNDELAY, IFLA_BOND_USE_CARRIER, IFLA_BOND_ARP_INTERVAL, IFLA_BOND_ARP_IP_TARGET, IFLA_BOND_ARP_VALIDATE, IFLA_BOND_ARP_ALL_TARGETS, IFLA_BOND_PRIMARY, IFLA_BOND_PRIMARY_RESELECT, IFLA_BOND_FAIL_OVER_MAC, IFLA_BOND_XMIT_HASH_POLICY, IFLA_BOND_RESEND_IGMP, IFLA_BOND_NUM_PEER_NOTIF, IFLA_BOND_ALL_SLAVES_ACTIVE, IFLA_BOND_MIN_LINKS, IFLA_BOND_LP_INTERVAL, IFLA_BOND_PACKETS_PER_SLAVE, IFLA_BOND_AD_LACP_RATE, IFLA_BOND_AD_SELECT, IFLA_BOND_AD_INFO, IFLA_BOND_AD_ACTOR_SYS_PRIO, IFLA_BOND_AD_USER_PORT_KEY, IFLA_BOND_AD_ACTOR_SYSTEM, IFLA_BOND_TLB_DYNAMIC_LB, __IFLA_BOND_MAX, }; #define IFLA_BOND_MAX (__IFLA_BOND_MAX - 1) enum { IFLA_BOND_AD_INFO_UNSPEC, IFLA_BOND_AD_INFO_AGGREGATOR, IFLA_BOND_AD_INFO_NUM_PORTS, IFLA_BOND_AD_INFO_ACTOR_KEY, IFLA_BOND_AD_INFO_PARTNER_KEY, IFLA_BOND_AD_INFO_PARTNER_MAC, __IFLA_BOND_AD_INFO_MAX, }; #define IFLA_BOND_AD_INFO_MAX (__IFLA_BOND_AD_INFO_MAX - 1) enum { IFLA_BOND_SLAVE_UNSPEC, IFLA_BOND_SLAVE_STATE, IFLA_BOND_SLAVE_MII_STATUS, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT, IFLA_BOND_SLAVE_PERM_HWADDR, IFLA_BOND_SLAVE_QUEUE_ID, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID, IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE, IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE, __IFLA_BOND_SLAVE_MAX, }; #define IFLA_BOND_SLAVE_MAX (__IFLA_BOND_SLAVE_MAX - 1) /* SR-IOV virtual function management section */ enum { IFLA_VF_INFO_UNSPEC, IFLA_VF_INFO, __IFLA_VF_INFO_MAX, }; #define IFLA_VF_INFO_MAX (__IFLA_VF_INFO_MAX - 1) enum { IFLA_VF_UNSPEC, IFLA_VF_MAC, /* Hardware queue specific attributes */ IFLA_VF_VLAN, /* VLAN ID and QoS */ IFLA_VF_TX_RATE, /* Max TX Bandwidth Allocation */ IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */ IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */ IFLA_VF_RATE, /* Min and Max TX Bandwidth Allocation */ IFLA_VF_RSS_QUERY_EN, /* RSS Redirection Table and Hash Key query * on/off switch */ IFLA_VF_STATS, /* network device statistics */ IFLA_VF_TRUST, /* Trust VF */ IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */ IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */ IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */ __IFLA_VF_MAX, }; #define IFLA_VF_MAX (__IFLA_VF_MAX - 1) struct ifla_vf_mac { __u32 vf; __u8 mac[32]; /* MAX_ADDR_LEN */ }; struct ifla_vf_vlan { __u32 vf; __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ __u32 qos; }; enum { IFLA_VF_VLAN_INFO_UNSPEC, IFLA_VF_VLAN_INFO, /* VLAN ID, QoS and VLAN protocol */ __IFLA_VF_VLAN_INFO_MAX, }; #define IFLA_VF_VLAN_INFO_MAX (__IFLA_VF_VLAN_INFO_MAX - 1) #define MAX_VLAN_LIST_LEN 1 struct ifla_vf_vlan_info { __u32 vf; __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ __u32 qos; __be16 vlan_proto; /* VLAN protocol either 802.1Q or 802.1ad */ }; struct ifla_vf_tx_rate { __u32 vf; __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */ }; struct ifla_vf_rate { __u32 vf; 
__u32 min_tx_rate; /* Min Bandwidth in Mbps */ __u32 max_tx_rate; /* Max Bandwidth in Mbps */ }; struct ifla_vf_spoofchk { __u32 vf; __u32 setting; }; struct ifla_vf_guid { __u32 vf; __u64 guid; }; enum { IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */ IFLA_VF_LINK_STATE_ENABLE, /* link always up */ IFLA_VF_LINK_STATE_DISABLE, /* link always down */ __IFLA_VF_LINK_STATE_MAX, }; struct ifla_vf_link_state { __u32 vf; __u32 link_state; }; struct ifla_vf_rss_query_en { __u32 vf; __u32 setting; }; enum { IFLA_VF_STATS_RX_PACKETS, IFLA_VF_STATS_TX_PACKETS, IFLA_VF_STATS_RX_BYTES, IFLA_VF_STATS_TX_BYTES, IFLA_VF_STATS_BROADCAST, IFLA_VF_STATS_MULTICAST, IFLA_VF_STATS_PAD, IFLA_VF_STATS_RX_DROPPED, IFLA_VF_STATS_TX_DROPPED, __IFLA_VF_STATS_MAX, }; #define IFLA_VF_STATS_MAX (__IFLA_VF_STATS_MAX - 1) struct ifla_vf_trust { __u32 vf; __u32 setting; }; /* VF ports management section * * Nested layout of set/get msg is: * * [IFLA_NUM_VF] * [IFLA_VF_PORTS] * [IFLA_VF_PORT] * [IFLA_PORT_*], ... * [IFLA_VF_PORT] * [IFLA_PORT_*], ... * ... * [IFLA_PORT_SELF] * [IFLA_PORT_*], ... */ enum { IFLA_VF_PORT_UNSPEC, IFLA_VF_PORT, /* nest */ __IFLA_VF_PORT_MAX, }; #define IFLA_VF_PORT_MAX (__IFLA_VF_PORT_MAX - 1) enum { IFLA_PORT_UNSPEC, IFLA_PORT_VF, /* __u32 */ IFLA_PORT_PROFILE, /* string */ IFLA_PORT_VSI_TYPE, /* 802.1Qbg (pre-)standard VDP */ IFLA_PORT_INSTANCE_UUID, /* binary UUID */ IFLA_PORT_HOST_UUID, /* binary UUID */ IFLA_PORT_REQUEST, /* __u8 */ IFLA_PORT_RESPONSE, /* __u16, output only */ __IFLA_PORT_MAX, }; #define IFLA_PORT_MAX (__IFLA_PORT_MAX - 1) #define PORT_PROFILE_MAX 40 #define PORT_UUID_MAX 16 #define PORT_SELF_VF -1 enum { PORT_REQUEST_PREASSOCIATE = 0, PORT_REQUEST_PREASSOCIATE_RR, PORT_REQUEST_ASSOCIATE, PORT_REQUEST_DISASSOCIATE, }; enum { PORT_VDP_RESPONSE_SUCCESS = 0, PORT_VDP_RESPONSE_INVALID_FORMAT, PORT_VDP_RESPONSE_INSUFFICIENT_RESOURCES, PORT_VDP_RESPONSE_UNUSED_VTID, PORT_VDP_RESPONSE_VTID_VIOLATION, PORT_VDP_RESPONSE_VTID_VERSION_VIOALTION, PORT_VDP_RESPONSE_OUT_OF_SYNC, /* 0x08-0xFF reserved for future VDP use */ PORT_PROFILE_RESPONSE_SUCCESS = 0x100, PORT_PROFILE_RESPONSE_INPROGRESS, PORT_PROFILE_RESPONSE_INVALID, PORT_PROFILE_RESPONSE_BADSTATE, PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES, PORT_PROFILE_RESPONSE_ERROR, }; struct ifla_port_vsi { __u8 vsi_mgr_id; __u8 vsi_type_id[3]; __u8 vsi_type_version; __u8 pad[3]; }; /* IPoIB section */ enum { IFLA_IPOIB_UNSPEC, IFLA_IPOIB_PKEY, IFLA_IPOIB_MODE, IFLA_IPOIB_UMCAST, __IFLA_IPOIB_MAX }; enum { IPOIB_MODE_DATAGRAM = 0, /* using unreliable datagram QPs */ IPOIB_MODE_CONNECTED = 1, /* using connected QPs */ }; #define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1) /* HSR section */ enum { IFLA_HSR_UNSPEC, IFLA_HSR_SLAVE1, IFLA_HSR_SLAVE2, IFLA_HSR_MULTICAST_SPEC, /* Last byte of supervision addr */ IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */ IFLA_HSR_SEQ_NR, IFLA_HSR_VERSION, /* HSR version */ __IFLA_HSR_MAX, }; #define IFLA_HSR_MAX (__IFLA_HSR_MAX - 1) /* STATS section */ struct if_stats_msg { __u8 family; __u8 pad1; __u16 pad2; __u32 ifindex; __u32 filter_mask; }; /* A stats attribute can be netdev specific or a global stat. 
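 * (RTM_GETSTATS requests select which of these attributes to dump via
 * if_stats_msg.filter_mask above, built with IFLA_STATS_FILTER_BIT()
 * over the attribute values below; an illustrative mask requesting only
 * the basic 64-bit counters is IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64).)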
* For netdev stats, lets use the prefix IFLA_STATS_LINK_* */ enum { IFLA_STATS_UNSPEC, /* also used as 64bit pad attribute */ IFLA_STATS_LINK_64, IFLA_STATS_LINK_XSTATS, IFLA_STATS_LINK_XSTATS_SLAVE, IFLA_STATS_LINK_OFFLOAD_XSTATS, IFLA_STATS_AF_SPEC, __IFLA_STATS_MAX, }; #define IFLA_STATS_MAX (__IFLA_STATS_MAX - 1) #define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1)) /* These are embedded into IFLA_STATS_LINK_XSTATS: * [IFLA_STATS_LINK_XSTATS] * -> [LINK_XSTATS_TYPE_xxx] * -> [rtnl link type specific attributes] */ enum { LINK_XSTATS_TYPE_UNSPEC, LINK_XSTATS_TYPE_BRIDGE, LINK_XSTATS_TYPE_BOND, __LINK_XSTATS_TYPE_MAX }; #define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1) /* These are stats embedded into IFLA_STATS_LINK_OFFLOAD_XSTATS */ enum { IFLA_OFFLOAD_XSTATS_UNSPEC, IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */ __IFLA_OFFLOAD_XSTATS_MAX }; #define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1) /* XDP section */ #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) #define XDP_FLAGS_SKB_MODE (1U << 1) #define XDP_FLAGS_DRV_MODE (1U << 2) #define XDP_FLAGS_HW_MODE (1U << 3) #define XDP_FLAGS_MODES (XDP_FLAGS_SKB_MODE | \ XDP_FLAGS_DRV_MODE | \ XDP_FLAGS_HW_MODE) #define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \ XDP_FLAGS_MODES) /* These are stored into IFLA_XDP_ATTACHED on dump. */ enum { XDP_ATTACHED_NONE = 0, XDP_ATTACHED_DRV, XDP_ATTACHED_SKB, XDP_ATTACHED_HW, XDP_ATTACHED_MULTI, }; enum { IFLA_XDP_UNSPEC, IFLA_XDP_FD, IFLA_XDP_ATTACHED, IFLA_XDP_FLAGS, IFLA_XDP_PROG_ID, IFLA_XDP_DRV_PROG_ID, IFLA_XDP_SKB_PROG_ID, IFLA_XDP_HW_PROG_ID, __IFLA_XDP_MAX, }; #define IFLA_XDP_MAX (__IFLA_XDP_MAX - 1) enum { IFLA_EVENT_NONE, IFLA_EVENT_REBOOT, /* internal reset / reboot */ IFLA_EVENT_FEATURES, /* change in offload features */ IFLA_EVENT_BONDING_FAILOVER, /* change in active slave */ IFLA_EVENT_NOTIFY_PEERS, /* re-sent grat. arp/ndisc */ IFLA_EVENT_IGMP_RESEND, /* re-sent IGMP JOIN */ IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */ }; /* tun section */ enum { IFLA_TUN_UNSPEC, IFLA_TUN_OWNER, IFLA_TUN_GROUP, IFLA_TUN_TYPE, IFLA_TUN_PI, IFLA_TUN_VNET_HDR, IFLA_TUN_PERSIST, IFLA_TUN_MULTI_QUEUE, IFLA_TUN_NUM_QUEUES, IFLA_TUN_NUM_DISABLED_QUEUES, __IFLA_TUN_MAX, }; #define IFLA_TUN_MAX (__IFLA_TUN_MAX - 1) /* rmnet section */ #define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0) #define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) #define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) #define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) enum { IFLA_RMNET_UNSPEC, IFLA_RMNET_MUX_ID, IFLA_RMNET_FLAGS, __IFLA_RMNET_MAX, }; #define IFLA_RMNET_MAX (__IFLA_RMNET_MAX - 1) struct ifla_rmnet_flags { __u32 flags; __u32 mask; }; #endif /* _UAPI_LINUX_IF_LINK_H */ dwarves-dfsg-1.15/lib/bpf/include/uapi/linux/if_xdp.h000066400000000000000000000034721344730411300225210ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * if_xdp: XDP socket user-space interface * Copyright(c) 2018 Intel Corporation. 
* * Author(s): Björn Töpel * Magnus Karlsson */ #ifndef _LINUX_IF_XDP_H #define _LINUX_IF_XDP_H #include /* Options for the sxdp_flags field */ #define XDP_SHARED_UMEM (1 << 0) #define XDP_COPY (1 << 1) /* Force copy-mode */ #define XDP_ZEROCOPY (1 << 2) /* Force zero-copy mode */ struct sockaddr_xdp { __u16 sxdp_family; __u16 sxdp_flags; __u32 sxdp_ifindex; __u32 sxdp_queue_id; __u32 sxdp_shared_umem_fd; }; struct xdp_ring_offset { __u64 producer; __u64 consumer; __u64 desc; }; struct xdp_mmap_offsets { struct xdp_ring_offset rx; struct xdp_ring_offset tx; struct xdp_ring_offset fr; /* Fill */ struct xdp_ring_offset cr; /* Completion */ }; /* XDP socket options */ #define XDP_MMAP_OFFSETS 1 #define XDP_RX_RING 2 #define XDP_TX_RING 3 #define XDP_UMEM_REG 4 #define XDP_UMEM_FILL_RING 5 #define XDP_UMEM_COMPLETION_RING 6 #define XDP_STATISTICS 7 struct xdp_umem_reg { __u64 addr; /* Start of packet data area */ __u64 len; /* Length of packet data area */ __u32 chunk_size; __u32 headroom; }; struct xdp_statistics { __u64 rx_dropped; /* Dropped for reasons other than invalid desc */ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */ }; /* Pgoff for mmaping the rings */ #define XDP_PGOFF_RX_RING 0 #define XDP_PGOFF_TX_RING 0x80000000 #define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL #define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL /* Rx/Tx descriptor */ struct xdp_desc { __u64 addr; __u32 len; __u32 options; }; /* UMEM descriptor is __u64 */ #endif /* _LINUX_IF_XDP_H */ dwarves-dfsg-1.15/lib/bpf/include/uapi/linux/netlink.h000066400000000000000000000173241344730411300227150ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI__LINUX_NETLINK_H #define _UAPI__LINUX_NETLINK_H #include #include /* for __kernel_sa_family_t */ #include #define NETLINK_ROUTE 0 /* Routing/device hook */ #define NETLINK_UNUSED 1 /* Unused number */ #define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */ #define NETLINK_FIREWALL 3 /* Unused number, formerly ip_queue */ #define NETLINK_SOCK_DIAG 4 /* socket monitoring */ #define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */ #define NETLINK_XFRM 6 /* ipsec */ #define NETLINK_SELINUX 7 /* SELinux event notifications */ #define NETLINK_ISCSI 8 /* Open-iSCSI */ #define NETLINK_AUDIT 9 /* auditing */ #define NETLINK_FIB_LOOKUP 10 #define NETLINK_CONNECTOR 11 #define NETLINK_NETFILTER 12 /* netfilter subsystem */ #define NETLINK_IP6_FW 13 #define NETLINK_DNRTMSG 14 /* DECnet routing messages */ #define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */ #define NETLINK_GENERIC 16 /* leave room for NETLINK_DM (DM Events) */ #define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */ #define NETLINK_ECRYPTFS 19 #define NETLINK_RDMA 20 #define NETLINK_CRYPTO 21 /* Crypto layer */ #define NETLINK_SMC 22 /* SMC monitoring */ #define NETLINK_INET_DIAG NETLINK_SOCK_DIAG #define MAX_LINKS 32 struct sockaddr_nl { __kernel_sa_family_t nl_family; /* AF_NETLINK */ unsigned short nl_pad; /* zero */ __u32 nl_pid; /* port ID */ __u32 nl_groups; /* multicast groups mask */ }; struct nlmsghdr { __u32 nlmsg_len; /* Length of message including header */ __u16 nlmsg_type; /* Message content */ __u16 nlmsg_flags; /* Additional flags */ __u32 nlmsg_seq; /* Sequence number */ __u32 nlmsg_pid; /* Sending process port ID */ }; /* Flags values */ #define NLM_F_REQUEST 0x01 /* It is request message. 
*/ #define NLM_F_MULTI 0x02 /* Multipart message, terminated by NLMSG_DONE */ #define NLM_F_ACK 0x04 /* Reply with ack, with zero or error code */ #define NLM_F_ECHO 0x08 /* Echo this request */ #define NLM_F_DUMP_INTR 0x10 /* Dump was inconsistent due to sequence change */ #define NLM_F_DUMP_FILTERED 0x20 /* Dump was filtered as requested */ /* Modifiers to GET request */ #define NLM_F_ROOT 0x100 /* specify tree root */ #define NLM_F_MATCH 0x200 /* return all matching */ #define NLM_F_ATOMIC 0x400 /* atomic GET */ #define NLM_F_DUMP (NLM_F_ROOT|NLM_F_MATCH) /* Modifiers to NEW request */ #define NLM_F_REPLACE 0x100 /* Override existing */ #define NLM_F_EXCL 0x200 /* Do not touch, if it exists */ #define NLM_F_CREATE 0x400 /* Create, if it does not exist */ #define NLM_F_APPEND 0x800 /* Add to end of list */ /* Modifiers to DELETE request */ #define NLM_F_NONREC 0x100 /* Do not delete recursively */ /* Flags for ACK message */ #define NLM_F_CAPPED 0x100 /* request was capped */ #define NLM_F_ACK_TLVS 0x200 /* extended ACK TVLs were included */ /* 4.4BSD ADD NLM_F_CREATE|NLM_F_EXCL 4.4BSD CHANGE NLM_F_REPLACE True CHANGE NLM_F_CREATE|NLM_F_REPLACE Append NLM_F_CREATE Check NLM_F_EXCL */ #define NLMSG_ALIGNTO 4U #define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) ) #define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr))) #define NLMSG_LENGTH(len) ((len) + NLMSG_HDRLEN) #define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len)) #define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0))) #define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \ (struct nlmsghdr*)(((char*)(nlh)) + NLMSG_ALIGN((nlh)->nlmsg_len))) #define NLMSG_OK(nlh,len) ((len) >= (int)sizeof(struct nlmsghdr) && \ (nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \ (nlh)->nlmsg_len <= (len)) #define NLMSG_PAYLOAD(nlh,len) ((nlh)->nlmsg_len - NLMSG_SPACE((len))) #define NLMSG_NOOP 0x1 /* Nothing. 
*/ #define NLMSG_ERROR 0x2 /* Error */ #define NLMSG_DONE 0x3 /* End of a dump */ #define NLMSG_OVERRUN 0x4 /* Data lost */ #define NLMSG_MIN_TYPE 0x10 /* < 0x10: reserved control messages */ struct nlmsgerr { int error; struct nlmsghdr msg; /* * followed by the message contents unless NETLINK_CAP_ACK was set * or the ACK indicates success (error == 0) * message length is aligned with NLMSG_ALIGN() */ /* * followed by TLVs defined in enum nlmsgerr_attrs * if NETLINK_EXT_ACK was set */ }; /** * enum nlmsgerr_attrs - nlmsgerr attributes * @NLMSGERR_ATTR_UNUSED: unused * @NLMSGERR_ATTR_MSG: error message string (string) * @NLMSGERR_ATTR_OFFS: offset of the invalid attribute in the original * message, counting from the beginning of the header (u32) * @NLMSGERR_ATTR_COOKIE: arbitrary subsystem specific cookie to * be used - in the success case - to identify a created * object or operation or similar (binary) * @__NLMSGERR_ATTR_MAX: number of attributes * @NLMSGERR_ATTR_MAX: highest attribute number */ enum nlmsgerr_attrs { NLMSGERR_ATTR_UNUSED, NLMSGERR_ATTR_MSG, NLMSGERR_ATTR_OFFS, NLMSGERR_ATTR_COOKIE, __NLMSGERR_ATTR_MAX, NLMSGERR_ATTR_MAX = __NLMSGERR_ATTR_MAX - 1 }; #define NETLINK_ADD_MEMBERSHIP 1 #define NETLINK_DROP_MEMBERSHIP 2 #define NETLINK_PKTINFO 3 #define NETLINK_BROADCAST_ERROR 4 #define NETLINK_NO_ENOBUFS 5 #ifndef __KERNEL__ #define NETLINK_RX_RING 6 #define NETLINK_TX_RING 7 #endif #define NETLINK_LISTEN_ALL_NSID 8 #define NETLINK_LIST_MEMBERSHIPS 9 #define NETLINK_CAP_ACK 10 #define NETLINK_EXT_ACK 11 #define NETLINK_GET_STRICT_CHK 12 struct nl_pktinfo { __u32 group; }; struct nl_mmap_req { unsigned int nm_block_size; unsigned int nm_block_nr; unsigned int nm_frame_size; unsigned int nm_frame_nr; }; struct nl_mmap_hdr { unsigned int nm_status; unsigned int nm_len; __u32 nm_group; /* credentials */ __u32 nm_pid; __u32 nm_uid; __u32 nm_gid; }; #ifndef __KERNEL__ enum nl_mmap_status { NL_MMAP_STATUS_UNUSED, NL_MMAP_STATUS_RESERVED, NL_MMAP_STATUS_VALID, NL_MMAP_STATUS_COPY, NL_MMAP_STATUS_SKIP, }; #define NL_MMAP_MSG_ALIGNMENT NLMSG_ALIGNTO #define NL_MMAP_MSG_ALIGN(sz) __ALIGN_KERNEL(sz, NL_MMAP_MSG_ALIGNMENT) #define NL_MMAP_HDRLEN NL_MMAP_MSG_ALIGN(sizeof(struct nl_mmap_hdr)) #endif #define NET_MAJOR 36 /* Major 36 is reserved for networking */ enum { NETLINK_UNCONNECTED = 0, NETLINK_CONNECTED, }; /* * <------- NLA_HDRLEN ------> <-- NLA_ALIGN(payload)--> * +---------------------+- - -+- - - - - - - - - -+- - -+ * | Header | Pad | Payload | Pad | * | (struct nlattr) | ing | | ing | * +---------------------+- - -+- - - - - - - - - -+- - -+ * <-------------- nlattr->nla_len --------------> */ struct nlattr { __u16 nla_len; __u16 nla_type; }; /* * nla_type (16 bits) * +---+---+-------------------------------+ * | N | O | Attribute Type | * +---+---+-------------------------------+ * N := Carries nested attributes * O := Payload stored in network byte order * * Note: The N and O flag are mutually exclusive. */ #define NLA_F_NESTED (1 << 15) #define NLA_F_NET_BYTEORDER (1 << 14) #define NLA_TYPE_MASK ~(NLA_F_NESTED | NLA_F_NET_BYTEORDER) #define NLA_ALIGNTO 4 #define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1)) #define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr))) /* Generic 32 bitflags attribute content sent to the kernel. 
* * The value is a bitmap that defines the values being set * The selector is a bitmask that defines which value is legit * * Examples: * value = 0x0, and selector = 0x1 * implies we are selecting bit 1 and we want to set its value to 0. * * value = 0x2, and selector = 0x2 * implies we are selecting bit 2 and we want to set its value to 1. * */ struct nla_bitfield32 { __u32 value; __u32 selector; }; #endif /* _UAPI__LINUX_NETLINK_H */ dwarves-dfsg-1.15/lib/bpf/scripts/000077500000000000000000000000001344730411300170405ustar00rootroot00000000000000dwarves-dfsg-1.15/lib/bpf/scripts/check-reallocarray.sh000077500000000000000000000004561344730411300231370ustar00rootroot00000000000000#!/bin/sh tfile=$(mktemp /tmp/test_reallocarray_XXXXXXXX.c) ofile=${tfile%.c}.o cat > $tfile < int main(void) { return !!reallocarray(NULL, 1, 1); } EOL gcc $tfile -o $ofile >/dev/null 2>&1 if [ $? -ne 0 ]; then echo "FAIL"; fi /bin/rm -f $tfile $ofile dwarves-dfsg-1.15/lib/bpf/scripts/sync-kernel.sh000077500000000000000000000141751344730411300216410ustar00rootroot00000000000000#!/bin/bash usage () { echo "USAGE: ./sync-kernel.sh []" echo "" echo "If is not specified, it's read from /CHECKPOINT-COMMIT" exit 1 } LINUX_REPO=${1-""} LIBBPF_REPO=${2-""} if [ -z "${LINUX_REPO}" ]; then usage fi if [ -z "${LIBBPF_REPO}" ]; then usage fi set -eu WORKDIR=$(pwd) trap "cd ${WORKDIR}; exit" INT TERM EXIT echo "WORKDIR: ${WORKDIR}" echo "LINUX REPO: ${LINUX_REPO}" echo "LIBBPF REPO: ${LIBBPF_REPO}" SUFFIX=$(date --utc +%Y-%m-%dT%H-%M-%S.%3NZ) BASELINE_COMMIT=${3-$(cat ${LIBBPF_REPO}/CHECKPOINT-COMMIT)} # Use current kernel repo HEAD as a source of patches cd ${LINUX_REPO} TIP_SYM_REF=$(git symbolic-ref -q --short HEAD || git rev-parse HEAD) TIP_COMMIT=$(git rev-parse HEAD) BASELINE_TAG=libbpf-baseline-${SUFFIX} TIP_TAG=libbpf-tip-${SUFFIX} VIEW_TAG=libbpf-view-${SUFFIX} LIBBPF_SYNC_TAG=libbpf-sync-${SUFFIX} # Squash state of kernel repo at baseline into single commit SQUASH_BASE_TAG=libbpf-squash-base-${SUFFIX} SQUASH_TIP_TAG=libbpf-squash-tip-${SUFFIX} SQUASH_COMMIT=$(git commit-tree ${BASELINE_COMMIT}^{tree} -m "BASELINE SQUASH ${BASELINE_COMMIT}") echo "SUFFIX: ${SUFFIX}" echo "BASELINE COMMIT: $(git log --pretty=oneline --no-walk ${BASELINE_COMMIT})" echo "TIP COMMIT: $(git log --pretty=oneline --no-walk ${TIP_COMMIT})" echo "SQUASH COMMIT: ${SQUASH_COMMIT}" echo "BASELINE TAG: ${BASELINE_TAG}" echo "TIP TAG: ${TIP_TAG}" echo "SQUASH BASE TAG: ${SQUASH_BASE_TAG}" echo "SQUASH TIP TAG: ${SQUASH_TIP_TAG}" echo "VIEW TAG: ${VIEW_TAG}" echo "LIBBPF SYNC TAG: ${LIBBPF_SYNC_TAG}" TMP_DIR=$(mktemp -d) echo "TEMP DIR: ${TMP_DIR}" echo "PATCHES+COVER: ${TMP_DIR}/patches" echo "PATCHSET: ${TMP_DIR}/patchset.patch" git branch ${BASELINE_TAG} ${BASELINE_COMMIT} git branch ${TIP_TAG} ${TIP_COMMIT} git branch ${SQUASH_BASE_TAG} ${SQUASH_COMMIT} git checkout -b ${SQUASH_TIP_TAG} ${SQUASH_COMMIT} # Cherry-pick new commits onto squashed baseline commit LIBBPF_PATHS=(tools/lib/bpf tools/include/uapi/linux/{bpf_common.h,bpf.h,btf.h,if_link.h,netlink.h} tools/include/tools/libc_compat.h) LIBBPF_NEW_MERGES=$(git rev-list --merges --topo-order --reverse ${BASELINE_TAG}..${TIP_TAG} ${LIBBPF_PATHS[@]}) for LIBBPF_NEW_MERGE in ${LIBBPF_NEW_MERGES}; do printf "MERGE:\t" && git log --oneline -n1 ${LIBBPF_NEW_MERGE} MERGE_CHANGES=$(git log --format='' -n1 ${LIBBPF_NEW_MERGE} | wc -l) if ((${MERGE_CHANGES} > 0)); then echo "Merge is non empty, aborting!.." 
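# (A merge commit that itself introduces changes cannot be linearized by
# the cherry-pick loop below, so stop here for manual resolution.)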
exit 3 fi done LIBBPF_NEW_COMMITS=$(git rev-list --no-merges --topo-order --reverse ${BASELINE_TAG}..${TIP_TAG} ${LIBBPF_PATHS[@]}) for LIBBPF_NEW_COMMIT in ${LIBBPF_NEW_COMMITS}; do git cherry-pick ${LIBBPF_NEW_COMMIT} done LIBBPF_TREE_FILTER=' \ mkdir -p __libbpf/include/uapi/linux __libbpf/include/tools && \ git mv -kf tools/lib/bpf __libbpf/src && \ git mv -kf tools/include/uapi/linux/{bpf_common.h,bpf.h,btf.h,if_link.h,if_xdp.h,netlink.h} \ __libbpf/include/uapi/linux && \ git mv -kf tools/include/tools/libc_compat.h __libbpf/include/tools && \ git rm --ignore-unmatch -f __libbpf/src/{Makefile,Build,test_libbpf.cpp,.gitignore} \ ' # Move all libbpf files into __libbpf directory. git filter-branch --prune-empty -f --tree-filter "${LIBBPF_TREE_FILTER}" ${SQUASH_TIP_TAG} ${SQUASH_BASE_TAG} # Make __libbpf a new root directory git filter-branch --prune-empty -f --subdirectory-filter __libbpf ${SQUASH_TIP_TAG} ${SQUASH_BASE_TAG} # If there are no new commits with libbpf-related changes, bail out COMMIT_CNT=$(git rev-list --count ${SQUASH_BASE_TAG}..${SQUASH_TIP_TAG}) if ((${COMMIT_CNT} <= 0)); then echo "No new changes to apply, we are done!" exit 2 fi # Exclude baseline commit and generate nice cover letter with summary git format-patch ${SQUASH_BASE_TAG}..${SQUASH_TIP_TAG} --cover-letter -o ${TMP_DIR}/patches # Now generate single-file patchset w/o cover to apply on top of libbpf repo git format-patch ${SQUASH_BASE_TAG}..${SQUASH_TIP_TAG} --stdout > ${TMP_DIR}/patchset.patch # Now is time to re-apply libbpf-related linux patches to libbpf repo cd ${WORKDIR} && cd ${LIBBPF_REPO} git checkout -b ${LIBBPF_SYNC_TAG} git am --committer-date-is-author-date ${TMP_DIR}/patchset.patch # Use generated cover-letter as a template for "sync commit" with # baseline and checkpoint commits from kernel repo (and leave summary # from cover letter intact, of course) echo ${TIP_COMMIT} > CHECKPOINT-COMMIT && \ git add CHECKPOINT-COMMIT && \ awk '/\*\*\* BLURB HERE \*\*\*/ {p=1} p' ${TMP_DIR}/patches/0000-cover-letter.patch | \ sed "s/\*\*\* BLURB HERE \*\*\*/\ sync: latest libbpf changes from kernel\n\ \n\ Syncing latest libbpf commits from kernel repository.\n\ Baseline commit: ${BASELINE_COMMIT}\n\ Checkpoint commit: ${TIP_COMMIT}/" | \ git commit --file=- echo "SUCCESS! ${COMMIT_CNT} commits synced." echo "Verifying Linux's and Github's libbpf state" LIBBPF_VIEW_PATHS=(src include/uapi/linux/{bpf_common.h,bpf.h,btf.h,if_link.h,if_xdp.h,netlink.h} include/tools/libc_compat.h) LIBBPF_VIEW_EXCLUDE_REGEX='^src/(Makefile|Build|test_libbpf.cpp|\.gitignore)$' cd ${WORKDIR} && cd ${LINUX_REPO} LINUX_ABS_DIR=$(pwd) git checkout -b ${VIEW_TAG} ${TIP_COMMIT} git filter-branch -f --tree-filter "${LIBBPF_TREE_FILTER}" ${VIEW_TAG}^..${VIEW_TAG} git filter-branch -f --subdirectory-filter __libbpf ${VIEW_TAG}^..${VIEW_TAG} git ls-files -- ${LIBBPF_VIEW_PATHS[@]} > ${TMP_DIR}/linux-view.ls cd ${WORKDIR} && cd ${LIBBPF_REPO} GITHUB_ABS_DIR=$(pwd) git ls-files -- ${LIBBPF_VIEW_PATHS[@]} | grep -v -E "${LIBBPF_VIEW_EXCLUDE_REGEX}" > ${TMP_DIR}/github-view.ls echo "Comparing list of files..." diff ${TMP_DIR}/linux-view.ls ${TMP_DIR}/github-view.ls echo "Comparing file contents..." for F in $(cat ${TMP_DIR}/linux-view.ls); do diff "${LINUX_ABS_DIR}/${F}" "${GITHUB_ABS_DIR}/${F}" done echo "Contents appear identical!" echo "Cleaning up..." 
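# Scratch-state cleanup: remove the temp dir, restore the original kernel
# branch and delete the helper branches created above. Illustrative
# invocation, with hypothetical paths:
#   ./sync-kernel.sh ~/src/linux ~/src/libbpf
# where an optional third argument overrides the baseline commit read
# from CHECKPOINT-COMMIT in the libbpf repo.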
rm -r ${TMP_DIR} cd ${WORKDIR} && cd ${LINUX_REPO} git checkout ${TIP_SYM_REF} git branch -D ${BASELINE_TAG} ${TIP_TAG} ${SQUASH_BASE_TAG} ${SQUASH_TIP_TAG} ${VIEW_TAG} cd ${WORKDIR} echo "DONE." dwarves-dfsg-1.15/lib/bpf/src/000077500000000000000000000000001344730411300161405ustar00rootroot00000000000000dwarves-dfsg-1.15/lib/bpf/src/.gitignore000066400000000000000000000000101344730411300201170ustar00rootroot00000000000000*.o *.a dwarves-dfsg-1.15/lib/bpf/src/Makefile000066400000000000000000000032451344730411300176040ustar00rootroot00000000000000# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) TOPDIR = .. INCLUDES := -I. -I$(TOPDIR)/include -I$(TOPDIR)/include/uapi ALL_CFLAGS := $(INCLUDES) FEATURE_REALLOCARRAY := $(shell $(TOPDIR)/scripts/check-reallocarray.sh) ifneq ($(FEATURE_REALLOCARRAY),) ALL_CFLAGS += -DCOMPAT_NEED_REALLOCARRAY endif ifdef BUILD_SHARED ALL_CFLAGS += -fPIC -fvisibility=hidden endif CFLAGS ?= -g -O2 -Werror -Wall ALL_CFLAGS += $(CFLAGS) OBJDIR ?= . OBJS := $(addprefix $(OBJDIR)/,bpf.o btf.o libbpf.o libbpf_errno.o netlink.o \ nlattr.o str_error.o libbpf_probes.o bpf_prog_linfo.o xsk.o) LIBS := $(OBJDIR)/libbpf.a ifdef BUILD_SHARED LIBS += $(OBJDIR)/libbpf.so endif HEADERS := bpf.h libbpf.h btf.h UAPI_HEADERS := $(addprefix $(TOPDIR)/include/uapi/linux/,bpf.h bpf_common.h \ btf.h) INSTALL = install DESTDIR ?= ifeq ($(shell uname -m),x86_64) LIBSUBDIR := lib64 else LIBSUBDIR := lib endif PREFIX ?= /usr LIBDIR ?= $(PREFIX)/$(LIBSUBDIR) INCLUDEDIR ?= $(PREFIX)/include UAPIDIR ?= $(PREFIX)/include all: $(LIBS) $(OBJDIR)/libbpf.a: $(OBJS) $(AR) rcs $@ $^ $(OBJDIR)/libbpf.so: $(OBJS) $(CC) -shared $(LDFLAGS) $^ -o $@ $(OBJDIR)/%.o: %.c $(CC) $(ALL_CFLAGS) -c $< -o $@ define do_install if [ ! -d '$(DESTDIR)$2' ]; then \ $(INSTALL) -d -m 755 '$(DESTDIR)$2'; \ fi; \ $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR)$2' endef install: all install_headers $(call do_install,$(LIBS),$(LIBDIR)) install_headers: $(call do_install,$(HEADERS),$(INCLUDEDIR)/bpf,644) # UAPI headers can be installed by a different package so they're not installed # in by install rule. install_uapi_headers: $(call do_install,$(UAPI_HEADERS),$(UAPIDIR)/linux,644) clean: rm -f *.o *.a *.so dwarves-dfsg-1.15/lib/bpf/src/README.rst000066400000000000000000000127411344730411300176340ustar00rootroot00000000000000.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) libbpf API naming convention ============================ libbpf API provides access to a few logically separated groups of functions and types. Every group has its own naming convention described here. It's recommended to follow these conventions whenever a new function or type is added to keep libbpf API clean and consistent. All types and functions provided by libbpf API should have one of the following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``xsk_``. System call wrappers -------------------- System call wrappers are simple wrappers for commands supported by sys_bpf system call. These wrappers should go to ``bpf.h`` header file and map one-on-one to corresponding commands. For example ``bpf_map_lookup_elem`` wraps ``BPF_MAP_LOOKUP_ELEM`` command of sys_bpf, ``bpf_prog_attach`` wraps ``BPF_PROG_ATTACH``, etc. Objects ------- Another class of types and functions provided by libbpf API is "objects" and functions to work with them. Objects are high-level abstractions such as BPF program or BPF map. They're represented by corresponding structures such as ``struct bpf_object``, ``struct bpf_program``, ``struct bpf_map``, etc. 
Structures are forward declared and access to their fields should be provided via corresponding getters and setters rather than directly. These objects are associated with the corresponding parts of an ELF object that contains compiled BPF programs. For example, ``struct bpf_object`` represents the ELF object itself, created from an ELF file or from a buffer, ``struct bpf_program`` represents a program in the ELF object and ``struct bpf_map`` is a map. Functions that work with an object have names built from the object name, a double underscore and a part that describes the function's purpose. For example, ``bpf_object__open`` consists of the name of the corresponding object, ``bpf_object``, a double underscore and ``open``, which defines the purpose of the function: to open an ELF file and create a ``bpf_object`` from it. Another example: ``bpf_program__load`` is named for the corresponding object, ``bpf_program``, which is separated from the other part of the name by a double underscore. All objects and corresponding functions other than BTF-related ones should go to ``libbpf.h``. BTF types and functions should go to ``btf.h``. Auxiliary functions ------------------- Auxiliary functions and types that don't fit well in any of the categories described above should have the ``libbpf_`` prefix, e.g. ``libbpf_get_error`` or ``libbpf_prog_type_by_name``. AF_XDP functions ------------------- AF_XDP functions should have an ``xsk_`` prefix, e.g. ``xsk_umem__get_data`` or ``xsk_umem__create``. The interface consists of both low-level ring access functions and high-level configuration functions. These can be mixed and matched. Note that these functions are not reentrant for performance reasons. Please take a look at Documentation/networking/af_xdp.rst in the Linux kernel source tree for how to use XDP sockets and for some common mistakes in case you do not get any traffic up to user space. libbpf ABI ========== libbpf can be either linked statically or used as a DSO. To avoid possible conflicts with other libraries an application is linked with, all non-static libbpf symbols should have one of the prefixes mentioned in the API documentation above. See the API naming convention to choose the right name for a new symbol. Symbol visibility ----------------- libbpf follows the model where all global symbols have visibility "hidden" by default; to make a symbol visible, it has to be explicitly attributed with the ``LIBBPF_API`` macro. For example: .. code-block:: c LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id); This prevents accidentally exporting a symbol that is not supposed to be part of the ABI, which, in turn, improves both the libbpf developer and user experience. ABI versioning --------------- To make future ABI extensions possible, the libbpf ABI is versioned. Versioning is implemented by the ``libbpf.map`` version script that is passed to the linker. A version name is the ``LIBBPF_`` prefix plus a three-component numeric version, starting from ``0.0.1``. Every time the ABI is changed, e.g. because a new symbol is added or the semantics of an existing symbol change, the ABI version should be bumped. This ABI version bump happens at most once per kernel development cycle. For example, if the current state of ``libbpf.map`` is: .. code-block:: LIBBPF_0.0.1 { global: bpf_func_a; bpf_func_b; local: \*; }; , and a new symbol ``bpf_func_c`` is being introduced, then ``libbpf.map`` should be changed like this: .. code-block:: LIBBPF_0.0.1 { global: bpf_func_a; bpf_func_b; local: \*; }; LIBBPF_0.0.2 { global: bpf_func_c; } LIBBPF_0.0.1; , where the new version ``LIBBPF_0.0.2`` depends on the previous ``LIBBPF_0.0.1``.
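As a minimal sketch of the mechanism (illustrative only; the exact
flags live in the build system, not in this document), such a version
script is handed to the linker roughly like:

.. code-block::

    cc -shared -Wl,--version-script=libbpf.map *.o -o libbpf.so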
The format of the version script and ways to handle ABI changes, including incompatible ones, are described in detail in [1]. Stand-alone build ================= Under https://github.com/libbpf/libbpf there is a (semi-)automated mirror of the mainline's version of libbpf for a stand-alone build. However, all changes to libbpf's code base must be upstreamed through the mainline kernel tree. License ======= libbpf is dual-licensed under LGPL 2.1 and BSD 2-Clause. Links ===== [1] https://www.akkadia.org/drepper/dsohowto.pdf (Chapter 3. Maintaining APIs and ABIs). dwarves-dfsg-1.15/lib/bpf/src/bpf.c000066400000000000000000000422341344730411300170600ustar00rootroot00000000000000// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * common eBPF ELF operations. * * Copyright (C) 2013-2015 Alexei Starovoitov * Copyright (C) 2015 Wang Nan * Copyright (C) 2015 Huawei Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; * version 2.1 of the License (not later!) * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see */ #include #include #include #include #include #include #include "bpf.h" #include "libbpf.h" #include /* * When building perf, unistd.h is overridden. __NR_bpf is * required to be defined explicitly. */ #ifndef __NR_bpf # if defined(__i386__) # define __NR_bpf 357 # elif defined(__x86_64__) # define __NR_bpf 321 # elif defined(__aarch64__) # define __NR_bpf 280 # elif defined(__sparc__) # define __NR_bpf 349 # elif defined(__s390__) # define __NR_bpf 351 # else # error __NR_bpf not defined. libbpf does not support your arch. # endif #endif #ifndef min #define min(x, y) ((x) < (y) ? (x) : (y)) #endif static inline __u64 ptr_to_u64(const void *ptr) { return (__u64) (unsigned long) ptr; } static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size) { return syscall(__NR_bpf, cmd, attr, size); } static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size) { int fd; do { fd = sys_bpf(BPF_PROG_LOAD, attr, size); } while (fd < 0 && errno == EAGAIN); return fd; } int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) { __u32 name_len = create_attr->name ?
strlen(create_attr->name) : 0; union bpf_attr attr; memset(&attr, '\0', sizeof(attr)); attr.map_type = create_attr->map_type; attr.key_size = create_attr->key_size; attr.value_size = create_attr->value_size; attr.max_entries = create_attr->max_entries; attr.map_flags = create_attr->map_flags; memcpy(attr.map_name, create_attr->name, min(name_len, BPF_OBJ_NAME_LEN - 1)); attr.numa_node = create_attr->numa_node; attr.btf_fd = create_attr->btf_fd; attr.btf_key_type_id = create_attr->btf_key_type_id; attr.btf_value_type_id = create_attr->btf_value_type_id; attr.map_ifindex = create_attr->map_ifindex; attr.inner_map_fd = create_attr->inner_map_fd; return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); } int bpf_create_map_node(enum bpf_map_type map_type, const char *name, int key_size, int value_size, int max_entries, __u32 map_flags, int node) { struct bpf_create_map_attr map_attr = {}; map_attr.name = name; map_attr.map_type = map_type; map_attr.map_flags = map_flags; map_attr.key_size = key_size; map_attr.value_size = value_size; map_attr.max_entries = max_entries; if (node >= 0) { map_attr.numa_node = node; map_attr.map_flags |= BPF_F_NUMA_NODE; } return bpf_create_map_xattr(&map_attr); } int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, int max_entries, __u32 map_flags) { struct bpf_create_map_attr map_attr = {}; map_attr.map_type = map_type; map_attr.map_flags = map_flags; map_attr.key_size = key_size; map_attr.value_size = value_size; map_attr.max_entries = max_entries; return bpf_create_map_xattr(&map_attr); } int bpf_create_map_name(enum bpf_map_type map_type, const char *name, int key_size, int value_size, int max_entries, __u32 map_flags) { struct bpf_create_map_attr map_attr = {}; map_attr.name = name; map_attr.map_type = map_type; map_attr.map_flags = map_flags; map_attr.key_size = key_size; map_attr.value_size = value_size; map_attr.max_entries = max_entries; return bpf_create_map_xattr(&map_attr); } int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name, int key_size, int inner_map_fd, int max_entries, __u32 map_flags, int node) { __u32 name_len = name ? 
strlen(name) : 0; union bpf_attr attr; memset(&attr, '\0', sizeof(attr)); attr.map_type = map_type; attr.key_size = key_size; attr.value_size = 4; attr.inner_map_fd = inner_map_fd; attr.max_entries = max_entries; attr.map_flags = map_flags; memcpy(attr.map_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1)); if (node >= 0) { attr.map_flags |= BPF_F_NUMA_NODE; attr.numa_node = node; } return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); } int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name, int key_size, int inner_map_fd, int max_entries, __u32 map_flags) { return bpf_create_map_in_map_node(map_type, name, key_size, inner_map_fd, max_entries, map_flags, -1); } static void * alloc_zero_tailing_info(const void *orecord, __u32 cnt, __u32 actual_rec_size, __u32 expected_rec_size) { __u64 info_len = actual_rec_size * cnt; void *info, *nrecord; int i; info = malloc(info_len); if (!info) return NULL; /* zero out bytes kernel does not understand */ nrecord = info; for (i = 0; i < cnt; i++) { memcpy(nrecord, orecord, expected_rec_size); memset(nrecord + expected_rec_size, 0, actual_rec_size - expected_rec_size); orecord += actual_rec_size; nrecord += actual_rec_size; } return info; } int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, char *log_buf, size_t log_buf_sz) { void *finfo = NULL, *linfo = NULL; union bpf_attr attr; __u32 log_level; __u32 name_len; int fd; if (!load_attr || !log_buf != !log_buf_sz) return -EINVAL; log_level = load_attr->log_level; if (log_level > 2 || (log_level && !log_buf)) return -EINVAL; name_len = load_attr->name ? strlen(load_attr->name) : 0; memset(&attr, 0, sizeof(attr)); attr.prog_type = load_attr->prog_type; attr.expected_attach_type = load_attr->expected_attach_type; attr.insn_cnt = (__u32)load_attr->insns_cnt; attr.insns = ptr_to_u64(load_attr->insns); attr.license = ptr_to_u64(load_attr->license); attr.log_level = log_level; if (log_level) { attr.log_buf = ptr_to_u64(log_buf); attr.log_size = log_buf_sz; } else { attr.log_buf = ptr_to_u64(NULL); attr.log_size = 0; } attr.kern_version = load_attr->kern_version; attr.prog_ifindex = load_attr->prog_ifindex; attr.prog_btf_fd = load_attr->prog_btf_fd; attr.func_info_rec_size = load_attr->func_info_rec_size; attr.func_info_cnt = load_attr->func_info_cnt; attr.func_info = ptr_to_u64(load_attr->func_info); attr.line_info_rec_size = load_attr->line_info_rec_size; attr.line_info_cnt = load_attr->line_info_cnt; attr.line_info = ptr_to_u64(load_attr->line_info); memcpy(attr.prog_name, load_attr->name, min(name_len, BPF_OBJ_NAME_LEN - 1)); fd = sys_bpf_prog_load(&attr, sizeof(attr)); if (fd >= 0) return fd; /* After bpf_prog_load, the kernel may modify certain attributes * to give user space a hint how to deal with loading failure. * Check to see whether we can make some changes and load again. 
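 * (Concretely: on E2BIG the kernel reports back in the attr the
 * func_info/line_info record size it understands; the loop below then
 * uses alloc_zero_tailing_info() to re-pack the records so that every
 * byte past that kernel-known prefix is zeroed, and retries the load.)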
*/ while (errno == E2BIG && (!finfo || !linfo)) { if (!finfo && attr.func_info_cnt && attr.func_info_rec_size < load_attr->func_info_rec_size) { /* try with corrected func info records */ finfo = alloc_zero_tailing_info(load_attr->func_info, load_attr->func_info_cnt, load_attr->func_info_rec_size, attr.func_info_rec_size); if (!finfo) goto done; attr.func_info = ptr_to_u64(finfo); attr.func_info_rec_size = load_attr->func_info_rec_size; } else if (!linfo && attr.line_info_cnt && attr.line_info_rec_size < load_attr->line_info_rec_size) { linfo = alloc_zero_tailing_info(load_attr->line_info, load_attr->line_info_cnt, load_attr->line_info_rec_size, attr.line_info_rec_size); if (!linfo) goto done; attr.line_info = ptr_to_u64(linfo); attr.line_info_rec_size = load_attr->line_info_rec_size; } else { break; } fd = sys_bpf_prog_load(&attr, sizeof(attr)); if (fd >= 0) goto done; } if (log_level || !log_buf) goto done; /* Try again with log */ attr.log_buf = ptr_to_u64(log_buf); attr.log_size = log_buf_sz; attr.log_level = 1; log_buf[0] = 0; fd = sys_bpf_prog_load(&attr, sizeof(attr)); done: free(finfo); free(linfo); return fd; } int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, size_t insns_cnt, const char *license, __u32 kern_version, char *log_buf, size_t log_buf_sz) { struct bpf_load_program_attr load_attr; memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); load_attr.prog_type = type; load_attr.expected_attach_type = 0; load_attr.name = NULL; load_attr.insns = insns; load_attr.insns_cnt = insns_cnt; load_attr.license = license; load_attr.kern_version = kern_version; return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz); } int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, size_t insns_cnt, __u32 prog_flags, const char *license, __u32 kern_version, char *log_buf, size_t log_buf_sz, int log_level) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.prog_type = type; attr.insn_cnt = (__u32)insns_cnt; attr.insns = ptr_to_u64(insns); attr.license = ptr_to_u64(license); attr.log_buf = ptr_to_u64(log_buf); attr.log_size = log_buf_sz; attr.log_level = log_level; log_buf[0] = 0; attr.kern_version = kern_version; attr.prog_flags = prog_flags; return sys_bpf_prog_load(&attr, sizeof(attr)); } int bpf_map_update_elem(int fd, const void *key, const void *value, __u64 flags) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.map_fd = fd; attr.key = ptr_to_u64(key); attr.value = ptr_to_u64(value); attr.flags = flags; return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)); } int bpf_map_lookup_elem(int fd, const void *key, void *value) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.map_fd = fd; attr.key = ptr_to_u64(key); attr.value = ptr_to_u64(value); return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); } int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.map_fd = fd; attr.key = ptr_to_u64(key); attr.value = ptr_to_u64(value); attr.flags = flags; return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); } int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.map_fd = fd; attr.key = ptr_to_u64(key); attr.value = ptr_to_u64(value); return sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr)); } int bpf_map_delete_elem(int fd, const void *key) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.map_fd = fd; attr.key = 
ptr_to_u64(key); return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr)); } int bpf_map_get_next_key(int fd, const void *key, void *next_key) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.map_fd = fd; attr.key = ptr_to_u64(key); attr.next_key = ptr_to_u64(next_key); return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)); } int bpf_obj_pin(int fd, const char *pathname) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.pathname = ptr_to_u64((void *)pathname); attr.bpf_fd = fd; return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr)); } int bpf_obj_get(const char *pathname) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.pathname = ptr_to_u64((void *)pathname); return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr)); } int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type, unsigned int flags) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.target_fd = target_fd; attr.attach_bpf_fd = prog_fd; attr.attach_type = type; attr.attach_flags = flags; return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr)); } int bpf_prog_detach(int target_fd, enum bpf_attach_type type) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.target_fd = target_fd; attr.attach_type = type; return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)); } int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.target_fd = target_fd; attr.attach_bpf_fd = prog_fd; attr.attach_type = type; return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)); } int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags, __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt) { union bpf_attr attr; int ret; memset(&attr, 0, sizeof(attr)); attr.query.target_fd = target_fd; attr.query.attach_type = type; attr.query.query_flags = query_flags; attr.query.prog_cnt = *prog_cnt; attr.query.prog_ids = ptr_to_u64(prog_ids); ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr)); if (attach_flags) *attach_flags = attr.query.attach_flags; *prog_cnt = attr.query.prog_cnt; return ret; } int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, void *data_out, __u32 *size_out, __u32 *retval, __u32 *duration) { union bpf_attr attr; int ret; memset(&attr, 0, sizeof(attr)); attr.test.prog_fd = prog_fd; attr.test.data_in = ptr_to_u64(data); attr.test.data_out = ptr_to_u64(data_out); attr.test.data_size_in = size; attr.test.repeat = repeat; ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr)); if (size_out) *size_out = attr.test.data_size_out; if (retval) *retval = attr.test.retval; if (duration) *duration = attr.test.duration; return ret; } int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr) { union bpf_attr attr; int ret; if (!test_attr->data_out && test_attr->data_size_out > 0) return -EINVAL; memset(&attr, 0, sizeof(attr)); attr.test.prog_fd = test_attr->prog_fd; attr.test.data_in = ptr_to_u64(test_attr->data_in); attr.test.data_out = ptr_to_u64(test_attr->data_out); attr.test.data_size_in = test_attr->data_size_in; attr.test.data_size_out = test_attr->data_size_out; attr.test.repeat = test_attr->repeat; ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr)); test_attr->data_size_out = attr.test.data_size_out; test_attr->retval = attr.test.retval; test_attr->duration = attr.test.duration; return ret; } int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id) { union bpf_attr attr; int err; memset(&attr, 0, sizeof(attr)); attr.start_id = start_id; err = sys_bpf(BPF_PROG_GET_NEXT_ID, 
&attr, sizeof(attr)); if (!err) *next_id = attr.next_id; return err; } int bpf_map_get_next_id(__u32 start_id, __u32 *next_id) { union bpf_attr attr; int err; memset(&attr, 0, sizeof(attr)); attr.start_id = start_id; err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr)); if (!err) *next_id = attr.next_id; return err; } int bpf_prog_get_fd_by_id(__u32 id) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.prog_id = id; return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr)); } int bpf_map_get_fd_by_id(__u32 id) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.map_id = id; return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr)); } int bpf_btf_get_fd_by_id(__u32 id) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.btf_id = id; return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr)); } int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len) { union bpf_attr attr; int err; memset(&attr, 0, sizeof(attr)); attr.info.bpf_fd = prog_fd; attr.info.info_len = *info_len; attr.info.info = ptr_to_u64(info); err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)); if (!err) *info_len = attr.info.info_len; return err; } int bpf_raw_tracepoint_open(const char *name, int prog_fd) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.raw_tracepoint.name = ptr_to_u64(name); attr.raw_tracepoint.prog_fd = prog_fd; return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr)); } int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, bool do_log) { union bpf_attr attr = {}; int fd; attr.btf = ptr_to_u64(btf); attr.btf_size = btf_size; retry: if (do_log && log_buf && log_buf_size) { attr.btf_log_level = 1; attr.btf_log_size = log_buf_size; attr.btf_log_buf = ptr_to_u64(log_buf); } fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr)); if (fd == -1 && !do_log && log_buf && log_buf_size) { do_log = true; goto retry; } return fd; } int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len, __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset, __u64 *probe_addr) { union bpf_attr attr = {}; int err; attr.task_fd_query.pid = pid; attr.task_fd_query.fd = fd; attr.task_fd_query.flags = flags; attr.task_fd_query.buf = ptr_to_u64(buf); attr.task_fd_query.buf_len = *buf_len; err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr)); *buf_len = attr.task_fd_query.buf_len; *prog_id = attr.task_fd_query.prog_id; *fd_type = attr.task_fd_query.fd_type; *probe_offset = attr.task_fd_query.probe_offset; *probe_addr = attr.task_fd_query.probe_addr; return err; } dwarves-dfsg-1.15/lib/bpf/src/bpf.h000066400000000000000000000136361344730411300170710ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* * common eBPF ELF operations. * * Copyright (C) 2013-2015 Alexei Starovoitov * Copyright (C) 2015 Wang Nan * Copyright (C) 2015 Huawei Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; * version 2.1 of the License (not later!) * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see */ #ifndef __LIBBPF_BPF_H #define __LIBBPF_BPF_H #include #include #include #ifdef __cplusplus extern "C" { #endif #ifndef LIBBPF_API #define LIBBPF_API __attribute__((visibility("default"))) #endif struct bpf_create_map_attr { const char *name; enum bpf_map_type map_type; __u32 map_flags; __u32 key_size; __u32 value_size; __u32 max_entries; __u32 numa_node; __u32 btf_fd; __u32 btf_key_type_id; __u32 btf_value_type_id; __u32 map_ifindex; __u32 inner_map_fd; }; LIBBPF_API int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr); LIBBPF_API int bpf_create_map_node(enum bpf_map_type map_type, const char *name, int key_size, int value_size, int max_entries, __u32 map_flags, int node); LIBBPF_API int bpf_create_map_name(enum bpf_map_type map_type, const char *name, int key_size, int value_size, int max_entries, __u32 map_flags); LIBBPF_API int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, int max_entries, __u32 map_flags); LIBBPF_API int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name, int key_size, int inner_map_fd, int max_entries, __u32 map_flags, int node); LIBBPF_API int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name, int key_size, int inner_map_fd, int max_entries, __u32 map_flags); struct bpf_load_program_attr { enum bpf_prog_type prog_type; enum bpf_attach_type expected_attach_type; const char *name; const struct bpf_insn *insns; size_t insns_cnt; const char *license; __u32 kern_version; __u32 prog_ifindex; __u32 prog_btf_fd; __u32 func_info_rec_size; const void *func_info; __u32 func_info_cnt; __u32 line_info_rec_size; const void *line_info; __u32 line_info_cnt; __u32 log_level; }; /* Flags to direct loading requirements */ #define MAPS_RELAX_COMPAT 0x01 /* Recommend log buffer size */ #define BPF_LOG_BUF_SIZE (256 * 1024) LIBBPF_API int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, char *log_buf, size_t log_buf_sz); LIBBPF_API int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, size_t insns_cnt, const char *license, __u32 kern_version, char *log_buf, size_t log_buf_sz); LIBBPF_API int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, size_t insns_cnt, __u32 prog_flags, const char *license, __u32 kern_version, char *log_buf, size_t log_buf_sz, int log_level); LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value, __u64 flags); LIBBPF_API int bpf_map_lookup_elem(int fd, const void *key, void *value); LIBBPF_API int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags); LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value); LIBBPF_API int bpf_map_delete_elem(int fd, const void *key); LIBBPF_API int bpf_map_get_next_key(int fd, const void *key, void *next_key); LIBBPF_API int bpf_obj_pin(int fd, const char *pathname); LIBBPF_API int bpf_obj_get(const char *pathname); LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type, unsigned int flags); LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type); LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd, enum bpf_attach_type type); struct bpf_prog_test_run_attr { int prog_fd; int repeat; const void *data_in; __u32 data_size_in; void *data_out; /* optional */ __u32 data_size_out; /* in: max length of data_out * out: length of data_out 
*/ __u32 retval; /* out: return code of the BPF program */ __u32 duration; /* out: average per repetition in ns */ }; LIBBPF_API int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr); /* * bpf_prog_test_run does not check that data_out is large enough. Consider * using bpf_prog_test_run_xattr instead. */ LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, void *data_out, __u32 *size_out, __u32 *retval, __u32 *duration); LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id); LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id); LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id); LIBBPF_API int bpf_map_get_fd_by_id(__u32 id); LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id); LIBBPF_API int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len); LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags, __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt); LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd); LIBBPF_API int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, bool do_log); LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len, __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset, __u64 *probe_addr); #ifdef __cplusplus } /* extern "C" */ #endif #endif /* __LIBBPF_BPF_H */ dwarves-dfsg-1.15/lib/bpf/src/bpf_prog_linfo.c000066400000000000000000000140531344730411300212740ustar00rootroot00000000000000// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2018 Facebook */ #include #include #include #include #include "libbpf.h" #ifndef min #define min(x, y) ((x) < (y) ? (x) : (y)) #endif struct bpf_prog_linfo { void *raw_linfo; void *raw_jited_linfo; __u32 *nr_jited_linfo_per_func; __u32 *jited_linfo_func_idx; __u32 nr_linfo; __u32 nr_jited_func; __u32 rec_size; __u32 jited_rec_size; }; static int dissect_jited_func(struct bpf_prog_linfo *prog_linfo, const __u64 *ksym_func, const __u32 *ksym_len) { __u32 nr_jited_func, nr_linfo; const void *raw_jited_linfo; const __u64 *jited_linfo; __u64 last_jited_linfo; /* * Index to raw_jited_linfo: * i: Index for searching the next ksym_func * prev_i: Index to the last found ksym_func */ __u32 i, prev_i; __u32 f; /* Index to ksym_func */ raw_jited_linfo = prog_linfo->raw_jited_linfo; jited_linfo = raw_jited_linfo; if (ksym_func[0] != *jited_linfo) goto errout; prog_linfo->jited_linfo_func_idx[0] = 0; nr_jited_func = prog_linfo->nr_jited_func; nr_linfo = prog_linfo->nr_linfo; for (prev_i = 0, i = 1, f = 1; i < nr_linfo && f < nr_jited_func; i++) { raw_jited_linfo += prog_linfo->jited_rec_size; last_jited_linfo = *jited_linfo; jited_linfo = raw_jited_linfo; if (ksym_func[f] == *jited_linfo) { prog_linfo->jited_linfo_func_idx[f] = i; /* Sanity check */ if (last_jited_linfo - ksym_func[f - 1] + 1 > ksym_len[f - 1]) goto errout; prog_linfo->nr_jited_linfo_per_func[f - 1] = i - prev_i; prev_i = i; /* * The ksym_func[f] is found in jited_linfo. * Look for the next one. 
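 *
 * Layout sketch, for illustration: with nr_jited_func == 2 the
 * jited_linfo address stream consists of two increasing runs, one per
 * function, each run starting exactly at that function's ksym address:
 *
 *	jited_linfo: F0, F0+8, F0+24, F1, F1+16, ...
 *	ksym_func:   F0, F1
 *
 * so matching ksym_func[f] marks where function f's records begin.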
*/ f++; } else if (*jited_linfo <= last_jited_linfo) { /* Ensure the addr is increasing _within_ a func */ goto errout; } } if (f != nr_jited_func) goto errout; prog_linfo->nr_jited_linfo_per_func[nr_jited_func - 1] = nr_linfo - prev_i; return 0; errout: return -EINVAL; } void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo) { if (!prog_linfo) return; free(prog_linfo->raw_linfo); free(prog_linfo->raw_jited_linfo); free(prog_linfo->nr_jited_linfo_per_func); free(prog_linfo->jited_linfo_func_idx); free(prog_linfo); } struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info) { struct bpf_prog_linfo *prog_linfo; __u32 nr_linfo, nr_jited_func; nr_linfo = info->nr_line_info; if (!nr_linfo) return NULL; /* * The min size that bpf_prog_linfo has to access for * searching purpose. */ if (info->line_info_rec_size < offsetof(struct bpf_line_info, file_name_off)) return NULL; prog_linfo = calloc(1, sizeof(*prog_linfo)); if (!prog_linfo) return NULL; /* Copy xlated line_info */ prog_linfo->nr_linfo = nr_linfo; prog_linfo->rec_size = info->line_info_rec_size; prog_linfo->raw_linfo = malloc(nr_linfo * prog_linfo->rec_size); if (!prog_linfo->raw_linfo) goto err_free; memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info, nr_linfo * prog_linfo->rec_size); nr_jited_func = info->nr_jited_ksyms; if (!nr_jited_func || !info->jited_line_info || info->nr_jited_line_info != nr_linfo || info->jited_line_info_rec_size < sizeof(__u64) || info->nr_jited_func_lens != nr_jited_func || !info->jited_ksyms || !info->jited_func_lens) /* Not enough info to provide jited_line_info */ return prog_linfo; /* Copy jited_line_info */ prog_linfo->nr_jited_func = nr_jited_func; prog_linfo->jited_rec_size = info->jited_line_info_rec_size; prog_linfo->raw_jited_linfo = malloc(nr_linfo * prog_linfo->jited_rec_size); if (!prog_linfo->raw_jited_linfo) goto err_free; memcpy(prog_linfo->raw_jited_linfo, (void *)(long)info->jited_line_info, nr_linfo * prog_linfo->jited_rec_size); /* Number of jited_line_info per jited func */ prog_linfo->nr_jited_linfo_per_func = malloc(nr_jited_func * sizeof(__u32)); if (!prog_linfo->nr_jited_linfo_per_func) goto err_free; /* * For each jited func, * the start idx to the "linfo" and "jited_linfo" array, */ prog_linfo->jited_linfo_func_idx = malloc(nr_jited_func * sizeof(__u32)); if (!prog_linfo->jited_linfo_func_idx) goto err_free; if (dissect_jited_func(prog_linfo, (__u64 *)(long)info->jited_ksyms, (__u32 *)(long)info->jited_func_lens)) goto err_free; return prog_linfo; err_free: bpf_prog_linfo__free(prog_linfo); return NULL; } const struct bpf_line_info * bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo, __u64 addr, __u32 func_idx, __u32 nr_skip) { __u32 jited_rec_size, rec_size, nr_linfo, start, i; const void *raw_jited_linfo, *raw_linfo; const __u64 *jited_linfo; if (func_idx >= prog_linfo->nr_jited_func) return NULL; nr_linfo = prog_linfo->nr_jited_linfo_per_func[func_idx]; if (nr_skip >= nr_linfo) return NULL; start = prog_linfo->jited_linfo_func_idx[func_idx] + nr_skip; jited_rec_size = prog_linfo->jited_rec_size; raw_jited_linfo = prog_linfo->raw_jited_linfo + (start * jited_rec_size); jited_linfo = raw_jited_linfo; if (addr < *jited_linfo) return NULL; nr_linfo -= nr_skip; rec_size = prog_linfo->rec_size; raw_linfo = prog_linfo->raw_linfo + (start * rec_size); for (i = 0; i < nr_linfo; i++) { if (addr < *jited_linfo) break; raw_linfo += rec_size; raw_jited_linfo += jited_rec_size; jited_linfo = raw_jited_linfo; } return raw_linfo - rec_size; 
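/*
 * (Note: the loop above always steps one record past the last line_info
 * whose jited address is <= addr, so the match sits one rec_size back --
 * hence the subtraction in the return statement above.)
 */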
} const struct bpf_line_info * bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo, __u32 insn_off, __u32 nr_skip) { const struct bpf_line_info *linfo; __u32 rec_size, nr_linfo, i; const void *raw_linfo; nr_linfo = prog_linfo->nr_linfo; if (nr_skip >= nr_linfo) return NULL; rec_size = prog_linfo->rec_size; raw_linfo = prog_linfo->raw_linfo + (nr_skip * rec_size); linfo = raw_linfo; if (insn_off < linfo->insn_off) return NULL; nr_linfo -= nr_skip; for (i = 0; i < nr_linfo; i++) { if (insn_off < linfo->insn_off) break; raw_linfo += rec_size; linfo = raw_linfo; } return raw_linfo - rec_size; } dwarves-dfsg-1.15/lib/bpf/src/btf.c000066400000000000000000002151461344730411300170700ustar00rootroot00000000000000// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2018 Facebook */ #include #include #include #include #include #include #include #include "btf.h" #include "bpf.h" #include "libbpf.h" #include "libbpf_util.h" #define max(a, b) ((a) > (b) ? (a) : (b)) #define min(a, b) ((a) < (b) ? (a) : (b)) #define BTF_MAX_NR_TYPES 0x7fffffff #define BTF_MAX_STR_OFFSET 0x7fffffff #define IS_MODIFIER(k) (((k) == BTF_KIND_TYPEDEF) || \ ((k) == BTF_KIND_VOLATILE) || \ ((k) == BTF_KIND_CONST) || \ ((k) == BTF_KIND_RESTRICT)) static struct btf_type btf_void; struct btf { union { struct btf_header *hdr; void *data; }; struct btf_type **types; const char *strings; void *nohdr_data; __u32 nr_types; __u32 types_size; __u32 data_size; int fd; }; struct btf_ext_info { /* * info points to the individual info section (e.g. func_info and * line_info) from the .BTF.ext. It does not include the __u32 rec_size. */ void *info; __u32 rec_size; __u32 len; }; struct btf_ext { union { struct btf_ext_header *hdr; void *data; }; struct btf_ext_info func_info; struct btf_ext_info line_info; __u32 data_size; }; struct btf_ext_info_sec { __u32 sec_name_off; __u32 num_info; /* Followed by num_info * record_size number of bytes */ __u8 data[0]; }; /* The minimum bpf_func_info checked by the loader */ struct bpf_func_info_min { __u32 insn_off; __u32 type_id; }; /* The minimum bpf_line_info checked by the loader */ struct bpf_line_info_min { __u32 insn_off; __u32 file_name_off; __u32 line_off; __u32 line_col; }; static inline __u64 ptr_to_u64(const void *ptr) { return (__u64) (unsigned long) ptr; } static int btf_add_type(struct btf *btf, struct btf_type *t) { if (btf->types_size - btf->nr_types < 2) { struct btf_type **new_types; __u32 expand_by, new_size; if (btf->types_size == BTF_MAX_NR_TYPES) return -E2BIG; expand_by = max(btf->types_size >> 2, 16); new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by); new_types = realloc(btf->types, sizeof(*new_types) * new_size); if (!new_types) return -ENOMEM; if (btf->nr_types == 0) new_types[0] = &btf_void; btf->types = new_types; btf->types_size = new_size; } btf->types[++(btf->nr_types)] = t; return 0; } static int btf_parse_hdr(struct btf *btf) { const struct btf_header *hdr = btf->hdr; __u32 meta_left; if (btf->data_size < sizeof(struct btf_header)) { pr_debug("BTF header not found\n"); return -EINVAL; } if (hdr->magic != BTF_MAGIC) { pr_debug("Invalid BTF magic:%x\n", hdr->magic); return -EINVAL; } if (hdr->version != BTF_VERSION) { pr_debug("Unsupported BTF version:%u\n", hdr->version); return -ENOTSUP; } if (hdr->flags) { pr_debug("Unsupported BTF flags:%x\n", hdr->flags); return -ENOTSUP; } meta_left = btf->data_size - sizeof(*hdr); if (!meta_left) { pr_debug("BTF has no data\n"); return -EINVAL; } if (meta_left < hdr->type_off) { 
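/*
 * (Illustrative note, not upstream documentation: a raw BTF blob is laid
 * out as
 *
 *	struct btf_header | type section | string section
 *
 * with type_off/str_off measured from the end of the header, which is why
 * both offsets are validated against meta_left -- the bytes remaining past
 * the header -- rather than against data_size.)
 */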
pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off); return -EINVAL; } if (meta_left < hdr->str_off) { pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off); return -EINVAL; } if (hdr->type_off >= hdr->str_off) { pr_debug("BTF type section offset >= string section offset. No type?\n"); return -EINVAL; } if (hdr->type_off & 0x02) { pr_debug("BTF type section is not aligned to 4 bytes\n"); return -EINVAL; } btf->nohdr_data = btf->hdr + 1; return 0; } static int btf_parse_str_sec(struct btf *btf) { const struct btf_header *hdr = btf->hdr; const char *start = btf->nohdr_data + hdr->str_off; const char *end = start + btf->hdr->str_len; if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || start[0] || end[-1]) { pr_debug("Invalid BTF string section\n"); return -EINVAL; } btf->strings = start; return 0; } static int btf_type_size(struct btf_type *t) { int base_size = sizeof(struct btf_type); __u16 vlen = BTF_INFO_VLEN(t->info); switch (BTF_INFO_KIND(t->info)) { case BTF_KIND_FWD: case BTF_KIND_CONST: case BTF_KIND_VOLATILE: case BTF_KIND_RESTRICT: case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: return base_size; case BTF_KIND_INT: return base_size + sizeof(__u32); case BTF_KIND_ENUM: return base_size + vlen * sizeof(struct btf_enum); case BTF_KIND_ARRAY: return base_size + sizeof(struct btf_array); case BTF_KIND_STRUCT: case BTF_KIND_UNION: return base_size + vlen * sizeof(struct btf_member); case BTF_KIND_FUNC_PROTO: return base_size + vlen * sizeof(struct btf_param); default: pr_debug("Unsupported BTF_KIND:%u\n", BTF_INFO_KIND(t->info)); return -EINVAL; } } static int btf_parse_type_sec(struct btf *btf) { struct btf_header *hdr = btf->hdr; void *nohdr_data = btf->nohdr_data; void *next_type = nohdr_data + hdr->type_off; void *end_type = nohdr_data + hdr->str_off; while (next_type < end_type) { struct btf_type *t = next_type; int type_size; int err; type_size = btf_type_size(t); if (type_size < 0) return type_size; next_type += type_size; err = btf_add_type(btf, t); if (err) return err; } return 0; } __u32 btf__get_nr_types(const struct btf *btf) { return btf->nr_types; } const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) { if (type_id > btf->nr_types) return NULL; return btf->types[type_id]; } static bool btf_type_is_void(const struct btf_type *t) { return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD; } static bool btf_type_is_void_or_null(const struct btf_type *t) { return !t || btf_type_is_void(t); } #define MAX_RESOLVE_DEPTH 32 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) { const struct btf_array *array; const struct btf_type *t; __u32 nelems = 1; __s64 size = -1; int i; t = btf__type_by_id(btf, type_id); for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) { switch (BTF_INFO_KIND(t->info)) { case BTF_KIND_INT: case BTF_KIND_STRUCT: case BTF_KIND_UNION: case BTF_KIND_ENUM: size = t->size; goto done; case BTF_KIND_PTR: size = sizeof(void *); goto done; case BTF_KIND_TYPEDEF: case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: type_id = t->type; break; case BTF_KIND_ARRAY: array = (const struct btf_array *)(t + 1); if (nelems && array->nelems > UINT32_MAX / nelems) return -E2BIG; nelems *= array->nelems; type_id = array->type; break; default: return -EINVAL; } t = btf__type_by_id(btf, type_id); } if (size < 0) return -EINVAL; done: if (nelems && size > UINT32_MAX / nelems) return -E2BIG; return nelems * size; } int btf__resolve_type(const struct btf *btf, 
__u32 type_id) { const struct btf_type *t; int depth = 0; t = btf__type_by_id(btf, type_id); while (depth < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t) && IS_MODIFIER(BTF_INFO_KIND(t->info))) { type_id = t->type; t = btf__type_by_id(btf, type_id); depth++; } if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t)) return -EINVAL; return type_id; } __s32 btf__find_by_name(const struct btf *btf, const char *type_name) { __u32 i; if (!strcmp(type_name, "void")) return 0; for (i = 1; i <= btf->nr_types; i++) { const struct btf_type *t = btf->types[i]; const char *name = btf__name_by_offset(btf, t->name_off); if (name && !strcmp(type_name, name)) return i; } return -ENOENT; } void btf__free(struct btf *btf) { if (!btf) return; if (btf->fd != -1) close(btf->fd); free(btf->data); free(btf->types); free(btf); } struct btf *btf__new(__u8 *data, __u32 size) { struct btf *btf; int err; btf = calloc(1, sizeof(struct btf)); if (!btf) return ERR_PTR(-ENOMEM); btf->fd = -1; btf->data = malloc(size); if (!btf->data) { err = -ENOMEM; goto done; } memcpy(btf->data, data, size); btf->data_size = size; err = btf_parse_hdr(btf); if (err) goto done; err = btf_parse_str_sec(btf); if (err) goto done; err = btf_parse_type_sec(btf); done: if (err) { btf__free(btf); return ERR_PTR(err); } return btf; } int btf__load(struct btf *btf) { __u32 log_buf_size = BPF_LOG_BUF_SIZE; char *log_buf = NULL; int err = 0; if (btf->fd >= 0) return -EEXIST; log_buf = malloc(log_buf_size); if (!log_buf) return -ENOMEM; *log_buf = 0; btf->fd = bpf_load_btf(btf->data, btf->data_size, log_buf, log_buf_size, false); if (btf->fd < 0) { err = -errno; pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno); if (*log_buf) pr_warning("%s\n", log_buf); goto done; } done: free(log_buf); return err; } int btf__fd(const struct btf *btf) { return btf->fd; } const void *btf__get_raw_data(const struct btf *btf, __u32 *size) { *size = btf->data_size; return btf->data; } const char *btf__name_by_offset(const struct btf *btf, __u32 offset) { if (offset < btf->hdr->str_len) return &btf->strings[offset]; else return NULL; } int btf__get_from_id(__u32 id, struct btf **btf) { struct bpf_btf_info btf_info = { 0 }; __u32 len = sizeof(btf_info); __u32 last_size; int btf_fd; void *ptr; int err; err = 0; *btf = NULL; btf_fd = bpf_btf_get_fd_by_id(id); if (btf_fd < 0) return 0; /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so * let's start with a sane default - 4KiB here - and resize it only if * bpf_obj_get_info_by_fd() needs a bigger buffer. 
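 *
 * (Put differently: the first call below both fills our 4KiB guess and
 * reports the true size back through btf_info.btf_size; only if the
 * reported size exceeds the guess do we realloc and repeat the call once
 * more with the bigger buffer.)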
*/ btf_info.btf_size = 4096; last_size = btf_info.btf_size; ptr = malloc(last_size); if (!ptr) { err = -ENOMEM; goto exit_free; } memset(ptr, 0, last_size); btf_info.btf = ptr_to_u64(ptr); err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); if (!err && btf_info.btf_size > last_size) { void *temp_ptr; last_size = btf_info.btf_size; temp_ptr = realloc(ptr, last_size); if (!temp_ptr) { err = -ENOMEM; goto exit_free; } ptr = temp_ptr; memset(ptr, 0, last_size); btf_info.btf = ptr_to_u64(ptr); err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); } if (err || btf_info.btf_size > last_size) { err = errno; goto exit_free; } *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size); if (IS_ERR(*btf)) { err = PTR_ERR(*btf); *btf = NULL; } exit_free: close(btf_fd); free(ptr); return err; } int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, __u32 expected_key_size, __u32 expected_value_size, __u32 *key_type_id, __u32 *value_type_id) { const struct btf_type *container_type; const struct btf_member *key, *value; const size_t max_name = 256; char container_name[max_name]; __s64 key_size, value_size; __s32 container_id; if (snprintf(container_name, max_name, "____btf_map_%s", map_name) == max_name) { pr_warning("map:%s length of '____btf_map_%s' is too long\n", map_name, map_name); return -EINVAL; } container_id = btf__find_by_name(btf, container_name); if (container_id < 0) { pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n", map_name, container_name); return container_id; } container_type = btf__type_by_id(btf, container_id); if (!container_type) { pr_warning("map:%s cannot find BTF type for container_id:%u\n", map_name, container_id); return -EINVAL; } if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT || BTF_INFO_VLEN(container_type->info) < 2) { pr_warning("map:%s container_name:%s is an invalid container struct\n", map_name, container_name); return -EINVAL; } key = (struct btf_member *)(container_type + 1); value = key + 1; key_size = btf__resolve_size(btf, key->type); if (key_size < 0) { pr_warning("map:%s invalid BTF key_type_size\n", map_name); return key_size; } if (expected_key_size != key_size) { pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n", map_name, (__u32)key_size, expected_key_size); return -EINVAL; } value_size = btf__resolve_size(btf, value->type); if (value_size < 0) { pr_warning("map:%s invalid BTF value_type_size\n", map_name); return value_size; } if (expected_value_size != value_size) { pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n", map_name, (__u32)value_size, expected_value_size); return -EINVAL; } *key_type_id = key->type; *value_type_id = value->type; return 0; } struct btf_ext_sec_setup_param { __u32 off; __u32 len; __u32 min_rec_size; struct btf_ext_info *ext_info; const char *desc; }; static int btf_ext_setup_info(struct btf_ext *btf_ext, struct btf_ext_sec_setup_param *ext_sec) { const struct btf_ext_info_sec *sinfo; struct btf_ext_info *ext_info; __u32 info_left, record_size; /* The start of the info sec (including the __u32 record_size). 
*/ void *info; if (ext_sec->off & 0x03) { pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n", ext_sec->desc); return -EINVAL; } info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off; info_left = ext_sec->len; if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) { pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n", ext_sec->desc, ext_sec->off, ext_sec->len); return -EINVAL; } /* At least a record size */ if (info_left < sizeof(__u32)) { pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc); return -EINVAL; } /* The record size needs to meet the minimum standard */ record_size = *(__u32 *)info; if (record_size < ext_sec->min_rec_size || record_size & 0x03) { pr_debug("%s section in .BTF.ext has invalid record size %u\n", ext_sec->desc, record_size); return -EINVAL; } sinfo = info + sizeof(__u32); info_left -= sizeof(__u32); /* If no records, return failure now so .BTF.ext won't be used. */ if (!info_left) { pr_debug("%s section in .BTF.ext has no records", ext_sec->desc); return -EINVAL; } while (info_left) { unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec); __u64 total_record_size; __u32 num_records; if (info_left < sec_hdrlen) { pr_debug("%s section header is not found in .BTF.ext\n", ext_sec->desc); return -EINVAL; } num_records = sinfo->num_info; if (num_records == 0) { pr_debug("%s section has incorrect num_records in .BTF.ext\n", ext_sec->desc); return -EINVAL; } total_record_size = sec_hdrlen + (__u64)num_records * record_size; if (info_left < total_record_size) { pr_debug("%s section has incorrect num_records in .BTF.ext\n", ext_sec->desc); return -EINVAL; } info_left -= total_record_size; sinfo = (void *)sinfo + total_record_size; } ext_info = ext_sec->ext_info; ext_info->len = ext_sec->len - sizeof(__u32); ext_info->rec_size = record_size; ext_info->info = info + sizeof(__u32); return 0; } static int btf_ext_setup_func_info(struct btf_ext *btf_ext) { struct btf_ext_sec_setup_param param = { .off = btf_ext->hdr->func_info_off, .len = btf_ext->hdr->func_info_len, .min_rec_size = sizeof(struct bpf_func_info_min), .ext_info = &btf_ext->func_info, .desc = "func_info" }; return btf_ext_setup_info(btf_ext, ¶m); } static int btf_ext_setup_line_info(struct btf_ext *btf_ext) { struct btf_ext_sec_setup_param param = { .off = btf_ext->hdr->line_info_off, .len = btf_ext->hdr->line_info_len, .min_rec_size = sizeof(struct bpf_line_info_min), .ext_info = &btf_ext->line_info, .desc = "line_info", }; return btf_ext_setup_info(btf_ext, ¶m); } static int btf_ext_parse_hdr(__u8 *data, __u32 data_size) { const struct btf_ext_header *hdr = (struct btf_ext_header *)data; if (data_size < offsetof(struct btf_ext_header, func_info_off) || data_size < hdr->hdr_len) { pr_debug("BTF.ext header not found"); return -EINVAL; } if (hdr->magic != BTF_MAGIC) { pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic); return -EINVAL; } if (hdr->version != BTF_VERSION) { pr_debug("Unsupported BTF.ext version:%u\n", hdr->version); return -ENOTSUP; } if (hdr->flags) { pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags); return -ENOTSUP; } if (data_size == hdr->hdr_len) { pr_debug("BTF.ext has no data\n"); return -EINVAL; } return 0; } void btf_ext__free(struct btf_ext *btf_ext) { if (!btf_ext) return; free(btf_ext->data); free(btf_ext); } struct btf_ext *btf_ext__new(__u8 *data, __u32 size) { struct btf_ext *btf_ext; int err; err = btf_ext_parse_hdr(data, size); if (err) return ERR_PTR(err); btf_ext = calloc(1, sizeof(struct btf_ext)); if (!btf_ext) 
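/*
 * Illustrative sketch (not part of libbpf): a typical caller pairs this
 * constructor with the reloc helpers defined below; the section name
 * ".text" here is just an assumed example:
 *
 *	struct btf_ext *ext = btf_ext__new(data, size);
 *	void *finfo = NULL;
 *	__u32 cnt = 0;
 *
 *	if (!IS_ERR(ext) &&
 *	    !btf_ext__reloc_func_info(btf, ext, ".text", 0, &finfo, &cnt)) {
 *		... feed finfo/cnt and btf_ext__func_info_rec_size(ext)
 *		    into struct bpf_load_program_attr ...
 *	}
 *	free(finfo);
 *	btf_ext__free(ext);
 */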
return ERR_PTR(-ENOMEM); btf_ext->data_size = size; btf_ext->data = malloc(size); if (!btf_ext->data) { err = -ENOMEM; goto done; } memcpy(btf_ext->data, data, size); err = btf_ext_setup_func_info(btf_ext); if (err) goto done; err = btf_ext_setup_line_info(btf_ext); if (err) goto done; done: if (err) { btf_ext__free(btf_ext); return ERR_PTR(err); } return btf_ext; } const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size) { *size = btf_ext->data_size; return btf_ext->data; } static int btf_ext_reloc_info(const struct btf *btf, const struct btf_ext_info *ext_info, const char *sec_name, __u32 insns_cnt, void **info, __u32 *cnt) { __u32 sec_hdrlen = sizeof(struct btf_ext_info_sec); __u32 i, record_size, existing_len, records_len; struct btf_ext_info_sec *sinfo; const char *info_sec_name; __u64 remain_len; void *data; record_size = ext_info->rec_size; sinfo = ext_info->info; remain_len = ext_info->len; while (remain_len > 0) { records_len = sinfo->num_info * record_size; info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off); if (strcmp(info_sec_name, sec_name)) { remain_len -= sec_hdrlen + records_len; sinfo = (void *)sinfo + sec_hdrlen + records_len; continue; } existing_len = (*cnt) * record_size; data = realloc(*info, existing_len + records_len); if (!data) return -ENOMEM; memcpy(data + existing_len, sinfo->data, records_len); /* adjust insn_off only, the rest data will be passed * to the kernel. */ for (i = 0; i < sinfo->num_info; i++) { __u32 *insn_off; insn_off = data + existing_len + (i * record_size); *insn_off = *insn_off / sizeof(struct bpf_insn) + insns_cnt; } *info = data; *cnt += sinfo->num_info; return 0; } return -ENOENT; } int btf_ext__reloc_func_info(const struct btf *btf, const struct btf_ext *btf_ext, const char *sec_name, __u32 insns_cnt, void **func_info, __u32 *cnt) { return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name, insns_cnt, func_info, cnt); } int btf_ext__reloc_line_info(const struct btf *btf, const struct btf_ext *btf_ext, const char *sec_name, __u32 insns_cnt, void **line_info, __u32 *cnt) { return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name, insns_cnt, line_info, cnt); } __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext) { return btf_ext->func_info.rec_size; } __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext) { return btf_ext->line_info.rec_size; } struct btf_dedup; static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, const struct btf_dedup_opts *opts); static void btf_dedup_free(struct btf_dedup *d); static int btf_dedup_strings(struct btf_dedup *d); static int btf_dedup_prim_types(struct btf_dedup *d); static int btf_dedup_struct_types(struct btf_dedup *d); static int btf_dedup_ref_types(struct btf_dedup *d); static int btf_dedup_compact_types(struct btf_dedup *d); static int btf_dedup_remap_types(struct btf_dedup *d); /* * Deduplicate BTF types and strings. * * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF * section with all BTF type descriptors and string data. It overwrites that * memory in-place with deduplicated types and strings without any loss of * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section * is provided, all the strings referenced from .BTF.ext section are honored * and updated to point to the right offsets after deduplication. * * If function returns with error, type/string data might be garbled and should * be discarded. 
* * More verbose and detailed description of both the problem btf_dedup is solving * and its solution can be found at: * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html * * Problem description and justification * ===================================== * * BTF type information is typically emitted either as a result of conversion * from DWARF to BTF or directly by the compiler. In both cases, each compilation * unit contains information about a subset of all the types that are used * in an application. These subsets are frequently overlapping and contain a lot * of duplicated information when later concatenated together into a single * binary. This algorithm ensures that each unique type is represented by a single * BTF type descriptor, greatly reducing the resulting size of BTF data. * * Compilation unit isolation and subsequent duplication of data is not the only * problem. The same type hierarchy (e.g., a struct and all the types that struct * references) in different compilation units can be represented in BTF to * various degrees of completeness (or, rather, incompleteness) due to * struct/union forward declarations. * * Let's take a look at an example that we'll use to better understand the * problem (and solution). Suppose we have two compilation units, each using the * same `struct S`, but each of them having incomplete type information about * the struct's fields: * * // CU #1: * struct S; * struct A { * int a; * struct A* self; * struct S* parent; * }; * struct B; * struct S { * struct A* a_ptr; * struct B* b_ptr; * }; * * // CU #2: * struct S; * struct A; * struct B { * int b; * struct B* self; * struct S* parent; * }; * struct S { * struct A* a_ptr; * struct B* b_ptr; * }; * * In case of CU #1, BTF data will know only that `struct B` exists (but no * more), but will know the complete type information about `struct A`. While * for CU #2, it will know full type information about `struct B`, but will * only know about the forward declaration of `struct A` (in BTF terms, it will * have a `BTF_KIND_FWD` type descriptor with name `A`). * * This compilation unit isolation means that it's possible that there is no * single CU with complete type information describing structs `S`, `A`, and * `B`. Also, we might get tons of duplicated and redundant type information. * * An additional complication we need to keep in mind comes from the fact that * types, in general, can form graphs containing cycles, not just DAGs. * * While the algorithm does deduplication, it also merges and resolves type * information (unless disabled through `struct btf_dedup_opts`), whenever possible. * E.g., in the example above with two compilation units having partial type * information for structs `A` and `B`, the output of the algorithm will emit * a single copy of each BTF type that describes structs `A`, `B`, and `S` * (as well as type information for `int` and pointers), as if they were defined * in a single compilation unit as: * * struct A { * int a; * struct A* self; * struct S* parent; * }; * struct B { * int b; * struct B* self; * struct S* parent; * }; * struct S { * struct A* a_ptr; * struct B* b_ptr; * }; * * Algorithm summary * ================= * * Algorithm completes its work in 6 separate passes: * * 1. Strings deduplication. * 2. Primitive types deduplication (int, enum, fwd). * 3. Struct/union types deduplication. * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func * protos, and const/volatile/restrict modifiers). * 5. Types compaction. * 6. Types remapping. 
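 *
 * (Illustrative usage sketch, not upstream documentation: the entry point
 * below is typically driven as
 *
 *	struct btf_dedup_opts opts = { .dont_resolve_fwds = false, };
 *
 *	err = btf__dedup(btf, btf_ext, &opts);
 *	if (err)
 *		... discard btf/btf_ext, their data may be garbled ...
 *
 * after which btf__get_raw_data() returns the compacted sections.)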
* * Algorithm determines canonical type descriptor, which is a single * representative type for each truly unique type. This canonical type is the * one that will go into final deduplicated BTF type information. For * struct/unions, it is also the type that algorithm will merge additional type * information into (while resolving FWDs), as it discovers it from data in * other CUs. Each input BTF type eventually gets either mapped to itself, if * that type is canonical, or to some other type, if that type is equivalent * and was chosen as canonical representative. This mapping is stored in * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that * FWD type got resolved to. * * To facilitate fast discovery of canonical types, we also maintain canonical * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types * that match that signature. With sufficiently good choice of type signature * hashing function, we can limit number of canonical types for each unique type * signature to a very small number, allowing to find canonical type for any * duplicated type very quickly. * * Struct/union deduplication is the most critical part and algorithm for * deduplicating structs/unions is described in greater details in comments for * `btf_dedup_is_equiv` function. */ int btf__dedup(struct btf *btf, struct btf_ext *btf_ext, const struct btf_dedup_opts *opts) { struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts); int err; if (IS_ERR(d)) { pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d)); return -EINVAL; } err = btf_dedup_strings(d); if (err < 0) { pr_debug("btf_dedup_strings failed:%d\n", err); goto done; } err = btf_dedup_prim_types(d); if (err < 0) { pr_debug("btf_dedup_prim_types failed:%d\n", err); goto done; } err = btf_dedup_struct_types(d); if (err < 0) { pr_debug("btf_dedup_struct_types failed:%d\n", err); goto done; } err = btf_dedup_ref_types(d); if (err < 0) { pr_debug("btf_dedup_ref_types failed:%d\n", err); goto done; } err = btf_dedup_compact_types(d); if (err < 0) { pr_debug("btf_dedup_compact_types failed:%d\n", err); goto done; } err = btf_dedup_remap_types(d); if (err < 0) { pr_debug("btf_dedup_remap_types failed:%d\n", err); goto done; } done: btf_dedup_free(d); return err; } #define BTF_DEDUP_TABLE_DEFAULT_SIZE (1 << 14) #define BTF_DEDUP_TABLE_MAX_SIZE_LOG 31 #define BTF_UNPROCESSED_ID ((__u32)-1) #define BTF_IN_PROGRESS_ID ((__u32)-2) struct btf_dedup_node { struct btf_dedup_node *next; __u32 type_id; }; struct btf_dedup { /* .BTF section to be deduped in-place */ struct btf *btf; /* * Optional .BTF.ext section. When provided, any strings referenced * from it will be taken into account when deduping strings */ struct btf_ext *btf_ext; /* * This is a map from any type's signature hash to a list of possible * canonical representative type candidates. Hash collisions are * ignored, so even types of various kinds can share same list of * candidates, which is fine because we rely on subsequent * btf_xxx_equal() checks to authoritatively verify type equality. 
*/ struct btf_dedup_node **dedup_table; /* Canonical types map */ __u32 *map; /* Hypothetical mapping, used during type graph equivalence checks */ __u32 *hypot_map; __u32 *hypot_list; size_t hypot_cnt; size_t hypot_cap; /* Various options modifying behavior of algorithm */ struct btf_dedup_opts opts; }; struct btf_str_ptr { const char *str; __u32 new_off; bool used; }; struct btf_str_ptrs { struct btf_str_ptr *ptrs; const char *data; __u32 cnt; __u32 cap; }; static inline __u32 hash_combine(__u32 h, __u32 value) { /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ #define GOLDEN_RATIO_PRIME 0x9e370001UL return h * 37 + value * GOLDEN_RATIO_PRIME; #undef GOLDEN_RATIO_PRIME } #define for_each_dedup_cand(d, hash, node) \ for (node = d->dedup_table[hash & (d->opts.dedup_table_size - 1)]; \ node; \ node = node->next) static int btf_dedup_table_add(struct btf_dedup *d, __u32 hash, __u32 type_id) { struct btf_dedup_node *node = malloc(sizeof(struct btf_dedup_node)); int bucket = hash & (d->opts.dedup_table_size - 1); if (!node) return -ENOMEM; node->type_id = type_id; node->next = d->dedup_table[bucket]; d->dedup_table[bucket] = node; return 0; } static int btf_dedup_hypot_map_add(struct btf_dedup *d, __u32 from_id, __u32 to_id) { if (d->hypot_cnt == d->hypot_cap) { __u32 *new_list; d->hypot_cap += max(16, d->hypot_cap / 2); new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap); if (!new_list) return -ENOMEM; d->hypot_list = new_list; } d->hypot_list[d->hypot_cnt++] = from_id; d->hypot_map[from_id] = to_id; return 0; } static void btf_dedup_clear_hypot_map(struct btf_dedup *d) { int i; for (i = 0; i < d->hypot_cnt; i++) d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID; d->hypot_cnt = 0; } static void btf_dedup_table_free(struct btf_dedup *d) { struct btf_dedup_node *tmp; int i; if (!d->dedup_table) return; for (i = 0; i < d->opts.dedup_table_size; i++) { while (d->dedup_table[i]) { tmp = d->dedup_table[i]; d->dedup_table[i] = tmp->next; free(tmp); } } free(d->dedup_table); d->dedup_table = NULL; } static void btf_dedup_free(struct btf_dedup *d) { btf_dedup_table_free(d); free(d->map); d->map = NULL; free(d->hypot_map); d->hypot_map = NULL; free(d->hypot_list); d->hypot_list = NULL; free(d); } /* Find closest power of two >= to size, capped at 2^max_size_log */ static __u32 roundup_pow2_max(__u32 size, int max_size_log) { int i; for (i = 0; i < max_size_log && (1U << i) < size; i++) ; return 1U << i; } static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, const struct btf_dedup_opts *opts) { struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup)); int i, err = 0; __u32 sz; if (!d) return ERR_PTR(-ENOMEM); d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds; sz = opts && opts->dedup_table_size ? 
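/*
 * (Illustrative note: roundup_pow2_max() above maps e.g. a requested size
 * of 5000 up to 8192 and caps the table at 2^31 buckets, so that the
 * 'hash & (d->opts.dedup_table_size - 1)' indexing used by
 * for_each_dedup_cand() and btf_dedup_table_add() stays a cheap
 * power-of-two mask.)
 */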
opts->dedup_table_size : BTF_DEDUP_TABLE_DEFAULT_SIZE; sz = roundup_pow2_max(sz, BTF_DEDUP_TABLE_MAX_SIZE_LOG); d->opts.dedup_table_size = sz; d->btf = btf; d->btf_ext = btf_ext; d->dedup_table = calloc(d->opts.dedup_table_size, sizeof(struct btf_dedup_node *)); if (!d->dedup_table) { err = -ENOMEM; goto done; } d->map = malloc(sizeof(__u32) * (1 + btf->nr_types)); if (!d->map) { err = -ENOMEM; goto done; } /* special BTF "void" type is made canonical immediately */ d->map[0] = 0; for (i = 1; i <= btf->nr_types; i++) d->map[i] = BTF_UNPROCESSED_ID; d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types)); if (!d->hypot_map) { err = -ENOMEM; goto done; } for (i = 0; i <= btf->nr_types; i++) d->hypot_map[i] = BTF_UNPROCESSED_ID; done: if (err) { btf_dedup_free(d); return ERR_PTR(err); } return d; } typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx); /* * Iterate over all possible places in .BTF and .BTF.ext that can reference * string and pass pointer to it to a provided callback `fn`. */ static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx) { void *line_data_cur, *line_data_end; int i, j, r, rec_size; struct btf_type *t; for (i = 1; i <= d->btf->nr_types; i++) { t = d->btf->types[i]; r = fn(&t->name_off, ctx); if (r) return r; switch (BTF_INFO_KIND(t->info)) { case BTF_KIND_STRUCT: case BTF_KIND_UNION: { struct btf_member *m = (struct btf_member *)(t + 1); __u16 vlen = BTF_INFO_VLEN(t->info); for (j = 0; j < vlen; j++) { r = fn(&m->name_off, ctx); if (r) return r; m++; } break; } case BTF_KIND_ENUM: { struct btf_enum *m = (struct btf_enum *)(t + 1); __u16 vlen = BTF_INFO_VLEN(t->info); for (j = 0; j < vlen; j++) { r = fn(&m->name_off, ctx); if (r) return r; m++; } break; } case BTF_KIND_FUNC_PROTO: { struct btf_param *m = (struct btf_param *)(t + 1); __u16 vlen = BTF_INFO_VLEN(t->info); for (j = 0; j < vlen; j++) { r = fn(&m->name_off, ctx); if (r) return r; m++; } break; } default: break; } } if (!d->btf_ext) return 0; line_data_cur = d->btf_ext->line_info.info; line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len; rec_size = d->btf_ext->line_info.rec_size; while (line_data_cur < line_data_end) { struct btf_ext_info_sec *sec = line_data_cur; struct bpf_line_info_min *line_info; __u32 num_info = sec->num_info; r = fn(&sec->sec_name_off, ctx); if (r) return r; line_data_cur += sizeof(struct btf_ext_info_sec); for (i = 0; i < num_info; i++) { line_info = line_data_cur; r = fn(&line_info->file_name_off, ctx); if (r) return r; r = fn(&line_info->line_off, ctx); if (r) return r; line_data_cur += rec_size; } } return 0; } static int str_sort_by_content(const void *a1, const void *a2) { const struct btf_str_ptr *p1 = a1; const struct btf_str_ptr *p2 = a2; return strcmp(p1->str, p2->str); } static int str_sort_by_offset(const void *a1, const void *a2) { const struct btf_str_ptr *p1 = a1; const struct btf_str_ptr *p2 = a2; if (p1->str != p2->str) return p1->str < p2->str ? -1 : 1; return 0; } static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem) { const struct btf_str_ptr *p = pelem; if (str_ptr != p->str) return (const char *)str_ptr < p->str ? 
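/*
 * (Illustrative sketch, not part of libbpf: btf_for_each_str_off() above
 * is the single traversal primitive that btf_dedup_strings() reuses
 * twice -- once with btf_str_mark_as_used and once with
 * btf_str_remap_offset, both defined below. A callback receives a pointer
 * to each string offset and may read or rewrite it in place, e.g.:
 *
 *	static int dump_str(__u32 *str_off_ptr, void *ctx)
 *	{
 *		const struct btf *btf = ctx;
 *
 *		printf("%s\n", btf__name_by_offset(btf, *str_off_ptr));
 *		return 0;
 *	}
 */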
-1 : 1; return 0; } static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx) { struct btf_str_ptrs *strs; struct btf_str_ptr *s; if (*str_off_ptr == 0) return 0; strs = ctx; s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt, sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp); if (!s) return -EINVAL; s->used = true; return 0; } static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx) { struct btf_str_ptrs *strs; struct btf_str_ptr *s; if (*str_off_ptr == 0) return 0; strs = ctx; s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt, sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp); if (!s) return -EINVAL; *str_off_ptr = s->new_off; return 0; } /* * Dedup string and filter out those that are not referenced from either .BTF * or .BTF.ext (if provided) sections. * * This is done by building index of all strings in BTF's string section, * then iterating over all entities that can reference strings (e.g., type * names, struct field names, .BTF.ext line info, etc) and marking corresponding * strings as used. After that all used strings are deduped and compacted into * sequential blob of memory and new offsets are calculated. Then all the string * references are iterated again and rewritten using new offsets. */ static int btf_dedup_strings(struct btf_dedup *d) { const struct btf_header *hdr = d->btf->hdr; char *start = (char *)d->btf->nohdr_data + hdr->str_off; char *end = start + d->btf->hdr->str_len; char *p = start, *tmp_strs = NULL; struct btf_str_ptrs strs = { .cnt = 0, .cap = 0, .ptrs = NULL, .data = start, }; int i, j, err = 0, grp_idx; bool grp_used; /* build index of all strings */ while (p < end) { if (strs.cnt + 1 > strs.cap) { struct btf_str_ptr *new_ptrs; strs.cap += max(strs.cnt / 2, 16); new_ptrs = realloc(strs.ptrs, sizeof(strs.ptrs[0]) * strs.cap); if (!new_ptrs) { err = -ENOMEM; goto done; } strs.ptrs = new_ptrs; } strs.ptrs[strs.cnt].str = p; strs.ptrs[strs.cnt].used = false; p += strlen(p) + 1; strs.cnt++; } /* temporary storage for deduplicated strings */ tmp_strs = malloc(d->btf->hdr->str_len); if (!tmp_strs) { err = -ENOMEM; goto done; } /* mark all used strings */ strs.ptrs[0].used = true; err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs); if (err) goto done; /* sort strings by context, so that we can identify duplicates */ qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content); /* * iterate groups of equal strings and if any instance in a group was * referenced, emit single instance and remember new offset */ p = tmp_strs; grp_idx = 0; grp_used = strs.ptrs[0].used; /* iterate past end to avoid code duplication after loop */ for (i = 1; i <= strs.cnt; i++) { /* * when i == strs.cnt, we want to skip string comparison and go * straight to handling last group of strings (otherwise we'd * need to handle last group after the loop w/ duplicated code) */ if (i < strs.cnt && !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) { grp_used = grp_used || strs.ptrs[i].used; continue; } /* * this check would have been required after the loop to handle * last group of strings, but due to <= condition in a loop * we avoid that duplication */ if (grp_used) { int new_off = p - tmp_strs; __u32 len = strlen(strs.ptrs[grp_idx].str); memmove(p, strs.ptrs[grp_idx].str, len + 1); for (j = grp_idx; j < i; j++) strs.ptrs[j].new_off = new_off; p += len + 1; } if (i < strs.cnt) { grp_idx = i; grp_used = strs.ptrs[i].used; } } /* replace original strings with deduped ones */ d->btf->hdr->str_len = p - tmp_strs; memmove(start, tmp_strs, 
d->btf->hdr->str_len); end = start + d->btf->hdr->str_len; /* restore original order for further binary search lookups */ qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset); /* remap string offsets */ err = btf_for_each_str_off(d, btf_str_remap_offset, &strs); if (err) goto done; d->btf->hdr->str_len = end - start; done: free(tmp_strs); free(strs.ptrs); return err; } static __u32 btf_hash_common(struct btf_type *t) { __u32 h; h = hash_combine(0, t->name_off); h = hash_combine(h, t->info); h = hash_combine(h, t->size); return h; } static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2) { return t1->name_off == t2->name_off && t1->info == t2->info && t1->size == t2->size; } /* Calculate type signature hash of INT. */ static __u32 btf_hash_int(struct btf_type *t) { __u32 info = *(__u32 *)(t + 1); __u32 h; h = btf_hash_common(t); h = hash_combine(h, info); return h; } /* Check structural equality of two INTs. */ static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2) { __u32 info1, info2; if (!btf_equal_common(t1, t2)) return false; info1 = *(__u32 *)(t1 + 1); info2 = *(__u32 *)(t2 + 1); return info1 == info2; } /* Calculate type signature hash of ENUM. */ static __u32 btf_hash_enum(struct btf_type *t) { __u32 h; /* don't hash vlen and enum members to support enum fwd resolving */ h = hash_combine(0, t->name_off); h = hash_combine(h, t->info & ~0xffff); h = hash_combine(h, t->size); return h; } /* Check structural equality of two ENUMs. */ static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2) { struct btf_enum *m1, *m2; __u16 vlen; int i; if (!btf_equal_common(t1, t2)) return false; vlen = BTF_INFO_VLEN(t1->info); m1 = (struct btf_enum *)(t1 + 1); m2 = (struct btf_enum *)(t2 + 1); for (i = 0; i < vlen; i++) { if (m1->name_off != m2->name_off || m1->val != m2->val) return false; m1++; m2++; } return true; } static inline bool btf_is_enum_fwd(struct btf_type *t) { return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM && BTF_INFO_VLEN(t->info) == 0; } static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2) { if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2)) return btf_equal_enum(t1, t2); /* ignore vlen when comparing */ return t1->name_off == t2->name_off && (t1->info & ~0xffff) == (t2->info & ~0xffff) && t1->size == t2->size; } /* * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs, * as referenced type IDs equivalence is established separately during type * graph equivalence check algorithm. */ static __u32 btf_hash_struct(struct btf_type *t) { struct btf_member *member = (struct btf_member *)(t + 1); __u32 vlen = BTF_INFO_VLEN(t->info); __u32 h = btf_hash_common(t); int i; for (i = 0; i < vlen; i++) { h = hash_combine(h, member->name_off); h = hash_combine(h, member->offset); /* no hashing of referenced type ID, it can be unresolved yet */ member++; } return h; } /* * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced * type IDs. This check is performed during type graph equivalence check and * referenced types equivalence is checked separately. 
*/ static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2) { struct btf_member *m1, *m2; __u16 vlen; int i; if (!btf_equal_common(t1, t2)) return false; vlen = BTF_INFO_VLEN(t1->info); m1 = (struct btf_member *)(t1 + 1); m2 = (struct btf_member *)(t2 + 1); for (i = 0; i < vlen; i++) { if (m1->name_off != m2->name_off || m1->offset != m2->offset) return false; m1++; m2++; } return true; } /* * Calculate type signature hash of ARRAY, including referenced type IDs, * under assumption that they were already resolved to canonical type IDs and * are not going to change. */ static __u32 btf_hash_array(struct btf_type *t) { struct btf_array *info = (struct btf_array *)(t + 1); __u32 h = btf_hash_common(t); h = hash_combine(h, info->type); h = hash_combine(h, info->index_type); h = hash_combine(h, info->nelems); return h; } /* * Check exact equality of two ARRAYs, taking into account referenced * type IDs, under assumption that they were already resolved to canonical * type IDs and are not going to change. * This function is called during reference types deduplication to compare * ARRAY to potential canonical representative. */ static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2) { struct btf_array *info1, *info2; if (!btf_equal_common(t1, t2)) return false; info1 = (struct btf_array *)(t1 + 1); info2 = (struct btf_array *)(t2 + 1); return info1->type == info2->type && info1->index_type == info2->index_type && info1->nelems == info2->nelems; } /* * Check structural compatibility of two ARRAYs, ignoring referenced type * IDs. This check is performed during type graph equivalence check and * referenced types equivalence is checked separately. */ static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2) { struct btf_array *info1, *info2; if (!btf_equal_common(t1, t2)) return false; info1 = (struct btf_array *)(t1 + 1); info2 = (struct btf_array *)(t2 + 1); return info1->nelems == info2->nelems; } /* * Calculate type signature hash of FUNC_PROTO, including referenced type IDs, * under assumption that they were already resolved to canonical type IDs and * are not going to change. */ static inline __u32 btf_hash_fnproto(struct btf_type *t) { struct btf_param *member = (struct btf_param *)(t + 1); __u16 vlen = BTF_INFO_VLEN(t->info); __u32 h = btf_hash_common(t); int i; for (i = 0; i < vlen; i++) { h = hash_combine(h, member->name_off); h = hash_combine(h, member->type); member++; } return h; } /* * Check exact equality of two FUNC_PROTOs, taking into account referenced * type IDs, under assumption that they were already resolved to canonical * type IDs and are not going to change. * This function is called during reference types deduplication to compare * FUNC_PROTO to potential canonical representative. */ static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2) { struct btf_param *m1, *m2; __u16 vlen; int i; if (!btf_equal_common(t1, t2)) return false; vlen = BTF_INFO_VLEN(t1->info); m1 = (struct btf_param *)(t1 + 1); m2 = (struct btf_param *)(t2 + 1); for (i = 0; i < vlen; i++) { if (m1->name_off != m2->name_off || m1->type != m2->type) return false; m1++; m2++; } return true; } /* * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type * IDs. This check is performed during type graph equivalence check and * referenced types equivalence is checked separately. 
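 *
 * (For contrast -- an illustrative note: btf_equal_fnproto() above also
 * compares the return type ID and each parameter's type ID, while this
 * "compat" variant deliberately skips referenced IDs, leaving them to the
 * recursive graph walk in btf_dedup_is_equiv().)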
*/ static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2) { struct btf_param *m1, *m2; __u16 vlen; int i; /* skip return type ID */ if (t1->name_off != t2->name_off || t1->info != t2->info) return false; vlen = BTF_INFO_VLEN(t1->info); m1 = (struct btf_param *)(t1 + 1); m2 = (struct btf_param *)(t2 + 1); for (i = 0; i < vlen; i++) { if (m1->name_off != m2->name_off) return false; m1++; m2++; } return true; } /* * Deduplicate primitive types, that can't reference other types, by calculating * their type signature hash and comparing them with any possible canonical * candidate. If no canonical candidate matches, type itself is marked as * canonical and is added into `btf_dedup->dedup_table` as another candidate. */ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) { struct btf_type *t = d->btf->types[type_id]; struct btf_type *cand; struct btf_dedup_node *cand_node; /* if we don't find equivalent type, then we are canonical */ __u32 new_id = type_id; __u32 h; switch (BTF_INFO_KIND(t->info)) { case BTF_KIND_CONST: case BTF_KIND_VOLATILE: case BTF_KIND_RESTRICT: case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_ARRAY: case BTF_KIND_STRUCT: case BTF_KIND_UNION: case BTF_KIND_FUNC: case BTF_KIND_FUNC_PROTO: return 0; case BTF_KIND_INT: h = btf_hash_int(t); for_each_dedup_cand(d, h, cand_node) { cand = d->btf->types[cand_node->type_id]; if (btf_equal_int(t, cand)) { new_id = cand_node->type_id; break; } } break; case BTF_KIND_ENUM: h = btf_hash_enum(t); for_each_dedup_cand(d, h, cand_node) { cand = d->btf->types[cand_node->type_id]; if (btf_equal_enum(t, cand)) { new_id = cand_node->type_id; break; } if (d->opts.dont_resolve_fwds) continue; if (btf_compat_enum(t, cand)) { if (btf_is_enum_fwd(t)) { /* resolve fwd to full enum */ new_id = cand_node->type_id; break; } /* resolve canonical enum fwd to full enum */ d->map[cand_node->type_id] = type_id; } } break; case BTF_KIND_FWD: h = btf_hash_common(t); for_each_dedup_cand(d, h, cand_node) { cand = d->btf->types[cand_node->type_id]; if (btf_equal_common(t, cand)) { new_id = cand_node->type_id; break; } } break; default: return -EINVAL; } d->map[type_id] = new_id; if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) return -ENOMEM; return 0; } static int btf_dedup_prim_types(struct btf_dedup *d) { int i, err; for (i = 1; i <= d->btf->nr_types; i++) { err = btf_dedup_prim_type(d, i); if (err) return err; } return 0; } /* * Check whether type is already mapped into canonical one (could be to itself). */ static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id) { return d->map[type_id] <= BTF_MAX_NR_TYPES; } /* * Resolve type ID into its canonical type ID, if any; otherwise return original * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow * STRUCT/UNION link and resolve it into canonical type ID as well. */ static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id) { while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) type_id = d->map[type_id]; return type_id; } /* * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original * type ID. 
*/ static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id) { __u32 orig_type_id = type_id; if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD) return type_id; while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) type_id = d->map[type_id]; if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD) return type_id; return orig_type_id; } static inline __u16 btf_fwd_kind(struct btf_type *t) { return BTF_INFO_KFLAG(t->info) ? BTF_KIND_UNION : BTF_KIND_STRUCT; } /* * Check equivalence of the BTF type graph formed by a candidate struct/union (we'll * call it "candidate graph" in this description for brevity) to a type graph * formed by a (potential) canonical struct/union ("canonical graph" for brevity * here, though keep in mind that not all types in the canonical graph are * necessarily canonical representatives themselves; some of them might be * duplicates or their uniqueness might not have been established yet). * Returns: * - >0, if type graphs are equivalent; * - 0, if not equivalent; * - <0, on error. * * The algorithm performs a side-by-side DFS traversal of both type graphs and checks * equivalence of BTF types at each step. If at any point the BTF types in the * candidate and canonical graphs are not structurally compatible, the whole graphs * are incompatible. If the types are structurally equivalent (i.e., all information * except referenced type IDs is exactly the same), a mapping from `canon_id` to * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`). * If a type references other types, then those referenced types are checked * for equivalence recursively. * * During the DFS traversal, if we find that for the current `canon_id` type we * already have some mapping in the hypothetical map, we check for two possible * situations: * - `canon_id` is mapped to exactly the same type as `cand_id`. This will * happen when type graphs have cycles. In this case we assume those two * types are equivalent. * - `canon_id` is mapped to a different type. This is a contradiction in our * hypothetical mapping, because the same type in the canonical graph would * correspond to two different types in the candidate graph, which for equivalent * type graphs shouldn't happen. This condition terminates the equivalence check * with a negative result. * * If the type graph traversal exhausts all types to check and finds no * contradiction, then the type graphs are equivalent. * * When checking types for equivalence, there is one special case: FWD types. * If FWD type resolution is allowed and one of the types (either from the canonical * or the candidate graph) is a FWD and the other is a STRUCT/UNION (depending on * the FWD's kind flag) and their names match, the hypothetical mapping is updated to * point from the FWD to the STRUCT/UNION. If the graphs are successfully determined * to be equivalent, this mapping will be used to record the FWD -> STRUCT/UNION * mapping permanently. * * Technically, this could lead to an incorrect FWD to STRUCT/UNION resolution, * if there are two identically named (or anonymous) structs/unions that are * structurally compatible, one of which has a FWD field, while the other is a * concrete STRUCT/UNION, but according to the C sources they are different * structs/unions that reference different types with the same name. This is * extremely unlikely to happen, but the btf_dedup API allows disabling FWD * resolution if this logic is causing problems. * * Doing FWD resolution means that both the candidate and the canonical graphs can * consist of portions that come from multiple compilation units.
* This is due to the fact that types within a single compilation unit are always * deduplicated and FWDs are already resolved, if the referenced struct/union * definition is available. So, if we had an unresolved FWD and found a corresponding * STRUCT/UNION, they will be from different compilation units. This * consequently means that when we "link" a FWD to the corresponding STRUCT/UNION, * the type graph will likely have at least two different BTF types that describe * the same type (e.g., most probably there will be two different BTF types for the * same 'int' primitive type) and could even have "overlapping" parts of the type * graph that describe the same subset of types. * * This in turn means that our assumption that each type in the canonical graph * must correspond to exactly one type in the candidate graph might not hold * anymore and will make it harder to detect contradictions using the hypothetical * map. To handle this problem, we allow following FWD -> STRUCT/UNION * resolution only in the canonical graph. FWDs in the candidate graph are never * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs * that can occur: * - Both types in the canonical and candidate graphs are FWDs. If they are * structurally equivalent, then they can either be both resolved to the * same STRUCT/UNION or not resolved at all. In both cases they are * equivalent and there is no need to resolve the FWD on the candidate side. * - Both types in the canonical and candidate graphs are concrete STRUCT/UNIONs, * so there is nothing to resolve either; the algorithm will check equivalence anyway. * - The type in the canonical graph is a FWD, while the type in the candidate graph * is a concrete STRUCT/UNION. In this case the candidate graph comes from a single * compilation unit, so there is exactly one BTF type for each unique C type. After * resolving the FWD into a STRUCT/UNION, there might be more than one BTF type * in the canonical graph mapping to a single BTF type in the candidate graph, but * because the hypothetical mapping maps from canonical to candidate types, that's * all right, and we still maintain the property of having a single `canon_id` * mapping to a single `cand_id` (there could be two different `canon_id`s mapped * to the same `cand_id`, but that's not contradictory). * - The type in the canonical graph is a concrete STRUCT/UNION, while the type in * the candidate graph is a FWD. In this case we are just going to check compatibility * of the STRUCT/UNION and the corresponding FWD, and if they are compatible, we'll * assume that whatever STRUCT/UNION the FWD resolves to must be equivalent to * the concrete STRUCT/UNION from the canonical graph. If the rest of the type graphs * turn out to be equivalent, we'll re-resolve the FWD to the concrete STRUCT/UNION * from the canonical graph.
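 */

/*
 * A minimal sketch of the hypothesis-map idea above, on a toy graph of
 * integer-tagged nodes. This is illustrative only: toy_node, toy_is_equiv
 * and the convention of initializing hypot_map entries to -1 are
 * hypothetical and not part of libbpf; the real algorithm below also
 * handles BTF kinds, FWD resolution and error codes.
 */
struct toy_node {
	int tag;	/* stands in for all non-reference btf_type data */
	int nr_refs;	/* how many other nodes this node references */
	int refs[2];	/* indices of referenced nodes */
};

static int toy_is_equiv(const struct toy_node *cand_g,
			const struct toy_node *canon_g,
			int cand_id, int canon_id, int *hypot_map)
{
	int i, eq;

	/* already hypothesized: either a benign cycle or a contradiction */
	if (hypot_map[canon_id] >= 0)
		return hypot_map[canon_id] == cand_id;
	hypot_map[canon_id] = cand_id;

	/* structural ("shallow") check, ignoring referenced node IDs */
	if (cand_g[cand_id].tag != canon_g[canon_id].tag ||
	    cand_g[cand_id].nr_refs != canon_g[canon_id].nr_refs)
		return 0;

	/* side-by-side DFS into referenced nodes */
	for (i = 0; i < cand_g[cand_id].nr_refs; i++) {
		eq = toy_is_equiv(cand_g, canon_g, cand_g[cand_id].refs[i],
				  canon_g[canon_id].refs[i], hypot_map);
		if (eq <= 0)
			return eq;
	}
	return 1;
}

/*
 * btf_dedup_is_equiv() below follows the same shape, with the extra
 * FWD <--> STRUCT/UNION special case described above: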
*/ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, __u32 canon_id) { struct btf_type *cand_type; struct btf_type *canon_type; __u32 hypot_type_id; __u16 cand_kind; __u16 canon_kind; int i, eq; /* if both resolve to the same canonical, they must be equivalent */ if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id)) return 1; canon_id = resolve_fwd_id(d, canon_id); hypot_type_id = d->hypot_map[canon_id]; if (hypot_type_id <= BTF_MAX_NR_TYPES) return hypot_type_id == cand_id; if (btf_dedup_hypot_map_add(d, canon_id, cand_id)) return -ENOMEM; cand_type = d->btf->types[cand_id]; canon_type = d->btf->types[canon_id]; cand_kind = BTF_INFO_KIND(cand_type->info); canon_kind = BTF_INFO_KIND(canon_type->info); if (cand_type->name_off != canon_type->name_off) return 0; /* FWD <--> STRUCT/UNION equivalence check, if enabled */ if (!d->opts.dont_resolve_fwds && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD) && cand_kind != canon_kind) { __u16 real_kind; __u16 fwd_kind; if (cand_kind == BTF_KIND_FWD) { real_kind = canon_kind; fwd_kind = btf_fwd_kind(cand_type); } else { real_kind = cand_kind; fwd_kind = btf_fwd_kind(canon_type); } return fwd_kind == real_kind; } if (cand_kind != canon_kind) return 0; switch (cand_kind) { case BTF_KIND_INT: return btf_equal_int(cand_type, canon_type); case BTF_KIND_ENUM: if (d->opts.dont_resolve_fwds) return btf_equal_enum(cand_type, canon_type); else return btf_compat_enum(cand_type, canon_type); case BTF_KIND_FWD: return btf_equal_common(cand_type, canon_type); case BTF_KIND_CONST: case BTF_KIND_VOLATILE: case BTF_KIND_RESTRICT: case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: if (cand_type->info != canon_type->info) return 0; return btf_dedup_is_equiv(d, cand_type->type, canon_type->type); case BTF_KIND_ARRAY: { struct btf_array *cand_arr, *canon_arr; if (!btf_compat_array(cand_type, canon_type)) return 0; cand_arr = (struct btf_array *)(cand_type + 1); canon_arr = (struct btf_array *)(canon_type + 1); eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type); if (eq <= 0) return eq; return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type); } case BTF_KIND_STRUCT: case BTF_KIND_UNION: { struct btf_member *cand_m, *canon_m; __u16 vlen; if (!btf_shallow_equal_struct(cand_type, canon_type)) return 0; vlen = BTF_INFO_VLEN(cand_type->info); cand_m = (struct btf_member *)(cand_type + 1); canon_m = (struct btf_member *)(canon_type + 1); for (i = 0; i < vlen; i++) { eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type); if (eq <= 0) return eq; cand_m++; canon_m++; } return 1; } case BTF_KIND_FUNC_PROTO: { struct btf_param *cand_p, *canon_p; __u16 vlen; if (!btf_compat_fnproto(cand_type, canon_type)) return 0; eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type); if (eq <= 0) return eq; vlen = BTF_INFO_VLEN(cand_type->info); cand_p = (struct btf_param *)(cand_type + 1); canon_p = (struct btf_param *)(canon_type + 1); for (i = 0; i < vlen; i++) { eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type); if (eq <= 0) return eq; cand_p++; canon_p++; } return 1; } default: return -EINVAL; } return 0; } /* * Use hypothetical mapping, produced by successful type graph equivalence * check, to augment existing struct/union canonical mapping, where possible. * * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record * FWD -> STRUCT/UNION correspondence as well. 
FWD resolution is bidirectional: * it doesn't matter whether the FWD type was part of the canonical graph or the * candidate one, we are recording the mapping anyway. Unlike the struct/union * correspondence mapping (described below), which requires care, ordering doesn't * matter for FWD resolution: by the time a FWD type (a reference type) is * deduplicated, all structs/unions will already have been deduped anyway. * * Recording the STRUCT/UNION mapping is purely a performance optimization and is * not required for correctness. It needs to be done carefully to ensure that * a struct/union from the candidate's type graph is not mapped into a corresponding * struct/union from the canonical type graph that itself hasn't been resolved into * a canonical representative. The only guarantee we have is that the canonical * struct/union was determined as canonical and that won't change. But any * types referenced through that struct/union's fields might not have been resolved * yet, so in a case like that it's too early to establish any kind of * correspondence between structs/unions. * * No canonical correspondence is derived for primitive types (they are already * completely deduplicated anyway) or reference types (they rely on the stability * of the struct/union canonical relationship for equivalence checks). */ static void btf_dedup_merge_hypot_map(struct btf_dedup *d) { __u32 cand_type_id, targ_type_id; __u16 t_kind, c_kind; __u32 t_id, c_id; int i; for (i = 0; i < d->hypot_cnt; i++) { cand_type_id = d->hypot_list[i]; targ_type_id = d->hypot_map[cand_type_id]; t_id = resolve_type_id(d, targ_type_id); c_id = resolve_type_id(d, cand_type_id); t_kind = BTF_INFO_KIND(d->btf->types[t_id]->info); c_kind = BTF_INFO_KIND(d->btf->types[c_id]->info); /* * Resolve FWD into STRUCT/UNION. * It's ok to resolve a FWD into a STRUCT/UNION that's not yet * mapped to a canonical representative (as opposed to the * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because * eventually that struct is going to be mapped and all resolved * FWDs will automatically resolve to the correct canonical * representative. This will happen before ref type deduping, * which critically depends on the stability of these mappings. * This stability is not a requirement for STRUCT/UNION * equivalence checks, though. */ if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD) d->map[c_id] = t_id; else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD) d->map[t_id] = c_id; if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) && c_kind != BTF_KIND_FWD && is_type_mapped(d, c_id) && !is_type_mapped(d, t_id)) { /* * as a perf optimization, we can map a struct/union * that's part of the type graph we just verified for * equivalence. We can only do that for a struct/union * that has a canonical representative, though. */ d->map[t_id] = c_id; } } } /* * Deduplicate struct/union types. * * For each struct/union type, its type signature hash is calculated, taking * into account the type's name and size, and the number, order and names of its * fields, but ignoring the type IDs referenced from fields, because they might not * be deduped completely until after the reference types deduplication phase. This * type hash is used to iterate over all potential canonical types sharing the same * hash. * For each canonical candidate we check whether the type graphs that they form * (through referenced types in fields and so on) are equivalent, using the algorithm * implemented in `btf_dedup_is_equiv`.
If such equivalence is found and * BTF_KIND_FWD resolution is allowed, then hypothetical mapping * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to * potentially map other structs/unions to their canonical representatives, * if such relationship hasn't yet been established. This speeds up algorithm * by eliminating some of the duplicate work. * * If no matching canonical representative was found, struct/union is marked * as canonical for itself and is added into btf_dedup->dedup_table hash map * for further look ups. */ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id) { struct btf_dedup_node *cand_node; struct btf_type *cand_type, *t; /* if we don't find equivalent type, then we are canonical */ __u32 new_id = type_id; __u16 kind; __u32 h; /* already deduped or is in process of deduping (loop detected) */ if (d->map[type_id] <= BTF_MAX_NR_TYPES) return 0; t = d->btf->types[type_id]; kind = BTF_INFO_KIND(t->info); if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION) return 0; h = btf_hash_struct(t); for_each_dedup_cand(d, h, cand_node) { int eq; /* * Even though btf_dedup_is_equiv() checks for * btf_shallow_equal_struct() internally when checking two * structs (unions) for equivalence, we need to guard here * from picking matching FWD type as a dedup candidate. * This can happen due to hash collision. In such case just * relying on btf_dedup_is_equiv() would lead to potentially * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because * FWD and compatible STRUCT/UNION are considered equivalent. */ cand_type = d->btf->types[cand_node->type_id]; if (!btf_shallow_equal_struct(t, cand_type)) continue; btf_dedup_clear_hypot_map(d); eq = btf_dedup_is_equiv(d, type_id, cand_node->type_id); if (eq < 0) return eq; if (!eq) continue; new_id = cand_node->type_id; btf_dedup_merge_hypot_map(d); break; } d->map[type_id] = new_id; if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) return -ENOMEM; return 0; } static int btf_dedup_struct_types(struct btf_dedup *d) { int i, err; for (i = 1; i <= d->btf->nr_types; i++) { err = btf_dedup_struct_type(d, i); if (err) return err; } return 0; } /* * Deduplicate reference type. * * Once all primitive and struct/union types got deduplicated, we can easily * deduplicate all other (reference) BTF types. This is done in two steps: * * 1. Resolve all referenced type IDs into their canonical type IDs. This * resolution can be done either immediately for primitive or struct/union types * (because they were deduped in previous two phases) or recursively for * reference types. Recursion will always terminate at either primitive or * struct/union type, at which point we can "unwind" chain of reference types * one by one. There is no danger of encountering cycles because in C type * system the only way to form type cycle is through struct/union, so any chain * of reference types, even those taking part in a type cycle, will inevitably * reach struct/union at some point. * * 2. Once all referenced type IDs are resolved into canonical ones, BTF type * becomes "stable", in the sense that no further deduplication will cause * any changes to it. With that, it's now possible to calculate type's signature * hash (this time taking into account referenced type IDs) and loop over all * potential canonical representatives. 
If no match was found, the current type * becomes the canonical representative of itself and is added into * btf_dedup->dedup_table as another possible canonical representative. */ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) { struct btf_dedup_node *cand_node; struct btf_type *t, *cand; /* if we don't find an equivalent type, then we are the representative type */ __u32 new_id = type_id; int ref_type_id; __u32 h; if (d->map[type_id] == BTF_IN_PROGRESS_ID) return -ELOOP; if (d->map[type_id] <= BTF_MAX_NR_TYPES) return resolve_type_id(d, type_id); t = d->btf->types[type_id]; d->map[type_id] = BTF_IN_PROGRESS_ID; switch (BTF_INFO_KIND(t->info)) { case BTF_KIND_CONST: case BTF_KIND_VOLATILE: case BTF_KIND_RESTRICT: case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: ref_type_id = btf_dedup_ref_type(d, t->type); if (ref_type_id < 0) return ref_type_id; t->type = ref_type_id; h = btf_hash_common(t); for_each_dedup_cand(d, h, cand_node) { cand = d->btf->types[cand_node->type_id]; if (btf_equal_common(t, cand)) { new_id = cand_node->type_id; break; } } break; case BTF_KIND_ARRAY: { struct btf_array *info = (struct btf_array *)(t + 1); ref_type_id = btf_dedup_ref_type(d, info->type); if (ref_type_id < 0) return ref_type_id; info->type = ref_type_id; ref_type_id = btf_dedup_ref_type(d, info->index_type); if (ref_type_id < 0) return ref_type_id; info->index_type = ref_type_id; h = btf_hash_array(t); for_each_dedup_cand(d, h, cand_node) { cand = d->btf->types[cand_node->type_id]; if (btf_equal_array(t, cand)) { new_id = cand_node->type_id; break; } } break; } case BTF_KIND_FUNC_PROTO: { struct btf_param *param; __u16 vlen; int i; ref_type_id = btf_dedup_ref_type(d, t->type); if (ref_type_id < 0) return ref_type_id; t->type = ref_type_id; vlen = BTF_INFO_VLEN(t->info); param = (struct btf_param *)(t + 1); for (i = 0; i < vlen; i++) { ref_type_id = btf_dedup_ref_type(d, param->type); if (ref_type_id < 0) return ref_type_id; param->type = ref_type_id; param++; } h = btf_hash_fnproto(t); for_each_dedup_cand(d, h, cand_node) { cand = d->btf->types[cand_node->type_id]; if (btf_equal_fnproto(t, cand)) { new_id = cand_node->type_id; break; } } break; } default: return -EINVAL; } d->map[type_id] = new_id; if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) return -ENOMEM; return new_id; } static int btf_dedup_ref_types(struct btf_dedup *d) { int i, err; for (i = 1; i <= d->btf->nr_types; i++) { err = btf_dedup_ref_type(d, i); if (err < 0) return err; } btf_dedup_table_free(d); return 0; } /* * Compact types. * * After we have established for each type its corresponding canonical representative * type, we can now eliminate types that are not canonical and leave only * canonical ones laid out sequentially in memory by copying them over * duplicates. During compaction the btf_dedup->hypot_map array is reused to store * a map from original type ID to a new compacted type ID, which will be used * during the next phase to "fix up" type IDs referenced from struct/union and * reference types.
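 *
 * For example (illustrative IDs): with five types where d->map is
 * {1:1, 2:1, 3:3, 4:3, 5:5} (types 2 and 4 are duplicates), compaction
 * keeps types 1, 3 and 5, lays them out next to each other in memory and
 * records d->hypot_map[1] = 1, d->hypot_map[3] = 2, d->hypot_map[5] = 3.
 * Duplicates 2 and 4 get no entry of their own; the remapping phase
 * reaches them through d->map first.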
*/ static int btf_dedup_compact_types(struct btf_dedup *d) { struct btf_type **new_types; __u32 next_type_id = 1; char *types_start, *p; int i, len; /* we are going to reuse hypot_map to store compaction remapping */ d->hypot_map[0] = 0; for (i = 1; i <= d->btf->nr_types; i++) d->hypot_map[i] = BTF_UNPROCESSED_ID; types_start = d->btf->nohdr_data + d->btf->hdr->type_off; p = types_start; for (i = 1; i <= d->btf->nr_types; i++) { if (d->map[i] != i) continue; len = btf_type_size(d->btf->types[i]); if (len < 0) return len; memmove(p, d->btf->types[i], len); d->hypot_map[i] = next_type_id; d->btf->types[next_type_id] = (struct btf_type *)p; p += len; next_type_id++; } /* shrink struct btf's internal types index and update btf_header */ d->btf->nr_types = next_type_id - 1; d->btf->types_size = d->btf->nr_types; d->btf->hdr->type_len = p - types_start; new_types = realloc(d->btf->types, (1 + d->btf->nr_types) * sizeof(struct btf_type *)); if (!new_types) return -ENOMEM; d->btf->types = new_types; /* make sure string section follows type information without gaps */ d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data; memmove(p, d->btf->strings, d->btf->hdr->str_len); d->btf->strings = p; p += d->btf->hdr->str_len; d->btf->data_size = p - (char *)d->btf->data; return 0; } /* * Figure out final (deduplicated and compacted) type ID for provided original * `type_id` by first resolving it into corresponding canonical type ID and * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map, * which is populated during compaction phase. */ static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id) { __u32 resolved_type_id, new_type_id; resolved_type_id = resolve_type_id(d, type_id); new_type_id = d->hypot_map[resolved_type_id]; if (new_type_id > BTF_MAX_NR_TYPES) return -EINVAL; return new_type_id; } /* * Remap referenced type IDs into deduped type IDs. * * After BTF types are deduplicated and compacted, their final type IDs may * differ from original ones. The map from original to a corresponding * deduped type ID is stored in btf_dedup->hypot_map and is populated during * compaction phase. During remapping phase we are rewriting all type IDs * referenced from any BTF type (e.g., struct fields, func proto args, etc) to * their final deduped type IDs. 
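 *
 * Continuing the compaction example above (illustrative IDs): a struct
 * member that still references duplicate type 4 is remapped via
 * resolve_type_id(d, 4) == 3 and d->hypot_map[3] == 2, so member->type
 * becomes 2.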
*/ static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id) { struct btf_type *t = d->btf->types[type_id]; int i, r; switch (BTF_INFO_KIND(t->info)) { case BTF_KIND_INT: case BTF_KIND_ENUM: break; case BTF_KIND_FWD: case BTF_KIND_CONST: case BTF_KIND_VOLATILE: case BTF_KIND_RESTRICT: case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: r = btf_dedup_remap_type_id(d, t->type); if (r < 0) return r; t->type = r; break; case BTF_KIND_ARRAY: { struct btf_array *arr_info = (struct btf_array *)(t + 1); r = btf_dedup_remap_type_id(d, arr_info->type); if (r < 0) return r; arr_info->type = r; r = btf_dedup_remap_type_id(d, arr_info->index_type); if (r < 0) return r; arr_info->index_type = r; break; } case BTF_KIND_STRUCT: case BTF_KIND_UNION: { struct btf_member *member = (struct btf_member *)(t + 1); __u16 vlen = BTF_INFO_VLEN(t->info); for (i = 0; i < vlen; i++) { r = btf_dedup_remap_type_id(d, member->type); if (r < 0) return r; member->type = r; member++; } break; } case BTF_KIND_FUNC_PROTO: { struct btf_param *param = (struct btf_param *)(t + 1); __u16 vlen = BTF_INFO_VLEN(t->info); r = btf_dedup_remap_type_id(d, t->type); if (r < 0) return r; t->type = r; for (i = 0; i < vlen; i++) { r = btf_dedup_remap_type_id(d, param->type); if (r < 0) return r; param->type = r; param++; } break; } default: return -EINVAL; } return 0; } static int btf_dedup_remap_types(struct btf_dedup *d) { int i, r; for (i = 1; i <= d->btf->nr_types; i++) { r = btf_dedup_remap_type(d, i); if (r < 0) return r; } return 0; } dwarves-dfsg-1.15/lib/bpf/src/btf.h000066400000000000000000000066351344730411300170760ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* Copyright (c) 2018 Facebook */ #ifndef __LIBBPF_BTF_H #define __LIBBPF_BTF_H #include #ifdef __cplusplus extern "C" { #endif #ifndef LIBBPF_API #define LIBBPF_API __attribute__((visibility("default"))) #endif #define BTF_ELF_SEC ".BTF" #define BTF_EXT_ELF_SEC ".BTF.ext" struct btf; struct btf_ext; struct btf_type; /* * The .BTF.ext ELF section layout defined as * struct btf_ext_header * func_info subsection * * The func_info subsection layout: * record size for struct bpf_func_info in the func_info subsection * struct btf_sec_func_info for section #1 * a list of bpf_func_info records for section #1 * where struct bpf_func_info mimics one in include/uapi/linux/bpf.h * but may not be identical * struct btf_sec_func_info for section #2 * a list of bpf_func_info records for section #2 * ...... * * Note that the bpf_func_info record size in .BTF.ext may not * be the same as the one defined in include/uapi/linux/bpf.h. * The loader should ensure that record_size meets minimum * requirement and pass the record as is to the kernel. The * kernel will handle the func_info properly based on its contents. 
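 *
 * As a byte-level illustration (the sizes here are hypothetical examples;
 * the real values are governed by the data itself), a func_info
 * subsection covering one ELF section could look like:
 *
 *   __u32 record size               e.g. 8
 *   struct btf_sec_func_info        which ELF section, how many records
 *   N x record-size bytes           the bpf_func_info records themselves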
*/ struct btf_ext_header { __u16 magic; __u8 version; __u8 flags; __u32 hdr_len; /* All offsets are in bytes relative to the end of this header */ __u32 func_info_off; __u32 func_info_len; __u32 line_info_off; __u32 line_info_len; }; LIBBPF_API void btf__free(struct btf *btf); LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size); LIBBPF_API int btf__load(struct btf *btf); LIBBPF_API __s32 btf__find_by_name(const struct btf *btf, const char *type_name); LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf); LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id); LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__fd(const struct btf *btf); LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size); LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset); LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf); LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, __u32 expected_key_size, __u32 expected_value_size, __u32 *key_type_id, __u32 *value_type_id); LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size); LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext); LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size); LIBBPF_API int btf_ext__reloc_func_info(const struct btf *btf, const struct btf_ext *btf_ext, const char *sec_name, __u32 insns_cnt, void **func_info, __u32 *cnt); LIBBPF_API int btf_ext__reloc_line_info(const struct btf *btf, const struct btf_ext *btf_ext, const char *sec_name, __u32 insns_cnt, void **line_info, __u32 *cnt); LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext); LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext); struct btf_dedup_opts { unsigned int dedup_table_size; bool dont_resolve_fwds; }; LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext, const struct btf_dedup_opts *opts); #ifdef __cplusplus } /* extern "C" */ #endif #endif /* __LIBBPF_BTF_H */ dwarves-dfsg-1.15/lib/bpf/src/libbpf.c000066400000000000000000002247661344730411300175630ustar00rootroot00000000000000// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * Common eBPF ELF object loading operations. * * Copyright (C) 2013-2015 Alexei Starovoitov * Copyright (C) 2015 Wang Nan * Copyright (C) 2015 Huawei Inc. * Copyright (C) 2017 Nicira, Inc. */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "libbpf.h" #include "bpf.h" #include "btf.h" #include "str_error.h" #include "libbpf_util.h" #ifndef EM_BPF #define EM_BPF 247 #endif #ifndef BPF_FS_MAGIC #define BPF_FS_MAGIC 0xcafe4a11 #endif #define __printf(a, b) __attribute__((format(printf, a, b))) static int __base_pr(enum libbpf_print_level level, const char *format, va_list args) { if (level == LIBBPF_DEBUG) return 0; return vfprintf(stderr, format, args); } static libbpf_print_fn_t __libbpf_pr = __base_pr; void libbpf_set_print(libbpf_print_fn_t fn) { __libbpf_pr = fn; } __printf(2, 3) void libbpf_print(enum libbpf_print_level level, const char *format, ...) 
{ va_list args; if (!__libbpf_pr) return; va_start(args, format); __libbpf_pr(level, format, args); va_end(args); } #define STRERR_BUFSIZE 128 #define CHECK_ERR(action, err, out) do { \ err = action; \ if (err) \ goto out; \ } while(0) /* Copied from tools/perf/util/util.h */ #ifndef zfree # define zfree(ptr) ({ free(*ptr); *ptr = NULL; }) #endif #ifndef zclose # define zclose(fd) ({ \ int ___err = 0; \ if ((fd) >= 0) \ ___err = close((fd)); \ fd = -1; \ ___err; }) #endif #ifdef HAVE_LIBELF_MMAP_SUPPORT # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP #else # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ #endif static inline __u64 ptr_to_u64(const void *ptr) { return (__u64) (unsigned long) ptr; } struct bpf_capabilities { /* v4.14: kernel support for program & map names. */ __u32 name:1; }; /* * bpf_prog should be a better name but it has been used in * linux/filter.h. */ struct bpf_program { /* Index in elf obj file, for relocation use. */ int idx; char *name; int prog_ifindex; char *section_name; /* section_name with / replaced by _; makes recursive pinning * in bpf_object__pin_programs easier */ char *pin_name; struct bpf_insn *insns; size_t insns_cnt, main_prog_cnt; enum bpf_prog_type type; struct reloc_desc { enum { RELO_LD64, RELO_CALL, } type; int insn_idx; union { int map_idx; int text_off; }; } *reloc_desc; int nr_reloc; struct { int nr; int *fds; } instances; bpf_program_prep_t preprocessor; struct bpf_object *obj; void *priv; bpf_program_clear_priv_t clear_priv; enum bpf_attach_type expected_attach_type; int btf_fd; void *func_info; __u32 func_info_rec_size; __u32 func_info_cnt; struct bpf_capabilities *caps; void *line_info; __u32 line_info_rec_size; __u32 line_info_cnt; }; struct bpf_map { int fd; char *name; size_t offset; int map_ifindex; int inner_map_fd; struct bpf_map_def def; __u32 btf_key_type_id; __u32 btf_value_type_id; void *priv; bpf_map_clear_priv_t clear_priv; }; static LIST_HEAD(bpf_objects_list); struct bpf_object { char license[64]; __u32 kern_version; struct bpf_program *programs; size_t nr_programs; struct bpf_map *maps; size_t nr_maps; bool loaded; bool has_pseudo_calls; /* * Information when doing elf related work. Only valid if fd * is valid. */ struct { int fd; void *obj_buf; size_t obj_buf_sz; Elf *elf; GElf_Ehdr ehdr; Elf_Data *symbols; size_t strtabidx; struct { GElf_Shdr shdr; Elf_Data *data; } *reloc; int nr_reloc; int maps_shndx; int text_shndx; } efile; /* * All loaded bpf_object is linked in a list, which is * hidden to caller. bpf_objects__ handlers deal with * all objects. */ struct list_head list; struct btf *btf; struct btf_ext *btf_ext; void *priv; bpf_object_clear_priv_t clear_priv; struct bpf_capabilities caps; char path[]; }; #define obj_elf_valid(o) ((o)->efile.elf) void bpf_program__unload(struct bpf_program *prog) { int i; if (!prog) return; /* * If the object is opened but the program was never loaded, * it is possible that prog->instances.nr == -1. 
*/ if (prog->instances.nr > 0) { for (i = 0; i < prog->instances.nr; i++) zclose(prog->instances.fds[i]); } else if (prog->instances.nr != -1) { pr_warning("Internal error: instances.nr is %d\n", prog->instances.nr); } prog->instances.nr = -1; zfree(&prog->instances.fds); zclose(prog->btf_fd); zfree(&prog->func_info); zfree(&prog->line_info); } static void bpf_program__exit(struct bpf_program *prog) { if (!prog) return; if (prog->clear_priv) prog->clear_priv(prog, prog->priv); prog->priv = NULL; prog->clear_priv = NULL; bpf_program__unload(prog); zfree(&prog->name); zfree(&prog->section_name); zfree(&prog->pin_name); zfree(&prog->insns); zfree(&prog->reloc_desc); prog->nr_reloc = 0; prog->insns_cnt = 0; prog->idx = -1; } static char *__bpf_program__pin_name(struct bpf_program *prog) { char *name, *p; name = p = strdup(prog->section_name); while ((p = strchr(p, '/'))) *p = '_'; return name; } static int bpf_program__init(void *data, size_t size, char *section_name, int idx, struct bpf_program *prog) { if (size < sizeof(struct bpf_insn)) { pr_warning("corrupted section '%s'\n", section_name); return -EINVAL; } memset(prog, 0, sizeof(*prog)); prog->section_name = strdup(section_name); if (!prog->section_name) { pr_warning("failed to alloc name for prog under section(%d) %s\n", idx, section_name); goto errout; } prog->pin_name = __bpf_program__pin_name(prog); if (!prog->pin_name) { pr_warning("failed to alloc pin name for prog under section(%d) %s\n", idx, section_name); goto errout; } prog->insns = malloc(size); if (!prog->insns) { pr_warning("failed to alloc insns for prog under section %s\n", section_name); goto errout; } prog->insns_cnt = size / sizeof(struct bpf_insn); memcpy(prog->insns, data, prog->insns_cnt * sizeof(struct bpf_insn)); prog->idx = idx; prog->instances.fds = NULL; prog->instances.nr = -1; prog->type = BPF_PROG_TYPE_UNSPEC; prog->btf_fd = -1; return 0; errout: bpf_program__exit(prog); return -ENOMEM; } static int bpf_object__add_program(struct bpf_object *obj, void *data, size_t size, char *section_name, int idx) { struct bpf_program prog, *progs; int nr_progs, err; err = bpf_program__init(data, size, section_name, idx, &prog); if (err) return err; prog.caps = &obj->caps; progs = obj->programs; nr_progs = obj->nr_programs; progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0])); if (!progs) { /* * In this case the original obj->programs * is still valid, so no special treatment is needed for * bpf_close_object().
*/ pr_warning("failed to alloc a new program under section '%s'\n", section_name); bpf_program__exit(&prog); return -ENOMEM; } pr_debug("found program %s\n", prog.section_name); obj->programs = progs; obj->nr_programs = nr_progs + 1; prog.obj = obj; progs[nr_progs] = prog; return 0; } static int bpf_object__init_prog_names(struct bpf_object *obj) { Elf_Data *symbols = obj->efile.symbols; struct bpf_program *prog; size_t pi, si; for (pi = 0; pi < obj->nr_programs; pi++) { const char *name = NULL; prog = &obj->programs[pi]; for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name; si++) { GElf_Sym sym; if (!gelf_getsym(symbols, si, &sym)) continue; if (sym.st_shndx != prog->idx) continue; if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL) continue; name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, sym.st_name); if (!name) { pr_warning("failed to get sym name string for prog %s\n", prog->section_name); return -LIBBPF_ERRNO__LIBELF; } } if (!name && prog->idx == obj->efile.text_shndx) name = ".text"; if (!name) { pr_warning("failed to find sym for prog %s\n", prog->section_name); return -EINVAL; } prog->name = strdup(name); if (!prog->name) { pr_warning("failed to allocate memory for prog sym %s\n", name); return -ENOMEM; } } return 0; } static struct bpf_object *bpf_object__new(const char *path, void *obj_buf, size_t obj_buf_sz) { struct bpf_object *obj; obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1); if (!obj) { pr_warning("alloc memory failed for %s\n", path); return ERR_PTR(-ENOMEM); } strcpy(obj->path, path); obj->efile.fd = -1; /* * Caller of this function should also calls * bpf_object__elf_finish() after data collection to return * obj_buf to user. If not, we should duplicate the buffer to * avoid user freeing them before elf finish. */ obj->efile.obj_buf = obj_buf; obj->efile.obj_buf_sz = obj_buf_sz; obj->efile.maps_shndx = -1; obj->loaded = false; INIT_LIST_HEAD(&obj->list); list_add(&obj->list, &bpf_objects_list); return obj; } static void bpf_object__elf_finish(struct bpf_object *obj) { if (!obj_elf_valid(obj)) return; if (obj->efile.elf) { elf_end(obj->efile.elf); obj->efile.elf = NULL; } obj->efile.symbols = NULL; zfree(&obj->efile.reloc); obj->efile.nr_reloc = 0; zclose(obj->efile.fd); obj->efile.obj_buf = NULL; obj->efile.obj_buf_sz = 0; } static int bpf_object__elf_init(struct bpf_object *obj) { int err = 0; GElf_Ehdr *ep; if (obj_elf_valid(obj)) { pr_warning("elf init: internal error\n"); return -LIBBPF_ERRNO__LIBELF; } if (obj->efile.obj_buf_sz > 0) { /* * obj_buf should have been validated by * bpf_object__open_buffer(). 
*/ obj->efile.elf = elf_memory(obj->efile.obj_buf, obj->efile.obj_buf_sz); } else { obj->efile.fd = open(obj->path, O_RDONLY); if (obj->efile.fd < 0) { char errmsg[STRERR_BUFSIZE]; char *cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("failed to open %s: %s\n", obj->path, cp); return -errno; } obj->efile.elf = elf_begin(obj->efile.fd, LIBBPF_ELF_C_READ_MMAP, NULL); } if (!obj->efile.elf) { pr_warning("failed to open %s as ELF file\n", obj->path); err = -LIBBPF_ERRNO__LIBELF; goto errout; } if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { pr_warning("failed to get EHDR from %s\n", obj->path); err = -LIBBPF_ERRNO__FORMAT; goto errout; } ep = &obj->efile.ehdr; /* Old LLVM set e_machine to EM_NONE */ if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) { pr_warning("%s is not an eBPF object file\n", obj->path); err = -LIBBPF_ERRNO__FORMAT; goto errout; } return 0; errout: bpf_object__elf_finish(obj); return err; } static int bpf_object__check_endianness(struct bpf_object *obj) { static unsigned int const endian = 1; switch (obj->efile.ehdr.e_ident[EI_DATA]) { case ELFDATA2LSB: /* We are big endian, BPF obj is little endian. */ if (*(unsigned char const *)&endian != 1) goto mismatch; break; case ELFDATA2MSB: /* We are little endian, BPF obj is big endian. */ if (*(unsigned char const *)&endian != 0) goto mismatch; break; default: return -LIBBPF_ERRNO__ENDIAN; } return 0; mismatch: pr_warning("Error: endianness mismatch.\n"); return -LIBBPF_ERRNO__ENDIAN; } static int bpf_object__init_license(struct bpf_object *obj, void *data, size_t size) { memcpy(obj->license, data, min(size, sizeof(obj->license) - 1)); pr_debug("license of %s is %s\n", obj->path, obj->license); return 0; } static int bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size) { __u32 kver; if (size != sizeof(kver)) { pr_warning("invalid kver section in %s\n", obj->path); return -LIBBPF_ERRNO__FORMAT; } memcpy(&kver, data, sizeof(kver)); obj->kern_version = kver; pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version); return 0; } static int compare_bpf_map(const void *_a, const void *_b) { const struct bpf_map *a = _a; const struct bpf_map *b = _b; return a->offset - b->offset; } static bool bpf_map_type__is_map_in_map(enum bpf_map_type type) { if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS || type == BPF_MAP_TYPE_HASH_OF_MAPS) return true; return false; } static int bpf_object__init_maps(struct bpf_object *obj, int flags) { bool strict = !(flags & MAPS_RELAX_COMPAT); int i, map_idx, map_def_sz, nr_maps = 0; Elf_Scn *scn; Elf_Data *data = NULL; Elf_Data *symbols = obj->efile.symbols; if (obj->efile.maps_shndx < 0) return -EINVAL; if (!symbols) return -EINVAL; scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx); if (scn) data = elf_getdata(scn, NULL); if (!scn || !data) { pr_warning("failed to get Elf_Data from map section %d\n", obj->efile.maps_shndx); return -EINVAL; } /* * Count number of maps. Each map has a name. * Array of maps is not supported: only the first element is * considered. * * TODO: Detect array of map and report error. */ for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) { GElf_Sym sym; if (!gelf_getsym(symbols, i, &sym)) continue; if (sym.st_shndx != obj->efile.maps_shndx) continue; nr_maps++; } /* Alloc obj->maps and fill nr_maps. 
*/ pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path, nr_maps, data->d_size); if (!nr_maps) return 0; /* Assume equally sized map definitions */ map_def_sz = data->d_size / nr_maps; if (!data->d_size || (data->d_size % nr_maps) != 0) { pr_warning("unable to determine map definition size " "section %s, %d maps in %zd bytes\n", obj->path, nr_maps, data->d_size); return -EINVAL; } obj->maps = calloc(nr_maps, sizeof(obj->maps[0])); if (!obj->maps) { pr_warning("alloc maps for object failed\n"); return -ENOMEM; } obj->nr_maps = nr_maps; for (i = 0; i < nr_maps; i++) { /* * fill all fd with -1 so won't close incorrect * fd (fd=0 is stdin) when failure (zclose won't close * negative fd)). */ obj->maps[i].fd = -1; obj->maps[i].inner_map_fd = -1; } /* * Fill obj->maps using data in "maps" section. */ for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) { GElf_Sym sym; const char *map_name; struct bpf_map_def *def; if (!gelf_getsym(symbols, i, &sym)) continue; if (sym.st_shndx != obj->efile.maps_shndx) continue; map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, sym.st_name); obj->maps[map_idx].offset = sym.st_value; if (sym.st_value + map_def_sz > data->d_size) { pr_warning("corrupted maps section in %s: last map \"%s\" too small\n", obj->path, map_name); return -EINVAL; } obj->maps[map_idx].name = strdup(map_name); if (!obj->maps[map_idx].name) { pr_warning("failed to alloc map name\n"); return -ENOMEM; } pr_debug("map %d is \"%s\"\n", map_idx, obj->maps[map_idx].name); def = (struct bpf_map_def *)(data->d_buf + sym.st_value); /* * If the definition of the map in the object file fits in * bpf_map_def, copy it. Any extra fields in our version * of bpf_map_def will default to zero as a result of the * calloc above. */ if (map_def_sz <= sizeof(struct bpf_map_def)) { memcpy(&obj->maps[map_idx].def, def, map_def_sz); } else { /* * Here the map structure being read is bigger than what * we expect, truncate if the excess bits are all zero. * If they are not zero, reject this map as * incompatible. */ char *b; for (b = ((char *)def) + sizeof(struct bpf_map_def); b < ((char *)def) + map_def_sz; b++) { if (*b != 0) { pr_warning("maps section in %s: \"%s\" " "has unrecognized, non-zero " "options\n", obj->path, map_name); if (strict) return -EINVAL; } } memcpy(&obj->maps[map_idx].def, def, sizeof(struct bpf_map_def)); } map_idx++; } qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map); return 0; } static bool section_have_execinstr(struct bpf_object *obj, int idx) { Elf_Scn *scn; GElf_Shdr sh; scn = elf_getscn(obj->efile.elf, idx); if (!scn) return false; if (gelf_getshdr(scn, &sh) != &sh) return false; if (sh.sh_flags & SHF_EXECINSTR) return true; return false; } static int bpf_object__elf_collect(struct bpf_object *obj, int flags) { Elf *elf = obj->efile.elf; GElf_Ehdr *ep = &obj->efile.ehdr; Elf_Data *btf_ext_data = NULL; Elf_Scn *scn = NULL; int idx = 0, err = 0; /* Elf is corrupted/truncated, avoid calling elf_strptr. 
*/ if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) { pr_warning("failed to get e_shstrndx from %s\n", obj->path); return -LIBBPF_ERRNO__FORMAT; } while ((scn = elf_nextscn(elf, scn)) != NULL) { char *name; GElf_Shdr sh; Elf_Data *data; idx++; if (gelf_getshdr(scn, &sh) != &sh) { pr_warning("failed to get section(%d) header from %s\n", idx, obj->path); err = -LIBBPF_ERRNO__FORMAT; goto out; } name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name); if (!name) { pr_warning("failed to get section(%d) name from %s\n", idx, obj->path); err = -LIBBPF_ERRNO__FORMAT; goto out; } data = elf_getdata(scn, 0); if (!data) { pr_warning("failed to get section(%d) data from %s(%s)\n", idx, name, obj->path); err = -LIBBPF_ERRNO__FORMAT; goto out; } pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", idx, name, (unsigned long)data->d_size, (int)sh.sh_link, (unsigned long)sh.sh_flags, (int)sh.sh_type); if (strcmp(name, "license") == 0) err = bpf_object__init_license(obj, data->d_buf, data->d_size); else if (strcmp(name, "version") == 0) err = bpf_object__init_kversion(obj, data->d_buf, data->d_size); else if (strcmp(name, "maps") == 0) obj->efile.maps_shndx = idx; else if (strcmp(name, BTF_ELF_SEC) == 0) { obj->btf = btf__new(data->d_buf, data->d_size); if (IS_ERR(obj->btf)) { pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", BTF_ELF_SEC, PTR_ERR(obj->btf)); obj->btf = NULL; continue; } err = btf__load(obj->btf); if (err) { pr_warning("Error loading %s into kernel: %d. Ignored and continue.\n", BTF_ELF_SEC, err); btf__free(obj->btf); obj->btf = NULL; err = 0; } } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { btf_ext_data = data; } else if (sh.sh_type == SHT_SYMTAB) { if (obj->efile.symbols) { pr_warning("bpf: multiple SYMTAB in %s\n", obj->path); err = -LIBBPF_ERRNO__FORMAT; } else { obj->efile.symbols = data; obj->efile.strtabidx = sh.sh_link; } } else if ((sh.sh_type == SHT_PROGBITS) && (sh.sh_flags & SHF_EXECINSTR) && (data->d_size > 0)) { if (strcmp(name, ".text") == 0) obj->efile.text_shndx = idx; err = bpf_object__add_program(obj, data->d_buf, data->d_size, name, idx); if (err) { char errmsg[STRERR_BUFSIZE]; char *cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); pr_warning("failed to alloc program %s (%s): %s", name, obj->path, cp); } } else if (sh.sh_type == SHT_REL) { void *reloc = obj->efile.reloc; int nr_reloc = obj->efile.nr_reloc + 1; int sec = sh.sh_info; /* points to other section */ /* Only do relo for section with exec instructions */ if (!section_have_execinstr(obj, sec)) { pr_debug("skip relo %s(%d) for section(%d)\n", name, idx, sec); continue; } reloc = reallocarray(reloc, nr_reloc, sizeof(*obj->efile.reloc)); if (!reloc) { pr_warning("realloc failed\n"); err = -ENOMEM; } else { int n = nr_reloc - 1; obj->efile.reloc = reloc; obj->efile.nr_reloc = nr_reloc; obj->efile.reloc[n].shdr = sh; obj->efile.reloc[n].data = data; } } else { pr_debug("skip section(%d) %s\n", idx, name); } if (err) goto out; } if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) { pr_warning("Corrupted ELF file: index of strtab invalid\n"); return LIBBPF_ERRNO__FORMAT; } if (btf_ext_data) { if (!obj->btf) { pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n", BTF_EXT_ELF_SEC, BTF_ELF_SEC); } else { obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); if (IS_ERR(obj->btf_ext)) { pr_warning("Error loading ELF section %s: %ld. 
Ignored and continue.\n", BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext)); obj->btf_ext = NULL; } } } if (obj->efile.maps_shndx >= 0) { err = bpf_object__init_maps(obj, flags); if (err) goto out; } err = bpf_object__init_prog_names(obj); out: return err; } static struct bpf_program * bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx) { struct bpf_program *prog; size_t i; for (i = 0; i < obj->nr_programs; i++) { prog = &obj->programs[i]; if (prog->idx == idx) return prog; } return NULL; } struct bpf_program * bpf_object__find_program_by_title(struct bpf_object *obj, const char *title) { struct bpf_program *pos; bpf_object__for_each_program(pos, obj) { if (pos->section_name && !strcmp(pos->section_name, title)) return pos; } return NULL; } static int bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, Elf_Data *data, struct bpf_object *obj) { Elf_Data *symbols = obj->efile.symbols; int text_shndx = obj->efile.text_shndx; int maps_shndx = obj->efile.maps_shndx; struct bpf_map *maps = obj->maps; size_t nr_maps = obj->nr_maps; int i, nrels; pr_debug("collecting relocating info for: '%s'\n", prog->section_name); nrels = shdr->sh_size / shdr->sh_entsize; prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels); if (!prog->reloc_desc) { pr_warning("failed to alloc memory in relocation\n"); return -ENOMEM; } prog->nr_reloc = nrels; for (i = 0; i < nrels; i++) { GElf_Sym sym; GElf_Rel rel; unsigned int insn_idx; struct bpf_insn *insns = prog->insns; size_t map_idx; if (!gelf_getrel(data, i, &rel)) { pr_warning("relocation: failed to get %d reloc\n", i); return -LIBBPF_ERRNO__FORMAT; } if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) { pr_warning("relocation: symbol %"PRIx64" not found\n", GELF_R_SYM(rel.r_info)); return -LIBBPF_ERRNO__FORMAT; } pr_debug("relo for %lld value %lld name %d\n", (long long) (rel.r_info >> 32), (long long) sym.st_value, sym.st_name); if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) { pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n", prog->section_name, sym.st_shndx); return -LIBBPF_ERRNO__RELOC; } insn_idx = rel.r_offset / sizeof(struct bpf_insn); pr_debug("relocation: insn_idx=%u\n", insn_idx); if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) { if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) { pr_warning("incorrect bpf_call opcode\n"); return -LIBBPF_ERRNO__RELOC; } prog->reloc_desc[i].type = RELO_CALL; prog->reloc_desc[i].insn_idx = insn_idx; prog->reloc_desc[i].text_off = sym.st_value; obj->has_pseudo_calls = true; continue; } if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) { pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n", insn_idx, insns[insn_idx].code); return -LIBBPF_ERRNO__RELOC; } /* TODO: 'maps' is sorted. We can use bsearch to make it faster. 
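 *
 * A sketch of that bsearch variant (illustrative; cmp_map_offset is
 * hypothetical and not part of libbpf), relying on compare_bpf_map()
 * keeping 'maps' sorted by offset:
 *
 *	static int cmp_map_offset(const void *key, const void *memb)
 *	{
 *		const GElf_Addr *off = key;
 *		const struct bpf_map *map = memb;
 *
 *		return *off < map->offset ? -1 : *off > map->offset ? 1 : 0;
 *	}
 *
 *	struct bpf_map *map = bsearch(&sym.st_value, maps, nr_maps,
 *				      sizeof(maps[0]), cmp_map_offset);
 *	map_idx = map ? map - maps : nr_maps;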
*/ for (map_idx = 0; map_idx < nr_maps; map_idx++) { if (maps[map_idx].offset == sym.st_value) { pr_debug("relocation: found map %zd (%s) for insn %u\n", map_idx, maps[map_idx].name, insn_idx); break; } } if (map_idx >= nr_maps) { pr_warning("bpf relocation: map_idx %d larger than %d\n", (int)map_idx, (int)nr_maps - 1); return -LIBBPF_ERRNO__RELOC; } prog->reloc_desc[i].type = RELO_LD64; prog->reloc_desc[i].insn_idx = insn_idx; prog->reloc_desc[i].map_idx = map_idx; } return 0; } static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) { struct bpf_map_def *def = &map->def; __u32 key_type_id, value_type_id; int ret; ret = btf__get_map_kv_tids(btf, map->name, def->key_size, def->value_size, &key_type_id, &value_type_id); if (ret) return ret; map->btf_key_type_id = key_type_id; map->btf_value_type_id = value_type_id; return 0; } int bpf_map__reuse_fd(struct bpf_map *map, int fd) { struct bpf_map_info info = {}; __u32 len = sizeof(info); int new_fd, err; char *new_name; err = bpf_obj_get_info_by_fd(fd, &info, &len); if (err) return err; new_name = strdup(info.name); if (!new_name) return -errno; new_fd = open("/", O_RDONLY | O_CLOEXEC); if (new_fd < 0) goto err_free_new_name; new_fd = dup3(fd, new_fd, O_CLOEXEC); if (new_fd < 0) goto err_close_new_fd; err = zclose(map->fd); if (err) goto err_close_new_fd; free(map->name); map->fd = new_fd; map->name = new_name; map->def.type = info.type; map->def.key_size = info.key_size; map->def.value_size = info.value_size; map->def.max_entries = info.max_entries; map->def.map_flags = info.map_flags; map->btf_key_type_id = info.btf_key_type_id; map->btf_value_type_id = info.btf_value_type_id; return 0; err_close_new_fd: close(new_fd); err_free_new_name: free(new_name); return -errno; } int bpf_map__resize(struct bpf_map *map, __u32 max_entries) { if (!map || !max_entries) return -EINVAL; /* If map already created, its attributes can't be changed. */ if (map->fd >= 0) return -EBUSY; map->def.max_entries = max_entries; return 0; } static int bpf_object__probe_name(struct bpf_object *obj) { struct bpf_load_program_attr attr; char *cp, errmsg[STRERR_BUFSIZE]; struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; int ret; /* make sure basic loading works */ memset(&attr, 0, sizeof(attr)); attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; attr.insns = insns; attr.insns_cnt = ARRAY_SIZE(insns); attr.license = "GPL"; ret = bpf_load_program_xattr(&attr, NULL, 0); if (ret < 0) { cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("Error in %s():%s(%d).
Couldn't load basic 'r0 = 0' BPF program.\n", __func__, cp, errno); return -errno; } close(ret); /* now try the same program, but with the name */ attr.name = "test"; ret = bpf_load_program_xattr(&attr, NULL, 0); if (ret >= 0) { obj->caps.name = 1; close(ret); } return 0; } static int bpf_object__probe_caps(struct bpf_object *obj) { return bpf_object__probe_name(obj); } static int bpf_object__create_maps(struct bpf_object *obj) { struct bpf_create_map_attr create_attr = {}; unsigned int i; int err; for (i = 0; i < obj->nr_maps; i++) { struct bpf_map *map = &obj->maps[i]; struct bpf_map_def *def = &map->def; char *cp, errmsg[STRERR_BUFSIZE]; int *pfd = &map->fd; if (map->fd >= 0) { pr_debug("skip map create (preset) %s: fd=%d\n", map->name, map->fd); continue; } if (obj->caps.name) create_attr.name = map->name; create_attr.map_ifindex = map->map_ifindex; create_attr.map_type = def->type; create_attr.map_flags = def->map_flags; create_attr.key_size = def->key_size; create_attr.value_size = def->value_size; create_attr.max_entries = def->max_entries; create_attr.btf_fd = 0; create_attr.btf_key_type_id = 0; create_attr.btf_value_type_id = 0; if (bpf_map_type__is_map_in_map(def->type) && map->inner_map_fd >= 0) create_attr.inner_map_fd = map->inner_map_fd; if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) { create_attr.btf_fd = btf__fd(obj->btf); create_attr.btf_key_type_id = map->btf_key_type_id; create_attr.btf_value_type_id = map->btf_value_type_id; } *pfd = bpf_create_map_xattr(&create_attr); if (*pfd < 0 && create_attr.btf_key_type_id) { cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", map->name, cp, errno); create_attr.btf_fd = 0; create_attr.btf_key_type_id = 0; create_attr.btf_value_type_id = 0; map->btf_key_type_id = 0; map->btf_value_type_id = 0; *pfd = bpf_create_map_xattr(&create_attr); } if (*pfd < 0) { size_t j; err = *pfd; cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("failed to create map (name: '%s'): %s\n", map->name, cp); for (j = 0; j < i; j++) zclose(obj->maps[j].fd); return err; } pr_debug("create map %s: fd=%d\n", map->name, *pfd); } return 0; } static int check_btf_ext_reloc_err(struct bpf_program *prog, int err, void *btf_prog_info, const char *info_name) { if (err != -ENOENT) { pr_warning("Error in loading %s for sec %s.\n", info_name, prog->section_name); return err; } /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */ if (btf_prog_info) { /* * Some info has already been found but has problem * in the last btf_ext reloc. Must have to error * out. */ pr_warning("Error in relocating %s for sec %s.\n", info_name, prog->section_name); return err; } /* * Have problem loading the very first info. Ignore * the rest. */ pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n", info_name, prog->section_name, info_name); return 0; } static int bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj, const char *section_name, __u32 insn_offset) { int err; if (!insn_offset || prog->func_info) { /* * !insn_offset => main program * * For sub prog, the main program's func_info has to * be loaded first (i.e. 
prog->func_info != NULL) */ err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext, section_name, insn_offset, &prog->func_info, &prog->func_info_cnt); if (err) return check_btf_ext_reloc_err(prog, err, prog->func_info, "bpf_func_info"); prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext); } if (!insn_offset || prog->line_info) { err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext, section_name, insn_offset, &prog->line_info, &prog->line_info_cnt); if (err) return check_btf_ext_reloc_err(prog, err, prog->line_info, "bpf_line_info"); prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext); } if (!insn_offset) prog->btf_fd = btf__fd(obj->btf); return 0; } static int bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, struct reloc_desc *relo) { struct bpf_insn *insn, *new_insn; struct bpf_program *text; size_t new_cnt; int err; if (relo->type != RELO_CALL) return -LIBBPF_ERRNO__RELOC; if (prog->idx == obj->efile.text_shndx) { pr_warning("relo in .text insn %d into off %d\n", relo->insn_idx, relo->text_off); return -LIBBPF_ERRNO__RELOC; } if (prog->main_prog_cnt == 0) { text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx); if (!text) { pr_warning("no .text section found yet relo into text exist\n"); return -LIBBPF_ERRNO__RELOC; } new_cnt = prog->insns_cnt + text->insns_cnt; new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn)); if (!new_insn) { pr_warning("oom in prog realloc\n"); return -ENOMEM; } if (obj->btf_ext) { err = bpf_program_reloc_btf_ext(prog, obj, text->section_name, prog->insns_cnt); if (err) return err; } memcpy(new_insn + prog->insns_cnt, text->insns, text->insns_cnt * sizeof(*insn)); prog->insns = new_insn; prog->main_prog_cnt = prog->insns_cnt; prog->insns_cnt = new_cnt; pr_debug("added %zd insn from %s to prog %s\n", text->insns_cnt, text->section_name, prog->section_name); } insn = &prog->insns[relo->insn_idx]; insn->imm += prog->main_prog_cnt - relo->insn_idx; return 0; } static int bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj) { int i, err; if (!prog) return 0; if (obj->btf_ext) { err = bpf_program_reloc_btf_ext(prog, obj, prog->section_name, 0); if (err) return err; } if (!prog->reloc_desc) return 0; for (i = 0; i < prog->nr_reloc; i++) { if (prog->reloc_desc[i].type == RELO_LD64) { struct bpf_insn *insns = prog->insns; int insn_idx, map_idx; insn_idx = prog->reloc_desc[i].insn_idx; map_idx = prog->reloc_desc[i].map_idx; if (insn_idx >= (int)prog->insns_cnt) { pr_warning("relocation out of range: '%s'\n", prog->section_name); return -LIBBPF_ERRNO__RELOC; } insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD; insns[insn_idx].imm = obj->maps[map_idx].fd; } else { err = bpf_program__reloc_text(prog, obj, &prog->reloc_desc[i]); if (err) return err; } } zfree(&prog->reloc_desc); prog->nr_reloc = 0; return 0; } static int bpf_object__relocate(struct bpf_object *obj) { struct bpf_program *prog; size_t i; int err; for (i = 0; i < obj->nr_programs; i++) { prog = &obj->programs[i]; err = bpf_program__relocate(prog, obj); if (err) { pr_warning("failed to relocate '%s'\n", prog->section_name); return err; } } return 0; } static int bpf_object__collect_reloc(struct bpf_object *obj) { int i, err; if (!obj_elf_valid(obj)) { pr_warning("Internal error: elf object is closed\n"); return -LIBBPF_ERRNO__INTERNAL; } for (i = 0; i < obj->efile.nr_reloc; i++) { GElf_Shdr *shdr = &obj->efile.reloc[i].shdr; Elf_Data *data = obj->efile.reloc[i].data; int idx = shdr->sh_info; struct bpf_program *prog; if 
(shdr->sh_type != SHT_REL) { pr_warning("internal error at %d\n", __LINE__); return -LIBBPF_ERRNO__INTERNAL; } prog = bpf_object__find_prog_by_idx(obj, idx); if (!prog) { pr_warning("relocation failed: no section(%d)\n", idx); return -LIBBPF_ERRNO__RELOC; } err = bpf_program__collect_reloc(prog, shdr, data, obj); if (err) return err; } return 0; } static int load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, char *license, __u32 kern_version, int *pfd) { struct bpf_load_program_attr load_attr; char *cp, errmsg[STRERR_BUFSIZE]; char *log_buf; int ret; memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); load_attr.prog_type = prog->type; load_attr.expected_attach_type = prog->expected_attach_type; if (prog->caps->name) load_attr.name = prog->name; load_attr.insns = insns; load_attr.insns_cnt = insns_cnt; load_attr.license = license; load_attr.kern_version = kern_version; load_attr.prog_ifindex = prog->prog_ifindex; load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0; load_attr.func_info = prog->func_info; load_attr.func_info_rec_size = prog->func_info_rec_size; load_attr.func_info_cnt = prog->func_info_cnt; load_attr.line_info = prog->line_info; load_attr.line_info_rec_size = prog->line_info_rec_size; load_attr.line_info_cnt = prog->line_info_cnt; if (!load_attr.insns || !load_attr.insns_cnt) return -EINVAL; log_buf = malloc(BPF_LOG_BUF_SIZE); if (!log_buf) pr_warning("Alloc log buffer for bpf loader error, continue without log\n"); ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE); if (ret >= 0) { *pfd = ret; ret = 0; goto out; } ret = -LIBBPF_ERRNO__LOAD; cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("load bpf program failed: %s\n", cp); if (log_buf && log_buf[0] != '\0') { ret = -LIBBPF_ERRNO__VERIFY; pr_warning("-- BEGIN DUMP LOG ---\n"); pr_warning("\n%s\n", log_buf); pr_warning("-- END LOG --\n"); } else if (load_attr.insns_cnt >= BPF_MAXINSNS) { pr_warning("Program too large (%zu insns), at most %d insns\n", load_attr.insns_cnt, BPF_MAXINSNS); ret = -LIBBPF_ERRNO__PROG2BIG; } else { /* Wrong program type? 
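* As a heuristic, the code below retries the load as a plain
 * BPF_PROG_TYPE_KPROBE program: if that succeeds, the instructions
 * themselves were acceptable and the original program type (or its
 * expected_attach_type) is what the kernel rejected, so the failure is
 * reported as LIBBPF_ERRNO__PROGTYPE; otherwise the remaining suspect
 * is the kernel version check, reported as LIBBPF_ERRNO__KVER.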
*/ if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) { int fd; load_attr.prog_type = BPF_PROG_TYPE_KPROBE; load_attr.expected_attach_type = 0; fd = bpf_load_program_xattr(&load_attr, NULL, 0); if (fd >= 0) { close(fd); ret = -LIBBPF_ERRNO__PROGTYPE; goto out; } } if (log_buf) ret = -LIBBPF_ERRNO__KVER; } out: free(log_buf); return ret; } int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_version) { int err = 0, fd, i; if (prog->instances.nr < 0 || !prog->instances.fds) { if (prog->preprocessor) { pr_warning("Internal error: can't load program '%s'\n", prog->section_name); return -LIBBPF_ERRNO__INTERNAL; } prog->instances.fds = malloc(sizeof(int)); if (!prog->instances.fds) { pr_warning("Not enough memory for BPF fds\n"); return -ENOMEM; } prog->instances.nr = 1; prog->instances.fds[0] = -1; } if (!prog->preprocessor) { if (prog->instances.nr != 1) { pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n", prog->section_name, prog->instances.nr); } err = load_program(prog, prog->insns, prog->insns_cnt, license, kern_version, &fd); if (!err) prog->instances.fds[0] = fd; goto out; } for (i = 0; i < prog->instances.nr; i++) { struct bpf_prog_prep_result result; bpf_program_prep_t preprocessor = prog->preprocessor; memset(&result, 0, sizeof(result)); err = preprocessor(prog, i, prog->insns, prog->insns_cnt, &result); if (err) { pr_warning("Preprocessing the %dth instance of program '%s' failed\n", i, prog->section_name); goto out; } if (!result.new_insn_ptr || !result.new_insn_cnt) { pr_debug("Skip loading the %dth instance of program '%s'\n", i, prog->section_name); prog->instances.fds[i] = -1; if (result.pfd) *result.pfd = -1; continue; } err = load_program(prog, result.new_insn_ptr, result.new_insn_cnt, license, kern_version, &fd); if (err) { pr_warning("Loading the %dth instance of program '%s' failed\n", i, prog->section_name); goto out; } if (result.pfd) *result.pfd = fd; prog->instances.fds[i] = fd; } out: if (err) pr_warning("failed to load program '%s'\n", prog->section_name); zfree(&prog->insns); prog->insns_cnt = 0; return err; } static bool bpf_program__is_function_storage(struct bpf_program *prog, struct bpf_object *obj) { return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls; } static int bpf_object__load_progs(struct bpf_object *obj) { size_t i; int err; for (i = 0; i < obj->nr_programs; i++) { if (bpf_program__is_function_storage(&obj->programs[i], obj)) continue; err = bpf_program__load(&obj->programs[i], obj->license, obj->kern_version); if (err) return err; } return 0; } static bool bpf_prog_type__needs_kver(enum bpf_prog_type type) { switch (type) { case BPF_PROG_TYPE_SOCKET_FILTER: case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_XDP: case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_LWT_SEG6LOCAL: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_SK_SKB: case BPF_PROG_TYPE_CGROUP_DEVICE: case BPF_PROG_TYPE_SK_MSG: case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: case BPF_PROG_TYPE_LIRC_MODE2: case BPF_PROG_TYPE_SK_REUSEPORT: case BPF_PROG_TYPE_FLOW_DISSECTOR: case BPF_PROG_TYPE_UNSPEC: case BPF_PROG_TYPE_TRACEPOINT: case BPF_PROG_TYPE_RAW_TRACEPOINT: case BPF_PROG_TYPE_PERF_EVENT: return false; case BPF_PROG_TYPE_KPROBE: default: return true; } } static int bpf_object__validate(struct bpf_object *obj, bool needs_kver) { if (needs_kver && obj->kern_version == 0) { pr_warning("%s doesn't provide kernel 
version\n", obj->path); return -LIBBPF_ERRNO__KVERSION; } return 0; } static struct bpf_object * __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz, bool needs_kver, int flags) { struct bpf_object *obj; int err; if (elf_version(EV_CURRENT) == EV_NONE) { pr_warning("failed to init libelf for %s\n", path); return ERR_PTR(-LIBBPF_ERRNO__LIBELF); } obj = bpf_object__new(path, obj_buf, obj_buf_sz); if (IS_ERR(obj)) return obj; CHECK_ERR(bpf_object__elf_init(obj), err, out); CHECK_ERR(bpf_object__check_endianness(obj), err, out); CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out); CHECK_ERR(bpf_object__collect_reloc(obj), err, out); CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out); bpf_object__elf_finish(obj); return obj; out: bpf_object__close(obj); return ERR_PTR(err); } struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags) { /* param validation */ if (!attr->file) return NULL; pr_debug("loading %s\n", attr->file); return __bpf_object__open(attr->file, NULL, 0, bpf_prog_type__needs_kver(attr->prog_type), flags); } struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr) { return __bpf_object__open_xattr(attr, 0); } struct bpf_object *bpf_object__open(const char *path) { struct bpf_object_open_attr attr = { .file = path, .prog_type = BPF_PROG_TYPE_UNSPEC, }; return bpf_object__open_xattr(&attr); } struct bpf_object *bpf_object__open_buffer(void *obj_buf, size_t obj_buf_sz, const char *name) { char tmp_name[64]; /* param validation */ if (!obj_buf || obj_buf_sz <= 0) return NULL; if (!name) { snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", (unsigned long)obj_buf, (unsigned long)obj_buf_sz); tmp_name[sizeof(tmp_name) - 1] = '\0'; name = tmp_name; } pr_debug("loading object '%s' from buffer\n", name); return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true); } int bpf_object__unload(struct bpf_object *obj) { size_t i; if (!obj) return -EINVAL; for (i = 0; i < obj->nr_maps; i++) zclose(obj->maps[i].fd); for (i = 0; i < obj->nr_programs; i++) bpf_program__unload(&obj->programs[i]); return 0; } int bpf_object__load(struct bpf_object *obj) { int err; if (!obj) return -EINVAL; if (obj->loaded) { pr_warning("object should not be loaded twice\n"); return -EINVAL; } obj->loaded = true; CHECK_ERR(bpf_object__probe_caps(obj), err, out); CHECK_ERR(bpf_object__create_maps(obj), err, out); CHECK_ERR(bpf_object__relocate(obj), err, out); CHECK_ERR(bpf_object__load_progs(obj), err, out); return 0; out: bpf_object__unload(obj); pr_warning("failed to load object '%s'\n", obj->path); return err; } static int check_path(const char *path) { char *cp, errmsg[STRERR_BUFSIZE]; struct statfs st_fs; char *dname, *dir; int err = 0; if (path == NULL) return -EINVAL; dname = strdup(path); if (dname == NULL) return -ENOMEM; dir = dirname(dname); if (statfs(dir, &st_fs)) { cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("failed to statfs %s: %s\n", dir, cp); err = -errno; } free(dname); if (!err && st_fs.f_type != BPF_FS_MAGIC) { pr_warning("specified path %s is not on BPF FS\n", path); err = -EINVAL; } return err; } int bpf_program__pin_instance(struct bpf_program *prog, const char *path, int instance) { char *cp, errmsg[STRERR_BUFSIZE]; int err; err = check_path(path); if (err) return err; if (prog == NULL) { pr_warning("invalid program pointer\n"); return -EINVAL; } if (instance < 0 || instance >= prog->instances.nr) { pr_warning("invalid prog instance %d of prog %s (max %d)\n", instance, prog->section_name, 
prog->instances.nr); return -EINVAL; } if (bpf_obj_pin(prog->instances.fds[instance], path)) { cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("failed to pin program: %s\n", cp); return -errno; } pr_debug("pinned program '%s'\n", path); return 0; } int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, int instance) { int err; err = check_path(path); if (err) return err; if (prog == NULL) { pr_warning("invalid program pointer\n"); return -EINVAL; } if (instance < 0 || instance >= prog->instances.nr) { pr_warning("invalid prog instance %d of prog %s (max %d)\n", instance, prog->section_name, prog->instances.nr); return -EINVAL; } err = unlink(path); if (err != 0) return -errno; pr_debug("unpinned program '%s'\n", path); return 0; } static int make_dir(const char *path) { char *cp, errmsg[STRERR_BUFSIZE]; int err = 0; if (mkdir(path, 0700) && errno != EEXIST) err = -errno; if (err) { cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); pr_warning("failed to mkdir %s: %s\n", path, cp); } return err; } int bpf_program__pin(struct bpf_program *prog, const char *path) { int i, err; err = check_path(path); if (err) return err; if (prog == NULL) { pr_warning("invalid program pointer\n"); return -EINVAL; } if (prog->instances.nr <= 0) { pr_warning("no instances of prog %s to pin\n", prog->section_name); return -EINVAL; } if (prog->instances.nr == 1) { /* don't create subdirs when pinning single instance */ return bpf_program__pin_instance(prog, path, 0); } err = make_dir(path); if (err) return err; for (i = 0; i < prog->instances.nr; i++) { char buf[PATH_MAX]; int len; len = snprintf(buf, PATH_MAX, "%s/%d", path, i); if (len < 0) { err = -EINVAL; goto err_unpin; } else if (len >= PATH_MAX) { err = -ENAMETOOLONG; goto err_unpin; } err = bpf_program__pin_instance(prog, buf, i); if (err) goto err_unpin; } return 0; err_unpin: for (i = i - 1; i >= 0; i--) { char buf[PATH_MAX]; int len; len = snprintf(buf, PATH_MAX, "%s/%d", path, i); if (len < 0) continue; else if (len >= PATH_MAX) continue; bpf_program__unpin_instance(prog, buf, i); } rmdir(path); return err; } int bpf_program__unpin(struct bpf_program *prog, const char *path) { int i, err; err = check_path(path); if (err) return err; if (prog == NULL) { pr_warning("invalid program pointer\n"); return -EINVAL; } if (prog->instances.nr <= 0) { pr_warning("no instances of prog %s to pin\n", prog->section_name); return -EINVAL; } if (prog->instances.nr == 1) { /* don't create subdirs when pinning single instance */ return bpf_program__unpin_instance(prog, path, 0); } for (i = 0; i < prog->instances.nr; i++) { char buf[PATH_MAX]; int len; len = snprintf(buf, PATH_MAX, "%s/%d", path, i); if (len < 0) return -EINVAL; else if (len >= PATH_MAX) return -ENAMETOOLONG; err = bpf_program__unpin_instance(prog, buf, i); if (err) return err; } err = rmdir(path); if (err) return -errno; return 0; } int bpf_map__pin(struct bpf_map *map, const char *path) { char *cp, errmsg[STRERR_BUFSIZE]; int err; err = check_path(path); if (err) return err; if (map == NULL) { pr_warning("invalid map pointer\n"); return -EINVAL; } if (bpf_obj_pin(map->fd, path)) { cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("failed to pin map: %s\n", cp); return -errno; } pr_debug("pinned map '%s'\n", path); return 0; } int bpf_map__unpin(struct bpf_map *map, const char *path) { int err; err = check_path(path); if (err) return err; if (map == NULL) { pr_warning("invalid map pointer\n"); return -EINVAL; } err = unlink(path); if (err != 0) 
return -errno; pr_debug("unpinned map '%s'\n", path); return 0; } int bpf_object__pin_maps(struct bpf_object *obj, const char *path) { struct bpf_map *map; int err; if (!obj) return -ENOENT; if (!obj->loaded) { pr_warning("object not yet loaded; load it first\n"); return -ENOENT; } err = make_dir(path); if (err) return err; bpf_object__for_each_map(map, obj) { char buf[PATH_MAX]; int len; len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map)); if (len < 0) { err = -EINVAL; goto err_unpin_maps; } else if (len >= PATH_MAX) { err = -ENAMETOOLONG; goto err_unpin_maps; } err = bpf_map__pin(map, buf); if (err) goto err_unpin_maps; } return 0; err_unpin_maps: while ((map = bpf_map__prev(map, obj))) { char buf[PATH_MAX]; int len; len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map)); if (len < 0) continue; else if (len >= PATH_MAX) continue; bpf_map__unpin(map, buf); } return err; } int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) { struct bpf_map *map; int err; if (!obj) return -ENOENT; bpf_object__for_each_map(map, obj) { char buf[PATH_MAX]; int len; len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map)); if (len < 0) return -EINVAL; else if (len >= PATH_MAX) return -ENAMETOOLONG; err = bpf_map__unpin(map, buf); if (err) return err; } return 0; } int bpf_object__pin_programs(struct bpf_object *obj, const char *path) { struct bpf_program *prog; int err; if (!obj) return -ENOENT; if (!obj->loaded) { pr_warning("object not yet loaded; load it first\n"); return -ENOENT; } err = make_dir(path); if (err) return err; bpf_object__for_each_program(prog, obj) { char buf[PATH_MAX]; int len; len = snprintf(buf, PATH_MAX, "%s/%s", path, prog->pin_name); if (len < 0) { err = -EINVAL; goto err_unpin_programs; } else if (len >= PATH_MAX) { err = -ENAMETOOLONG; goto err_unpin_programs; } err = bpf_program__pin(prog, buf); if (err) goto err_unpin_programs; } return 0; err_unpin_programs: while ((prog = bpf_program__prev(prog, obj))) { char buf[PATH_MAX]; int len; len = snprintf(buf, PATH_MAX, "%s/%s", path, prog->pin_name); if (len < 0) continue; else if (len >= PATH_MAX) continue; bpf_program__unpin(prog, buf); } return err; } int bpf_object__unpin_programs(struct bpf_object *obj, const char *path) { struct bpf_program *prog; int err; if (!obj) return -ENOENT; bpf_object__for_each_program(prog, obj) { char buf[PATH_MAX]; int len; len = snprintf(buf, PATH_MAX, "%s/%s", path, prog->pin_name); if (len < 0) return -EINVAL; else if (len >= PATH_MAX) return -ENAMETOOLONG; err = bpf_program__unpin(prog, buf); if (err) return err; } return 0; } int bpf_object__pin(struct bpf_object *obj, const char *path) { int err; err = bpf_object__pin_maps(obj, path); if (err) return err; err = bpf_object__pin_programs(obj, path); if (err) { bpf_object__unpin_maps(obj, path); return err; } return 0; } void bpf_object__close(struct bpf_object *obj) { size_t i; if (!obj) return; if (obj->clear_priv) obj->clear_priv(obj, obj->priv); bpf_object__elf_finish(obj); bpf_object__unload(obj); btf__free(obj->btf); btf_ext__free(obj->btf_ext); for (i = 0; i < obj->nr_maps; i++) { zfree(&obj->maps[i].name); if (obj->maps[i].clear_priv) obj->maps[i].clear_priv(&obj->maps[i], obj->maps[i].priv); obj->maps[i].priv = NULL; obj->maps[i].clear_priv = NULL; } zfree(&obj->maps); obj->nr_maps = 0; if (obj->programs && obj->nr_programs) { for (i = 0; i < obj->nr_programs; i++) bpf_program__exit(&obj->programs[i]); } zfree(&obj->programs); list_del(&obj->list); free(obj); } struct bpf_object * 
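/* A minimal usage sketch (illustrative, not part of the original
 * source): bpf_object__next(NULL) starts the walk over all live
 * objects, so every opened object can be visited like this:
 *
 *	struct bpf_object *obj = NULL;
 *
 *	while ((obj = bpf_object__next(obj)) != NULL)
 *		printf("open object: %s\n", bpf_object__name(obj));
 *
 * printf() here is purely for illustration and would need stdio.h.
 */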
bpf_object__next(struct bpf_object *prev) { struct bpf_object *next; if (!prev) next = list_first_entry(&bpf_objects_list, struct bpf_object, list); else next = list_next_entry(prev, list); /* Empty list is noticed here so don't need checking on entry. */ if (&next->list == &bpf_objects_list) return NULL; return next; } const char *bpf_object__name(struct bpf_object *obj) { return obj ? obj->path : ERR_PTR(-EINVAL); } unsigned int bpf_object__kversion(struct bpf_object *obj) { return obj ? obj->kern_version : 0; } struct btf *bpf_object__btf(struct bpf_object *obj) { return obj ? obj->btf : NULL; } int bpf_object__btf_fd(const struct bpf_object *obj) { return obj->btf ? btf__fd(obj->btf) : -1; } int bpf_object__set_priv(struct bpf_object *obj, void *priv, bpf_object_clear_priv_t clear_priv) { if (obj->priv && obj->clear_priv) obj->clear_priv(obj, obj->priv); obj->priv = priv; obj->clear_priv = clear_priv; return 0; } void *bpf_object__priv(struct bpf_object *obj) { return obj ? obj->priv : ERR_PTR(-EINVAL); } static struct bpf_program * __bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward) { size_t nr_programs = obj->nr_programs; ssize_t idx; if (!nr_programs) return NULL; if (!p) /* Iter from the beginning */ return forward ? &obj->programs[0] : &obj->programs[nr_programs - 1]; if (p->obj != obj) { pr_warning("error: program handler doesn't match object\n"); return NULL; } idx = (p - obj->programs) + (forward ? 1 : -1); if (idx >= obj->nr_programs || idx < 0) return NULL; return &obj->programs[idx]; } struct bpf_program * bpf_program__next(struct bpf_program *prev, struct bpf_object *obj) { struct bpf_program *prog = prev; do { prog = __bpf_program__iter(prog, obj, true); } while (prog && bpf_program__is_function_storage(prog, obj)); return prog; } struct bpf_program * bpf_program__prev(struct bpf_program *next, struct bpf_object *obj) { struct bpf_program *prog = next; do { prog = __bpf_program__iter(prog, obj, false); } while (prog && bpf_program__is_function_storage(prog, obj)); return prog; } int bpf_program__set_priv(struct bpf_program *prog, void *priv, bpf_program_clear_priv_t clear_priv) { if (prog->priv && prog->clear_priv) prog->clear_priv(prog, prog->priv); prog->priv = priv; prog->clear_priv = clear_priv; return 0; } void *bpf_program__priv(struct bpf_program *prog) { return prog ? 
prog->priv : ERR_PTR(-EINVAL); } void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex) { prog->prog_ifindex = ifindex; } const char *bpf_program__title(struct bpf_program *prog, bool needs_copy) { const char *title; title = prog->section_name; if (needs_copy) { title = strdup(title); if (!title) { pr_warning("failed to strdup program title\n"); return ERR_PTR(-ENOMEM); } } return title; } int bpf_program__fd(struct bpf_program *prog) { return bpf_program__nth_fd(prog, 0); } int bpf_program__set_prep(struct bpf_program *prog, int nr_instances, bpf_program_prep_t prep) { int *instances_fds; if (nr_instances <= 0 || !prep) return -EINVAL; if (prog->instances.nr > 0 || prog->instances.fds) { pr_warning("Can't set pre-processor after loading\n"); return -EINVAL; } instances_fds = malloc(sizeof(int) * nr_instances); if (!instances_fds) { pr_warning("alloc memory failed for fds\n"); return -ENOMEM; } /* fill all fd with -1 */ memset(instances_fds, -1, sizeof(int) * nr_instances); prog->instances.nr = nr_instances; prog->instances.fds = instances_fds; prog->preprocessor = prep; return 0; } int bpf_program__nth_fd(struct bpf_program *prog, int n) { int fd; if (!prog) return -EINVAL; if (n >= prog->instances.nr || n < 0) { pr_warning("Can't get the %dth fd from program %s: only %d instances\n", n, prog->section_name, prog->instances.nr); return -EINVAL; } fd = prog->instances.fds[n]; if (fd < 0) { pr_warning("%dth instance of program '%s' is invalid\n", n, prog->section_name); return -ENOENT; } return fd; } void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) { prog->type = type; } static bool bpf_program__is_type(struct bpf_program *prog, enum bpf_prog_type type) { return prog ? (prog->type == type) : false; } #define BPF_PROG_TYPE_FNS(NAME, TYPE) \ int bpf_program__set_##NAME(struct bpf_program *prog) \ { \ if (!prog) \ return -EINVAL; \ bpf_program__set_type(prog, TYPE); \ return 0; \ } \ \ bool bpf_program__is_##NAME(struct bpf_program *prog) \ { \ return bpf_program__is_type(prog, TYPE); \ } \ BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER); BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE); BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS); BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT); BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT); BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT); BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP); BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT); void bpf_program__set_expected_attach_type(struct bpf_program *prog, enum bpf_attach_type type) { prog->expected_attach_type = type; } #define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \ { string, sizeof(string) - 1, ptype, eatype, is_attachable, atype } /* Programs that can NOT be attached. */ #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0) /* Programs that can be attached. */ #define BPF_APROG_SEC(string, ptype, atype) \ BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype) /* Programs that must specify expected attach type at load time. */ #define BPF_EAPROG_SEC(string, ptype, eatype) \ BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype) /* Programs that can be attached but attach type can't be identified by section * name. Kept for backward compatibility. 
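* For example, "sk_skb/stream_parser" below resolves to
 * BPF_PROG_TYPE_SK_SKB and also carries the attach type
 * BPF_SK_SKB_STREAM_PARSER, while the legacy bare "sk_skb" prefix
 * resolves to the same program type but leaves the attach type for
 * the caller to pick.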
*/ #define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype) static const struct { const char *sec; size_t len; enum bpf_prog_type prog_type; enum bpf_attach_type expected_attach_type; int is_attachable; enum bpf_attach_type attach_type; } section_names[] = { BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER), BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE), BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE), BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS), BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT), BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT), BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT), BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT), BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT), BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL), BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_INGRESS), BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_EGRESS), BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB), BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE), BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND), BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND), BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE, BPF_CGROUP_DEVICE), BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS, BPF_CGROUP_SOCK_OPS), BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB, BPF_SK_SKB_STREAM_PARSER), BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB, BPF_SK_SKB_STREAM_VERDICT), BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB), BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG, BPF_SK_MSG_VERDICT), BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2, BPF_LIRC_MODE2), BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR, BPF_FLOW_DISSECTOR), BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND), BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND), BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT), BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT), BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG), BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG), }; #undef BPF_PROG_SEC_IMPL #undef BPF_PROG_SEC #undef BPF_APROG_SEC #undef BPF_EAPROG_SEC #undef BPF_APROG_COMPAT #define MAX_TYPE_NAME_SIZE 32 static char *libbpf_get_type_names(bool attach_type) { int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE; char *buf; buf = malloc(len); if (!buf) return NULL; buf[0] = '\0'; /* Forge string buf with all available names */ for (i = 0; i < ARRAY_SIZE(section_names); i++) { if (attach_type && !section_names[i].is_attachable) continue; if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) { free(buf); return NULL; } strcat(buf, " "); strcat(buf, section_names[i].sec); } return buf; } int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, enum bpf_attach_type *expected_attach_type) { char *type_names; int i; if (!name) return -EINVAL; for (i = 0; i < ARRAY_SIZE(section_names); i++) { if (strncmp(name, section_names[i].sec, section_names[i].len)) continue; *prog_type = 
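/* first entry whose prefix matches wins, e.g. "kprobe/sys_nanosleep" matches the "kprobe/" entry */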
section_names[i].prog_type; *expected_attach_type = section_names[i].expected_attach_type; return 0; } pr_warning("failed to guess program type based on ELF section name '%s'\n", name); type_names = libbpf_get_type_names(false); if (type_names != NULL) { pr_info("supported section(type) names are:%s\n", type_names); free(type_names); } return -EINVAL; } int libbpf_attach_type_by_name(const char *name, enum bpf_attach_type *attach_type) { char *type_names; int i; if (!name) return -EINVAL; for (i = 0; i < ARRAY_SIZE(section_names); i++) { if (strncmp(name, section_names[i].sec, section_names[i].len)) continue; if (!section_names[i].is_attachable) return -EINVAL; *attach_type = section_names[i].attach_type; return 0; } pr_warning("failed to guess attach type based on ELF section name '%s'\n", name); type_names = libbpf_get_type_names(true); if (type_names != NULL) { pr_info("attachable section(type) names are:%s\n", type_names); free(type_names); } return -EINVAL; } static int bpf_program__identify_section(struct bpf_program *prog, enum bpf_prog_type *prog_type, enum bpf_attach_type *expected_attach_type) { return libbpf_prog_type_by_name(prog->section_name, prog_type, expected_attach_type); } int bpf_map__fd(struct bpf_map *map) { return map ? map->fd : -EINVAL; } const struct bpf_map_def *bpf_map__def(struct bpf_map *map) { return map ? &map->def : ERR_PTR(-EINVAL); } const char *bpf_map__name(struct bpf_map *map) { return map ? map->name : NULL; } __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) { return map ? map->btf_key_type_id : 0; } __u32 bpf_map__btf_value_type_id(const struct bpf_map *map) { return map ? map->btf_value_type_id : 0; } int bpf_map__set_priv(struct bpf_map *map, void *priv, bpf_map_clear_priv_t clear_priv) { if (!map) return -EINVAL; if (map->priv) { if (map->clear_priv) map->clear_priv(map, map->priv); } map->priv = priv; map->clear_priv = clear_priv; return 0; } void *bpf_map__priv(struct bpf_map *map) { return map ? 
map->priv : ERR_PTR(-EINVAL); } bool bpf_map__is_offload_neutral(struct bpf_map *map) { return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; } void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) { map->map_ifindex = ifindex; } int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) { if (!bpf_map_type__is_map_in_map(map->def.type)) { pr_warning("error: unsupported map type\n"); return -EINVAL; } if (map->inner_map_fd != -1) { pr_warning("error: inner_map_fd already specified\n"); return -EINVAL; } map->inner_map_fd = fd; return 0; } static struct bpf_map * __bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i) { ssize_t idx; struct bpf_map *s, *e; if (!obj || !obj->maps) return NULL; s = obj->maps; e = obj->maps + obj->nr_maps; if ((m < s) || (m >= e)) { pr_warning("error in %s: map handler doesn't belong to object\n", __func__); return NULL; } idx = (m - obj->maps) + i; if (idx >= obj->nr_maps || idx < 0) return NULL; return &obj->maps[idx]; } struct bpf_map * bpf_map__next(struct bpf_map *prev, struct bpf_object *obj) { if (prev == NULL) return obj->maps; return __bpf_map__iter(prev, obj, 1); } struct bpf_map * bpf_map__prev(struct bpf_map *next, struct bpf_object *obj) { if (next == NULL) { if (!obj->nr_maps) return NULL; return obj->maps + obj->nr_maps - 1; } return __bpf_map__iter(next, obj, -1); } struct bpf_map * bpf_object__find_map_by_name(struct bpf_object *obj, const char *name) { struct bpf_map *pos; bpf_object__for_each_map(pos, obj) { if (pos->name && !strcmp(pos->name, name)) return pos; } return NULL; } int bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name) { return bpf_map__fd(bpf_object__find_map_by_name(obj, name)); } struct bpf_map * bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset) { int i; for (i = 0; i < obj->nr_maps; i++) { if (obj->maps[i].offset == offset) return &obj->maps[i]; } return ERR_PTR(-ENOENT); } long libbpf_get_error(const void *ptr) { if (IS_ERR(ptr)) return PTR_ERR(ptr); return 0; } int bpf_prog_load(const char *file, enum bpf_prog_type type, struct bpf_object **pobj, int *prog_fd) { struct bpf_prog_load_attr attr; memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); attr.file = file; attr.prog_type = type; attr.expected_attach_type = 0; return bpf_prog_load_xattr(&attr, pobj, prog_fd); } int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, struct bpf_object **pobj, int *prog_fd) { struct bpf_object_open_attr open_attr = { .file = attr->file, .prog_type = attr->prog_type, }; struct bpf_program *prog, *first_prog = NULL; enum bpf_attach_type expected_attach_type; enum bpf_prog_type prog_type; struct bpf_object *obj; struct bpf_map *map; int err; if (!attr) return -EINVAL; if (!attr->file) return -EINVAL; obj = bpf_object__open_xattr(&open_attr); if (IS_ERR_OR_NULL(obj)) return -ENOENT; bpf_object__for_each_program(prog, obj) { /* * If type is not specified, try to guess it based on * section name. 
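* For example, a program placed in SEC("xdp") is loaded as
 * BPF_PROG_TYPE_XDP, and one in SEC("cgroup/bind4") is additionally
 * given expected_attach_type BPF_CGROUP_INET4_BIND (see
 * section_names[] above).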
*/ prog_type = attr->prog_type; prog->prog_ifindex = attr->ifindex; expected_attach_type = attr->expected_attach_type; if (prog_type == BPF_PROG_TYPE_UNSPEC) { err = bpf_program__identify_section(prog, &prog_type, &expected_attach_type); if (err < 0) { bpf_object__close(obj); return -EINVAL; } } bpf_program__set_type(prog, prog_type); bpf_program__set_expected_attach_type(prog, expected_attach_type); if (!first_prog) first_prog = prog; } bpf_object__for_each_map(map, obj) { if (!bpf_map__is_offload_neutral(map)) map->map_ifindex = attr->ifindex; } if (!first_prog) { pr_warning("object file doesn't contain bpf program\n"); bpf_object__close(obj); return -ENOENT; } err = bpf_object__load(obj); if (err) { bpf_object__close(obj); return -EINVAL; } *pobj = obj; *prog_fd = bpf_program__fd(first_prog); return 0; } enum bpf_perf_event_ret bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, void **copy_mem, size_t *copy_size, bpf_perf_event_print_t fn, void *private_data) { struct perf_event_mmap_page *header = mmap_mem; __u64 data_head = ring_buffer_read_head(header); __u64 data_tail = header->data_tail; void *base = ((__u8 *)header) + page_size; int ret = LIBBPF_PERF_EVENT_CONT; struct perf_event_header *ehdr; size_t ehdr_size; while (data_head != data_tail) { ehdr = base + (data_tail & (mmap_size - 1)); ehdr_size = ehdr->size; if (((void *)ehdr) + ehdr_size > base + mmap_size) { void *copy_start = ehdr; size_t len_first = base + mmap_size - copy_start; size_t len_secnd = ehdr_size - len_first; if (*copy_size < ehdr_size) { free(*copy_mem); *copy_mem = malloc(ehdr_size); if (!*copy_mem) { *copy_size = 0; ret = LIBBPF_PERF_EVENT_ERROR; break; } *copy_size = ehdr_size; } memcpy(*copy_mem, copy_start, len_first); memcpy(*copy_mem + len_first, base, len_secnd); ehdr = *copy_mem; } ret = fn(ehdr, private_data); data_tail += ehdr_size; if (ret != LIBBPF_PERF_EVENT_CONT) break; } ring_buffer_write_tail(header, data_tail); return ret; } struct bpf_prog_info_array_desc { int array_offset; /* e.g. offset of jited_prog_insns */ int count_offset; /* e.g. 
offset of jited_prog_len */ int size_offset; /* > 0: offset of rec size, * < 0: fix size of -size_offset */ }; static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = { [BPF_PROG_INFO_JITED_INSNS] = { offsetof(struct bpf_prog_info, jited_prog_insns), offsetof(struct bpf_prog_info, jited_prog_len), -1, }, [BPF_PROG_INFO_XLATED_INSNS] = { offsetof(struct bpf_prog_info, xlated_prog_insns), offsetof(struct bpf_prog_info, xlated_prog_len), -1, }, [BPF_PROG_INFO_MAP_IDS] = { offsetof(struct bpf_prog_info, map_ids), offsetof(struct bpf_prog_info, nr_map_ids), -(int)sizeof(__u32), }, [BPF_PROG_INFO_JITED_KSYMS] = { offsetof(struct bpf_prog_info, jited_ksyms), offsetof(struct bpf_prog_info, nr_jited_ksyms), -(int)sizeof(__u64), }, [BPF_PROG_INFO_JITED_FUNC_LENS] = { offsetof(struct bpf_prog_info, jited_func_lens), offsetof(struct bpf_prog_info, nr_jited_func_lens), -(int)sizeof(__u32), }, [BPF_PROG_INFO_FUNC_INFO] = { offsetof(struct bpf_prog_info, func_info), offsetof(struct bpf_prog_info, nr_func_info), offsetof(struct bpf_prog_info, func_info_rec_size), }, [BPF_PROG_INFO_LINE_INFO] = { offsetof(struct bpf_prog_info, line_info), offsetof(struct bpf_prog_info, nr_line_info), offsetof(struct bpf_prog_info, line_info_rec_size), }, [BPF_PROG_INFO_JITED_LINE_INFO] = { offsetof(struct bpf_prog_info, jited_line_info), offsetof(struct bpf_prog_info, nr_jited_line_info), offsetof(struct bpf_prog_info, jited_line_info_rec_size), }, [BPF_PROG_INFO_PROG_TAGS] = { offsetof(struct bpf_prog_info, prog_tags), offsetof(struct bpf_prog_info, nr_prog_tags), -(int)sizeof(__u8) * BPF_TAG_SIZE, }, }; static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset) { __u32 *array = (__u32 *)info; if (offset >= 0) return array[offset / sizeof(__u32)]; return -(int)offset; } static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset) { __u64 *array = (__u64 *)info; if (offset >= 0) return array[offset / sizeof(__u64)]; return -(int)offset; } static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset, __u32 val) { __u32 *array = (__u32 *)info; if (offset >= 0) array[offset / sizeof(__u32)] = val; } static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset, __u64 val) { __u64 *array = (__u64 *)info; if (offset >= 0) array[offset / sizeof(__u64)] = val; } struct bpf_prog_info_linear * bpf_program__get_prog_info_linear(int fd, __u64 arrays) { struct bpf_prog_info_linear *info_linear; struct bpf_prog_info info = {}; __u32 info_len = sizeof(info); __u32 data_len = 0; int i, err; void *ptr; if (arrays >> BPF_PROG_INFO_LAST_ARRAY) return ERR_PTR(-EINVAL); /* step 1: get array dimensions */ err = bpf_obj_get_info_by_fd(fd, &info, &info_len); if (err) { pr_debug("can't get prog info: %s", strerror(errno)); return ERR_PTR(-EFAULT); } /* step 2: calculate total size of all arrays */ for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { bool include_array = (arrays & (1UL << i)) > 0; struct bpf_prog_info_array_desc *desc; __u32 count, size; desc = bpf_prog_info_array_desc + i; /* kernel is too old to support this field */ if (info_len < desc->array_offset + sizeof(__u32) || info_len < desc->count_offset + sizeof(__u32) || (desc->size_offset > 0 && info_len < desc->size_offset)) include_array = false; if (!include_array) { arrays &= ~(1UL << i); /* clear the bit */ continue; } count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); data_len += count 
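/* each selected array contributes 'count' records of 'size' bytes */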
* size; } /* step 3: allocate continuous memory */ data_len = roundup(data_len, sizeof(__u64)); info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len); if (!info_linear) return ERR_PTR(-ENOMEM); /* step 4: fill data to info_linear->info */ info_linear->arrays = arrays; memset(&info_linear->info, 0, sizeof(info)); ptr = info_linear->data; for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { struct bpf_prog_info_array_desc *desc; __u32 count, size; if ((arrays & (1UL << i)) == 0) continue; desc = bpf_prog_info_array_desc + i; count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); bpf_prog_info_set_offset_u32(&info_linear->info, desc->count_offset, count); bpf_prog_info_set_offset_u32(&info_linear->info, desc->size_offset, size); bpf_prog_info_set_offset_u64(&info_linear->info, desc->array_offset, ptr_to_u64(ptr)); ptr += count * size; } /* step 5: call syscall again to get required arrays */ err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len); if (err) { pr_debug("can't get prog info: %s", strerror(errno)); free(info_linear); return ERR_PTR(-EFAULT); } /* step 6: verify the data */ for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { struct bpf_prog_info_array_desc *desc; __u32 v1, v2; if ((arrays & (1UL << i)) == 0) continue; desc = bpf_prog_info_array_desc + i; v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset); v2 = bpf_prog_info_read_offset_u32(&info_linear->info, desc->count_offset); if (v1 != v2) pr_warning("%s: mismatch in element count\n", __func__); v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset); v2 = bpf_prog_info_read_offset_u32(&info_linear->info, desc->size_offset); if (v1 != v2) pr_warning("%s: mismatch in rec size\n", __func__); } /* step 7: update info_len and data_len */ info_linear->info_len = sizeof(struct bpf_prog_info); info_linear->data_len = data_len; return info_linear; } void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear) { int i; for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { struct bpf_prog_info_array_desc *desc; __u64 addr, offs; if ((info_linear->arrays & (1UL << i)) == 0) continue; desc = bpf_prog_info_array_desc + i; addr = bpf_prog_info_read_offset_u64(&info_linear->info, desc->array_offset); offs = addr - ptr_to_u64(info_linear->data); bpf_prog_info_set_offset_u64(&info_linear->info, desc->array_offset, offs); } } void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear) { int i; for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { struct bpf_prog_info_array_desc *desc; __u64 addr, offs; if ((info_linear->arrays & (1UL << i)) == 0) continue; desc = bpf_prog_info_array_desc + i; offs = bpf_prog_info_read_offset_u64(&info_linear->info, desc->array_offset); addr = offs + ptr_to_u64(info_linear->data); bpf_prog_info_set_offset_u64(&info_linear->info, desc->array_offset, addr); } } dwarves-dfsg-1.15/lib/bpf/src/libbpf.h000066400000000000000000000401061344730411300175500ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* * Common eBPF ELF object loading operations. * * Copyright (C) 2013-2015 Alexei Starovoitov * Copyright (C) 2015 Wang Nan * Copyright (C) 2015 Huawei Inc. 
*/ #ifndef __LIBBPF_LIBBPF_H #define __LIBBPF_LIBBPF_H #include <stdarg.h> #include <stdio.h> #include <stdint.h> #include <stdbool.h> #include <sys/types.h> // for size_t #include <linux/bpf.h> #ifdef __cplusplus extern "C" { #endif #ifndef LIBBPF_API #define LIBBPF_API __attribute__((visibility("default"))) #endif enum libbpf_errno { __LIBBPF_ERRNO__START = 4000, /* Something wrong in libelf */ LIBBPF_ERRNO__LIBELF = __LIBBPF_ERRNO__START, LIBBPF_ERRNO__FORMAT, /* BPF object format invalid */ LIBBPF_ERRNO__KVERSION, /* Incorrect or no 'version' section */ LIBBPF_ERRNO__ENDIAN, /* Endian mismatch */ LIBBPF_ERRNO__INTERNAL, /* Internal error in libbpf */ LIBBPF_ERRNO__RELOC, /* Relocation failed */ LIBBPF_ERRNO__LOAD, /* Load program failure for unknown reason */ LIBBPF_ERRNO__VERIFY, /* Kernel verifier blocks program loading */ LIBBPF_ERRNO__PROG2BIG, /* Program too big */ LIBBPF_ERRNO__KVER, /* Incorrect kernel version */ LIBBPF_ERRNO__PROGTYPE, /* Kernel doesn't support this program type */ LIBBPF_ERRNO__WRNGPID, /* Wrong pid in netlink message */ LIBBPF_ERRNO__INVSEQ, /* Invalid netlink sequence */ LIBBPF_ERRNO__NLPARSE, /* netlink parsing error */ __LIBBPF_ERRNO__END, }; LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size); enum libbpf_print_level { LIBBPF_WARN, LIBBPF_INFO, LIBBPF_DEBUG, }; typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level, const char *, va_list ap); LIBBPF_API void libbpf_set_print(libbpf_print_fn_t fn); /* Hide internal to user */ struct bpf_object; struct bpf_object_open_attr { const char *file; enum bpf_prog_type prog_type; }; LIBBPF_API struct bpf_object *bpf_object__open(const char *path); LIBBPF_API struct bpf_object * bpf_object__open_xattr(struct bpf_object_open_attr *attr); struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags); LIBBPF_API struct bpf_object *bpf_object__open_buffer(void *obj_buf, size_t obj_buf_sz, const char *name); LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path); LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj, const char *path); LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj, const char *path); LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj, const char *path); LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path); LIBBPF_API void bpf_object__close(struct bpf_object *object); /* Load/unload object into/from kernel */ LIBBPF_API int bpf_object__load(struct bpf_object *obj); LIBBPF_API int bpf_object__unload(struct bpf_object *obj); LIBBPF_API const char *bpf_object__name(struct bpf_object *obj); LIBBPF_API unsigned int bpf_object__kversion(struct bpf_object *obj); struct btf; LIBBPF_API struct btf *bpf_object__btf(struct bpf_object *obj); LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj); LIBBPF_API struct bpf_program * bpf_object__find_program_by_title(struct bpf_object *obj, const char *title); LIBBPF_API struct bpf_object *bpf_object__next(struct bpf_object *prev); #define bpf_object__for_each_safe(pos, tmp) \ for ((pos) = bpf_object__next(NULL), \ (tmp) = bpf_object__next(pos); \ (pos) != NULL; \ (pos) = (tmp), (tmp) = bpf_object__next(tmp)) typedef void (*bpf_object_clear_priv_t)(struct bpf_object *, void *); LIBBPF_API int bpf_object__set_priv(struct bpf_object *obj, void *priv, bpf_object_clear_priv_t clear_priv); LIBBPF_API void *bpf_object__priv(struct bpf_object *prog); LIBBPF_API int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, enum bpf_attach_type *expected_attach_type); LIBBPF_API int
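/* A minimal usage sketch (illustrative only): recover the program and
 * expected attach types from an ELF section name before loading:
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type attach_type;
 *
 *	if (!libbpf_prog_type_by_name("cgroup/connect4", &prog_type,
 *				      &attach_type))
 *		bpf_program__set_type(prog, prog_type);
 *
 * where "cgroup/connect4" maps to BPF_PROG_TYPE_CGROUP_SOCK_ADDR with
 * expected attach type BPF_CGROUP_INET4_CONNECT, and 'prog' stands in
 * for a previously obtained struct bpf_program pointer.
 */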
libbpf_attach_type_by_name(const char *name, enum bpf_attach_type *attach_type); /* Accessors of bpf_program */ struct bpf_program; LIBBPF_API struct bpf_program *bpf_program__next(struct bpf_program *prog, struct bpf_object *obj); #define bpf_object__for_each_program(pos, obj) \ for ((pos) = bpf_program__next(NULL, (obj)); \ (pos) != NULL; \ (pos) = bpf_program__next((pos), (obj))) LIBBPF_API struct bpf_program *bpf_program__prev(struct bpf_program *prog, struct bpf_object *obj); typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *); LIBBPF_API int bpf_program__set_priv(struct bpf_program *prog, void *priv, bpf_program_clear_priv_t clear_priv); LIBBPF_API void *bpf_program__priv(struct bpf_program *prog); LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex); LIBBPF_API const char *bpf_program__title(struct bpf_program *prog, bool needs_copy); LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_version); LIBBPF_API int bpf_program__fd(struct bpf_program *prog); LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog, const char *path, int instance); LIBBPF_API int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, int instance); LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path); LIBBPF_API int bpf_program__unpin(struct bpf_program *prog, const char *path); LIBBPF_API void bpf_program__unload(struct bpf_program *prog); struct bpf_insn; /* * Libbpf allows callers to adjust BPF programs before being loaded * into kernel. One program in an object file can be transformed into * multiple variants to be attached to different hooks. * * bpf_program_prep_t, bpf_program__set_prep and bpf_program__nth_fd * form an API for this purpose. * * - bpf_program_prep_t: * Defines a 'preprocessor', which is a caller defined function * passed to libbpf through bpf_program__set_prep(), and will be * called before program is loaded. The processor should adjust * the program one time for each instance according to the instance id * passed to it. * * - bpf_program__set_prep: * Attaches a preprocessor to a BPF program. The number of instances * that should be created is also passed through this function. * * - bpf_program__nth_fd: * After the program is loaded, get resulting FD of a given instance * of the BPF program. * * If bpf_program__set_prep() is not used, the program would be loaded * without adjustment during bpf_object__load(). The program has only * one instance. In this case bpf_program__fd(prog) is equal to * bpf_program__nth_fd(prog, 0). */ struct bpf_prog_prep_result { /* * If not NULL, load new instruction array. * If set to NULL, don't load this instance. */ struct bpf_insn *new_insn_ptr; int new_insn_cnt; /* If not NULL, result FD is written to it. */ int *pfd; }; /* * Parameters of bpf_program_prep_t: * - prog: The bpf_program being loaded. * - n: Index of instance being generated. * - insns: BPF instructions array. * - insns_cnt:Number of instructions in insns. * - res: Output parameter, result of transformation. * * Return value: * - Zero: pre-processing success. * - Non-zero: pre-processing error, stop loading. */ typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n, struct bpf_insn *insns, int insns_cnt, struct bpf_prog_prep_result *res); LIBBPF_API int bpf_program__set_prep(struct bpf_program *prog, int nr_instance, bpf_program_prep_t prep); LIBBPF_API int bpf_program__nth_fd(struct bpf_program *prog, int n); /* * Adjust type of BPF program. 
Default is kprobe. */ LIBBPF_API int bpf_program__set_socket_filter(struct bpf_program *prog); LIBBPF_API int bpf_program__set_tracepoint(struct bpf_program *prog); LIBBPF_API int bpf_program__set_raw_tracepoint(struct bpf_program *prog); LIBBPF_API int bpf_program__set_kprobe(struct bpf_program *prog); LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog); LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog); LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog); LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog); LIBBPF_API void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type); LIBBPF_API void bpf_program__set_expected_attach_type(struct bpf_program *prog, enum bpf_attach_type type); LIBBPF_API bool bpf_program__is_socket_filter(struct bpf_program *prog); LIBBPF_API bool bpf_program__is_tracepoint(struct bpf_program *prog); LIBBPF_API bool bpf_program__is_raw_tracepoint(struct bpf_program *prog); LIBBPF_API bool bpf_program__is_kprobe(struct bpf_program *prog); LIBBPF_API bool bpf_program__is_sched_cls(struct bpf_program *prog); LIBBPF_API bool bpf_program__is_sched_act(struct bpf_program *prog); LIBBPF_API bool bpf_program__is_xdp(struct bpf_program *prog); LIBBPF_API bool bpf_program__is_perf_event(struct bpf_program *prog); /* * No need for __attribute__((packed)), all members of 'bpf_map_def' * are all aligned. In addition, using __attribute__((packed)) * would trigger a -Wpacked warning message, and lead to an error * if -Werror is set. */ struct bpf_map_def { unsigned int type; unsigned int key_size; unsigned int value_size; unsigned int max_entries; unsigned int map_flags; }; /* * The 'struct bpf_map' in include/linux/bpf.h is internal to the kernel, * so no need to worry about a name clash. */ struct bpf_map; LIBBPF_API struct bpf_map * bpf_object__find_map_by_name(struct bpf_object *obj, const char *name); LIBBPF_API int bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name); /* * Get bpf_map through the offset of corresponding struct bpf_map_def * in the BPF object file. 
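* The offset is the byte offset of the map's bpf_map_def inside the
 * object's maps section; on failure the call returns
 * ERR_PTR(-ENOENT), so check the result with libbpf_get_error()
 * rather than comparing against NULL.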
*/ LIBBPF_API struct bpf_map * bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset); LIBBPF_API struct bpf_map * bpf_map__next(struct bpf_map *map, struct bpf_object *obj); #define bpf_object__for_each_map(pos, obj) \ for ((pos) = bpf_map__next(NULL, (obj)); \ (pos) != NULL; \ (pos) = bpf_map__next((pos), (obj))) #define bpf_map__for_each bpf_object__for_each_map LIBBPF_API struct bpf_map * bpf_map__prev(struct bpf_map *map, struct bpf_object *obj); LIBBPF_API int bpf_map__fd(struct bpf_map *map); LIBBPF_API const struct bpf_map_def *bpf_map__def(struct bpf_map *map); LIBBPF_API const char *bpf_map__name(struct bpf_map *map); LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map); LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map); typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv, bpf_map_clear_priv_t clear_priv); LIBBPF_API void *bpf_map__priv(struct bpf_map *map); LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries); LIBBPF_API bool bpf_map__is_offload_neutral(struct bpf_map *map); LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path); LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path); LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd); LIBBPF_API long libbpf_get_error(const void *ptr); struct bpf_prog_load_attr { const char *file; enum bpf_prog_type prog_type; enum bpf_attach_type expected_attach_type; int ifindex; }; LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, struct bpf_object **pobj, int *prog_fd); LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type, struct bpf_object **pobj, int *prog_fd); LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags); LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags); enum bpf_perf_event_ret { LIBBPF_PERF_EVENT_DONE = 0, LIBBPF_PERF_EVENT_ERROR = -1, LIBBPF_PERF_EVENT_CONT = -2, }; struct perf_event_header; typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr, void *private_data); LIBBPF_API enum bpf_perf_event_ret bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, void **copy_mem, size_t *copy_size, bpf_perf_event_print_t fn, void *private_data); struct nlattr; typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb); int libbpf_netlink_open(unsigned int *nl_pid); int libbpf_nl_get_link(int sock, unsigned int nl_pid, libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie); int libbpf_nl_get_class(int sock, unsigned int nl_pid, int ifindex, libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie); int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex, libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie); int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle, libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie); struct bpf_prog_linfo; struct bpf_prog_info; LIBBPF_API void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo); LIBBPF_API struct bpf_prog_linfo * bpf_prog_linfo__new(const struct bpf_prog_info *info); LIBBPF_API const struct bpf_line_info * bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo, __u64 addr, __u32 func_idx, __u32 nr_skip); LIBBPF_API const struct bpf_line_info * 
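/* A minimal usage sketch (illustrative only), assuming 'info' points
 * to a struct bpf_prog_info whose line_info arrays were already
 * fetched from the kernel:
 *
 *	struct bpf_prog_linfo *linfo = bpf_prog_linfo__new(info);
 *	const struct bpf_line_info *rec;
 *
 *	if (linfo) {
 *		rec = bpf_prog_linfo__lfind(linfo, insn_off, 0);
 *		bpf_prog_linfo__free(linfo);
 *	}
 *
 * 'insn_off' stands in for an instruction offset of interest; a NULL
 * 'rec' means no line info record covers that offset.
 */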
bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo, __u32 insn_off, __u32 nr_skip); /* * Probe for supported system features * * Note that running many of these probes in a short amount of time can cause * the kernel to reach the maximal size of lockable memory allowed for the * user, causing subsequent probes to fail. In this case, the caller may want * to adjust that limit with setrlimit(). */ LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex); LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex); LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type, __u32 ifindex); /* * Get bpf_prog_info in contiguous memory * * struct bpf_prog_info has multiple arrays. The user has the option to choose * which arrays to fetch from the kernel. The following APIs provide a uniform * way to fetch this data. All arrays in bpf_prog_info are stored in a single * contiguous memory region. This makes it easy to store the info in a * file. * * Before writing bpf_prog_info_linear to files, it is necessary to * translate pointers in bpf_prog_info to offsets. Helper functions * bpf_program__bpil_addr_to_offs() and bpf_program__bpil_offs_to_addr() * are introduced to switch between pointers and offsets. * * Examples: * # To fetch map_ids and prog_tags: * __u64 arrays = (1UL << BPF_PROG_INFO_MAP_IDS) | * (1UL << BPF_PROG_INFO_PROG_TAGS); * struct bpf_prog_info_linear *info_linear = * bpf_program__get_prog_info_linear(fd, arrays); * * # To save data in file * bpf_program__bpil_addr_to_offs(info_linear); * write(f, info_linear, sizeof(*info_linear) + info_linear->data_len); * * # To read data from file * read(f, info_linear, <proper_size>); * bpf_program__bpil_offs_to_addr(info_linear); */ enum bpf_prog_info_array { BPF_PROG_INFO_FIRST_ARRAY = 0, BPF_PROG_INFO_JITED_INSNS = 0, BPF_PROG_INFO_XLATED_INSNS, BPF_PROG_INFO_MAP_IDS, BPF_PROG_INFO_JITED_KSYMS, BPF_PROG_INFO_JITED_FUNC_LENS, BPF_PROG_INFO_FUNC_INFO, BPF_PROG_INFO_LINE_INFO, BPF_PROG_INFO_JITED_LINE_INFO, BPF_PROG_INFO_PROG_TAGS, BPF_PROG_INFO_LAST_ARRAY, }; struct bpf_prog_info_linear { /* size of struct bpf_prog_info, when the tool is compiled */ __u32 info_len; /* total bytes allocated for data, rounded up to 8 bytes */ __u32 data_len; /* which arrays are included in data */ __u64 arrays; struct bpf_prog_info info; __u8 data[]; }; LIBBPF_API struct bpf_prog_info_linear * bpf_program__get_prog_info_linear(int fd, __u64 arrays); LIBBPF_API void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear); LIBBPF_API void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear); #ifdef __cplusplus } /* extern "C" */ #endif #endif /* __LIBBPF_LIBBPF_H */ dwarves-dfsg-1.15/lib/bpf/src/libbpf.map000066400000000000000000000071011344730411300200740ustar00rootroot00000000000000LIBBPF_0.0.1 { global: bpf_btf_get_fd_by_id; bpf_create_map; bpf_create_map_in_map; bpf_create_map_in_map_node; bpf_create_map_name; bpf_create_map_node; bpf_create_map_xattr; bpf_load_btf; bpf_load_program; bpf_load_program_xattr; bpf_map__btf_key_type_id; bpf_map__btf_value_type_id; bpf_map__def; bpf_map__fd; bpf_map__is_offload_neutral; bpf_map__name; bpf_map__next; bpf_map__pin; bpf_map__prev; bpf_map__priv; bpf_map__reuse_fd; bpf_map__set_ifindex; bpf_map__set_inner_map_fd; bpf_map__set_priv; bpf_map__unpin; bpf_map_delete_elem; bpf_map_get_fd_by_id; bpf_map_get_next_id; bpf_map_get_next_key; bpf_map_lookup_and_delete_elem; bpf_map_lookup_elem; bpf_map_update_elem; bpf_obj_get;
bpf_obj_get_info_by_fd; bpf_obj_pin; bpf_object__btf_fd; bpf_object__close; bpf_object__find_map_by_name; bpf_object__find_map_by_offset; bpf_object__find_program_by_title; bpf_object__kversion; bpf_object__load; bpf_object__name; bpf_object__next; bpf_object__open; bpf_object__open_buffer; bpf_object__open_xattr; bpf_object__pin; bpf_object__pin_maps; bpf_object__pin_programs; bpf_object__priv; bpf_object__set_priv; bpf_object__unload; bpf_object__unpin_maps; bpf_object__unpin_programs; bpf_perf_event_read_simple; bpf_prog_attach; bpf_prog_detach; bpf_prog_detach2; bpf_prog_get_fd_by_id; bpf_prog_get_next_id; bpf_prog_load; bpf_prog_load_xattr; bpf_prog_query; bpf_prog_test_run; bpf_prog_test_run_xattr; bpf_program__fd; bpf_program__is_kprobe; bpf_program__is_perf_event; bpf_program__is_raw_tracepoint; bpf_program__is_sched_act; bpf_program__is_sched_cls; bpf_program__is_socket_filter; bpf_program__is_tracepoint; bpf_program__is_xdp; bpf_program__load; bpf_program__next; bpf_program__nth_fd; bpf_program__pin; bpf_program__pin_instance; bpf_program__prev; bpf_program__priv; bpf_program__set_expected_attach_type; bpf_program__set_ifindex; bpf_program__set_kprobe; bpf_program__set_perf_event; bpf_program__set_prep; bpf_program__set_priv; bpf_program__set_raw_tracepoint; bpf_program__set_sched_act; bpf_program__set_sched_cls; bpf_program__set_socket_filter; bpf_program__set_tracepoint; bpf_program__set_type; bpf_program__set_xdp; bpf_program__title; bpf_program__unload; bpf_program__unpin; bpf_program__unpin_instance; bpf_prog_linfo__free; bpf_prog_linfo__new; bpf_prog_linfo__lfind_addr_func; bpf_prog_linfo__lfind; bpf_raw_tracepoint_open; bpf_set_link_xdp_fd; bpf_task_fd_query; bpf_verify_program; btf__fd; btf__find_by_name; btf__free; btf__get_from_id; btf__name_by_offset; btf__new; btf__resolve_size; btf__resolve_type; btf__type_by_id; libbpf_attach_type_by_name; libbpf_get_error; libbpf_prog_type_by_name; libbpf_set_print; libbpf_strerror; local: *; }; LIBBPF_0.0.2 { global: bpf_probe_helper; bpf_probe_map_type; bpf_probe_prog_type; bpf_map__resize; bpf_map_lookup_elem_flags; bpf_object__btf; bpf_object__find_map_fd_by_name; bpf_get_link_xdp_id; btf__dedup; btf__get_map_kv_tids; btf__get_nr_types; btf__get_raw_data; btf__load; btf_ext__free; btf_ext__func_info_rec_size; btf_ext__get_raw_data; btf_ext__line_info_rec_size; btf_ext__new; btf_ext__reloc_func_info; btf_ext__reloc_line_info; xsk_umem__create; xsk_socket__create; xsk_umem__delete; xsk_socket__delete; xsk_umem__fd; xsk_socket__fd; bpf_program__get_prog_info_linear; bpf_program__bpil_addr_to_offs; bpf_program__bpil_offs_to_addr; } LIBBPF_0.0.1; dwarves-dfsg-1.15/lib/bpf/src/libbpf.pc.template000066400000000000000000000003671344730411300215420ustar00rootroot00000000000000# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) prefix=@PREFIX@ libdir=@LIBDIR@ includedir=${prefix}/include Name: libbpf Description: BPF library Version: @VERSION@ Libs: -L${libdir} -lbpf Requires.private: libelf Cflags: -I${includedir} dwarves-dfsg-1.15/lib/bpf/src/libbpf_errno.c000066400000000000000000000034671344730411300207610ustar00rootroot00000000000000// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * Copyright (C) 2013-2015 Alexei Starovoitov * Copyright (C) 2015 Wang Nan * Copyright (C) 2015 Huawei Inc. * Copyright (C) 2017 Nicira, Inc. 
*/ #undef _GNU_SOURCE #include <string.h> #include <stdio.h> #include "libbpf.h" #define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START) #define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c) #define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START) static const char *libbpf_strerror_table[NR_ERRNO] = { [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf", [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid", [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost", [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch", [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf", [ERRCODE_OFFSET(RELOC)] = "Relocation failed", [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading", [ERRCODE_OFFSET(PROG2BIG)] = "Program too big", [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version", [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type", [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message", [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence", [ERRCODE_OFFSET(NLPARSE)] = "Incorrect netlink message parsing", }; int libbpf_strerror(int err, char *buf, size_t size) { if (!buf || !size) return -1; err = err > 0 ? err : -err; if (err < __LIBBPF_ERRNO__START) { int ret; ret = strerror_r(err, buf, size); buf[size - 1] = '\0'; return ret; } if (err < __LIBBPF_ERRNO__END) { const char *msg; msg = libbpf_strerror_table[ERRNO_OFFSET(err)]; snprintf(buf, size, "%s", msg); buf[size - 1] = '\0'; return 0; } snprintf(buf, size, "Unknown libbpf error %d", err); buf[size - 1] = '\0'; return -1; } dwarves-dfsg-1.15/lib/bpf/src/libbpf_probes.c000066400000000000000000000131421344730411300211150ustar00rootroot00000000000000// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2019 Netronome Systems, Inc. */ #include <errno.h> #include <fcntl.h> #include <string.h> #include <stdlib.h> #include <unistd.h> #include <net/if.h> #include <sys/utsname.h> #include <linux/filter.h> #include <linux/kernel.h> #include "bpf.h" #include "libbpf.h" static bool grep(const char *buffer, const char *pattern) { return !!strstr(buffer, pattern); } static int get_vendor_id(int ifindex) { char ifname[IF_NAMESIZE], path[64], buf[8]; ssize_t len; int fd; if (!if_indextoname(ifindex, ifname)) return -1; snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname); fd = open(path, O_RDONLY); if (fd < 0) return -1; len = read(fd, buf, sizeof(buf)); close(fd); if (len < 0) return -1; if (len >= (ssize_t)sizeof(buf)) return -1; buf[len] = '\0'; return strtol(buf, NULL, 0); } static int get_kernel_version(void) { int version, subversion, patchlevel; struct utsname utsn; /* Return 0 on failure, and attempt to probe with empty kversion */ if (uname(&utsn)) return 0; if (sscanf(utsn.release, "%d.%d.%d", &version, &subversion, &patchlevel) != 3) return 0; return (version << 16) + (subversion << 8) + patchlevel; } static void probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex) { struct bpf_load_program_attr xattr = {}; int fd; switch (prog_type) { case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT; break; case BPF_PROG_TYPE_KPROBE: xattr.kern_version = get_kernel_version(); break; case BPF_PROG_TYPE_UNSPEC: case BPF_PROG_TYPE_SOCKET_FILTER: case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_TRACEPOINT: case BPF_PROG_TYPE_XDP: case BPF_PROG_TYPE_PERF_EVENT: case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_SK_SKB: case
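/*
 * Example use of the probe API implemented in this file (a sketch;
 * `ifindex` 0 probes the host kernel rather than an offload device, and
 * the results are only meaningful for the currently running kernel):
 *
 *	bool have_xdp = bpf_probe_prog_type(BPF_PROG_TYPE_XDP, 0);
 *	bool have_xskmap = bpf_probe_map_type(BPF_MAP_TYPE_XSKMAP, 0);
 *	bool have_redirect = bpf_probe_helper(BPF_FUNC_redirect_map,
 *					      BPF_PROG_TYPE_XDP, 0);
 */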
BPF_PROG_TYPE_CGROUP_DEVICE: case BPF_PROG_TYPE_SK_MSG: case BPF_PROG_TYPE_RAW_TRACEPOINT: case BPF_PROG_TYPE_LWT_SEG6LOCAL: case BPF_PROG_TYPE_LIRC_MODE2: case BPF_PROG_TYPE_SK_REUSEPORT: case BPF_PROG_TYPE_FLOW_DISSECTOR: default: break; } xattr.prog_type = prog_type; xattr.insns = insns; xattr.insns_cnt = insns_cnt; xattr.license = "GPL"; xattr.prog_ifindex = ifindex; fd = bpf_load_program_xattr(&xattr, buf, buf_len); if (fd >= 0) close(fd); } bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex) { struct bpf_insn insns[2] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN() }; if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS) /* nfp returns -EINVAL on exit(0) with TC offload */ insns[0].imm = 2; errno = 0; probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex); return errno != EINVAL && errno != EOPNOTSUPP; } bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) { int key_size, value_size, max_entries, map_flags; struct bpf_create_map_attr attr = {}; int fd = -1, fd_inner; key_size = sizeof(__u32); value_size = sizeof(__u32); max_entries = 1; map_flags = 0; switch (map_type) { case BPF_MAP_TYPE_STACK_TRACE: value_size = sizeof(__u64); break; case BPF_MAP_TYPE_LPM_TRIE: key_size = sizeof(__u64); value_size = sizeof(__u64); map_flags = BPF_F_NO_PREALLOC; break; case BPF_MAP_TYPE_CGROUP_STORAGE: case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: key_size = sizeof(struct bpf_cgroup_storage_key); value_size = sizeof(__u64); max_entries = 0; break; case BPF_MAP_TYPE_QUEUE: case BPF_MAP_TYPE_STACK: key_size = 0; break; case BPF_MAP_TYPE_UNSPEC: case BPF_MAP_TYPE_HASH: case BPF_MAP_TYPE_ARRAY: case BPF_MAP_TYPE_PROG_ARRAY: case BPF_MAP_TYPE_PERF_EVENT_ARRAY: case BPF_MAP_TYPE_PERCPU_HASH: case BPF_MAP_TYPE_PERCPU_ARRAY: case BPF_MAP_TYPE_CGROUP_ARRAY: case BPF_MAP_TYPE_LRU_HASH: case BPF_MAP_TYPE_LRU_PERCPU_HASH: case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: case BPF_MAP_TYPE_DEVMAP: case BPF_MAP_TYPE_SOCKMAP: case BPF_MAP_TYPE_CPUMAP: case BPF_MAP_TYPE_XSKMAP: case BPF_MAP_TYPE_SOCKHASH: case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: default: break; } if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS || map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { /* TODO: probe for device, once libbpf has a function to create * map-in-map for offload */ if (ifindex) return false; fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(__u32), sizeof(__u32), 1, 0); if (fd_inner < 0) return false; fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32), fd_inner, 1, 0); close(fd_inner); } else { /* Note: No other restriction on map type probes for offload */ attr.map_type = map_type; attr.key_size = key_size; attr.value_size = value_size; attr.max_entries = max_entries; attr.map_flags = map_flags; attr.map_ifindex = ifindex; fd = bpf_create_map_xattr(&attr); } if (fd >= 0) close(fd); return fd >= 0; } bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type, __u32 ifindex) { struct bpf_insn insns[2] = { BPF_EMIT_CALL(id), BPF_EXIT_INSN() }; char buf[4096] = {}; bool res; probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf), ifindex); res = !grep(buf, "invalid func ") && !grep(buf, "unknown func "); if (ifindex) { switch (get_vendor_id(ifindex)) { case 0x19ee: /* Netronome specific */ res = res && !grep(buf, "not supported by FW") && !grep(buf, "unsupported function id"); break; default: break; } } return res; } dwarves-dfsg-1.15/lib/bpf/src/libbpf_util.h000066400000000000000000000013011344730411300205770ustar00rootroot00000000000000/* 
SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* Copyright (c) 2019 Facebook */ #ifndef __LIBBPF_LIBBPF_UTIL_H #define __LIBBPF_LIBBPF_UTIL_H #include <stdbool.h> #ifdef __cplusplus extern "C" { #endif extern void libbpf_print(enum libbpf_print_level level, const char *format, ...) __attribute__((format(printf, 2, 3))); #define __pr(level, fmt, ...) \ do { \ libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__); \ } while (0) #define pr_warning(fmt, ...) __pr(LIBBPF_WARN, fmt, ##__VA_ARGS__) #define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__) #define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__) #ifdef __cplusplus } /* extern "C" */ #endif #endif dwarves-dfsg-1.15/lib/bpf/src/netlink.c000066400000000000000000000234541344730411300177560ustar00rootroot00000000000000// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2018 Facebook */ #include <stdlib.h> #include <memory.h> #include <unistd.h> #include <linux/bpf.h> #include <linux/rtnetlink.h> #include <sys/socket.h> #include <errno.h> #include <time.h> #include "bpf.h" #include "libbpf.h" #include "nlattr.h" #ifndef SOL_NETLINK #define SOL_NETLINK 270 #endif typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t, void *cookie); struct xdp_id_md { int ifindex; __u32 flags; __u32 id; }; int libbpf_netlink_open(__u32 *nl_pid) { struct sockaddr_nl sa; socklen_t addrlen; int one = 1, ret; int sock; memset(&sa, 0, sizeof(sa)); sa.nl_family = AF_NETLINK; sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); if (sock < 0) return -errno; if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK, &one, sizeof(one)) < 0) { fprintf(stderr, "Netlink error reporting not supported\n"); } if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) { ret = -errno; goto cleanup; } addrlen = sizeof(sa); if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) { ret = -errno; goto cleanup; } if (addrlen != sizeof(sa)) { ret = -LIBBPF_ERRNO__INTERNAL; goto cleanup; } *nl_pid = sa.nl_pid; return sock; cleanup: close(sock); return ret; } static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq, __dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn, void *cookie) { bool multipart = true; struct nlmsgerr *err; struct nlmsghdr *nh; char buf[4096]; int len, ret; while (multipart) { multipart = false; len = recv(sock, buf, sizeof(buf), 0); if (len < 0) { ret = -errno; goto done; } if (len == 0) break; for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len); nh = NLMSG_NEXT(nh, len)) { if (nh->nlmsg_pid != nl_pid) { ret = -LIBBPF_ERRNO__WRNGPID; goto done; } if (nh->nlmsg_seq != seq) { ret = -LIBBPF_ERRNO__INVSEQ; goto done; } if (nh->nlmsg_flags & NLM_F_MULTI) multipart = true; switch (nh->nlmsg_type) { case NLMSG_ERROR: err = (struct nlmsgerr *)NLMSG_DATA(nh); if (!err->error) continue; ret = err->error; libbpf_nla_dump_errormsg(nh); goto done; case NLMSG_DONE: return 0; default: break; } if (_fn) { ret = _fn(nh, fn, cookie); if (ret) return ret; } } } ret = 0; done: return ret; } int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags) { int sock, seq = 0, ret; struct nlattr *nla, *nla_xdp; struct { struct nlmsghdr nh; struct ifinfomsg ifinfo; char attrbuf[64]; } req; __u32 nl_pid; sock = libbpf_netlink_open(&nl_pid); if (sock < 0) return sock; memset(&req, 0, sizeof(req)); req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)); req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; req.nh.nlmsg_type = RTM_SETLINK; req.nh.nlmsg_pid = 0; req.nh.nlmsg_seq = ++seq; req.ifinfo.ifi_family = AF_UNSPEC; req.ifinfo.ifi_index = ifindex; /* start nested attribute for XDP */ nla = (struct nlattr *)(((char *)&req) +
NLMSG_ALIGN(req.nh.nlmsg_len)); nla->nla_type = NLA_F_NESTED | IFLA_XDP; nla->nla_len = NLA_HDRLEN; /* add XDP fd */ nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len); nla_xdp->nla_type = IFLA_XDP_FD; nla_xdp->nla_len = NLA_HDRLEN + sizeof(int); memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd)); nla->nla_len += nla_xdp->nla_len; /* if user passed in any flags, add those too */ if (flags) { nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len); nla_xdp->nla_type = IFLA_XDP_FLAGS; nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags); memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags)); nla->nla_len += nla_xdp->nla_len; } req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len); if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) { ret = -errno; goto cleanup; } ret = bpf_netlink_recv(sock, nl_pid, seq, NULL, NULL, NULL); cleanup: close(sock); return ret; } static int __dump_link_nlmsg(struct nlmsghdr *nlh, libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie) { struct nlattr *tb[IFLA_MAX + 1], *attr; struct ifinfomsg *ifi = NLMSG_DATA(nlh); int len; len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)); attr = (struct nlattr *) ((void *) ifi + NLMSG_ALIGN(sizeof(*ifi))); if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0) return -LIBBPF_ERRNO__NLPARSE; return dump_link_nlmsg(cookie, ifi, tb); } static unsigned char get_xdp_id_attr(unsigned char mode, __u32 flags) { if (mode != XDP_ATTACHED_MULTI) return IFLA_XDP_PROG_ID; if (flags & XDP_FLAGS_DRV_MODE) return IFLA_XDP_DRV_PROG_ID; if (flags & XDP_FLAGS_HW_MODE) return IFLA_XDP_HW_PROG_ID; if (flags & XDP_FLAGS_SKB_MODE) return IFLA_XDP_SKB_PROG_ID; return IFLA_XDP_UNSPEC; } static int get_xdp_id(void *cookie, void *msg, struct nlattr **tb) { struct nlattr *xdp_tb[IFLA_XDP_MAX + 1]; struct xdp_id_md *xdp_id = cookie; struct ifinfomsg *ifinfo = msg; unsigned char mode, xdp_attr; int ret; if (xdp_id->ifindex && xdp_id->ifindex != ifinfo->ifi_index) return 0; if (!tb[IFLA_XDP]) return 0; ret = libbpf_nla_parse_nested(xdp_tb, IFLA_XDP_MAX, tb[IFLA_XDP], NULL); if (ret) return ret; if (!xdp_tb[IFLA_XDP_ATTACHED]) return 0; mode = libbpf_nla_getattr_u8(xdp_tb[IFLA_XDP_ATTACHED]); if (mode == XDP_ATTACHED_NONE) return 0; xdp_attr = get_xdp_id_attr(mode, xdp_id->flags); if (!xdp_attr || !xdp_tb[xdp_attr]) return 0; xdp_id->id = libbpf_nla_getattr_u32(xdp_tb[xdp_attr]); return 0; } int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags) { struct xdp_id_md xdp_id = {}; int sock, ret; __u32 nl_pid; __u32 mask; if (flags & ~XDP_FLAGS_MASK) return -EINVAL; /* Check whether the single {HW,DRV,SKB} mode is set */ flags &= (XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE); mask = flags - 1; if (flags && flags & mask) return -EINVAL; sock = libbpf_netlink_open(&nl_pid); if (sock < 0) return sock; xdp_id.ifindex = ifindex; xdp_id.flags = flags; ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_id, &xdp_id); if (!ret) *prog_id = xdp_id.id; close(sock); return ret; } int libbpf_nl_get_link(int sock, unsigned int nl_pid, libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie) { struct { struct nlmsghdr nlh; struct ifinfomsg ifm; } req = { .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)), .nlh.nlmsg_type = RTM_GETLINK, .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, .ifm.ifi_family = AF_PACKET, }; int seq = time(NULL); req.nlh.nlmsg_seq = seq; if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0) return -errno; return bpf_netlink_recv(sock, nl_pid, seq, __dump_link_nlmsg, dump_link_nlmsg, cookie); } static int __dump_class_nlmsg(struct nlmsghdr *nlh, 
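/*
 * Example use of the XDP attach helpers above (a sketch; `ifindex` and
 * `prog_fd` come from if_nametoindex() and bpf_prog_load(), error checks
 * elided; passing an fd of -1 detaches the currently attached program):
 *
 *	bpf_set_link_xdp_fd(ifindex, prog_fd, XDP_FLAGS_SKB_MODE);
 *
 *	__u32 prog_id = 0;
 *	bpf_get_link_xdp_id(ifindex, &prog_id, XDP_FLAGS_SKB_MODE);
 *
 *	bpf_set_link_xdp_fd(ifindex, -1, XDP_FLAGS_SKB_MODE);
 */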
libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie) { struct nlattr *tb[TCA_MAX + 1], *attr; struct tcmsg *t = NLMSG_DATA(nlh); int len; len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t)); attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t))); if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0) return -LIBBPF_ERRNO__NLPARSE; return dump_class_nlmsg(cookie, t, tb); } int libbpf_nl_get_class(int sock, unsigned int nl_pid, int ifindex, libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie) { struct { struct nlmsghdr nlh; struct tcmsg t; } req = { .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)), .nlh.nlmsg_type = RTM_GETTCLASS, .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, .t.tcm_family = AF_UNSPEC, .t.tcm_ifindex = ifindex, }; int seq = time(NULL); req.nlh.nlmsg_seq = seq; if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0) return -errno; return bpf_netlink_recv(sock, nl_pid, seq, __dump_class_nlmsg, dump_class_nlmsg, cookie); } static int __dump_qdisc_nlmsg(struct nlmsghdr *nlh, libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie) { struct nlattr *tb[TCA_MAX + 1], *attr; struct tcmsg *t = NLMSG_DATA(nlh); int len; len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t)); attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t))); if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0) return -LIBBPF_ERRNO__NLPARSE; return dump_qdisc_nlmsg(cookie, t, tb); } int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex, libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie) { struct { struct nlmsghdr nlh; struct tcmsg t; } req = { .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)), .nlh.nlmsg_type = RTM_GETQDISC, .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, .t.tcm_family = AF_UNSPEC, .t.tcm_ifindex = ifindex, }; int seq = time(NULL); req.nlh.nlmsg_seq = seq; if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0) return -errno; return bpf_netlink_recv(sock, nl_pid, seq, __dump_qdisc_nlmsg, dump_qdisc_nlmsg, cookie); } static int __dump_filter_nlmsg(struct nlmsghdr *nlh, libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie) { struct nlattr *tb[TCA_MAX + 1], *attr; struct tcmsg *t = NLMSG_DATA(nlh); int len; len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t)); attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t))); if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0) return -LIBBPF_ERRNO__NLPARSE; return dump_filter_nlmsg(cookie, t, tb); } int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle, libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie) { struct { struct nlmsghdr nlh; struct tcmsg t; } req = { .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)), .nlh.nlmsg_type = RTM_GETTFILTER, .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, .t.tcm_family = AF_UNSPEC, .t.tcm_ifindex = ifindex, .t.tcm_parent = handle, }; int seq = time(NULL); req.nlh.nlmsg_seq = seq; if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0) return -errno; return bpf_netlink_recv(sock, nl_pid, seq, __dump_filter_nlmsg, dump_filter_nlmsg, cookie); } dwarves-dfsg-1.15/lib/bpf/src/nlattr.c000066400000000000000000000115771344730411300176230ustar00rootroot00000000000000// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * NETLINK Netlink attributes * * Copyright (c) 2003-2013 Thomas Graf */ #include #include "nlattr.h" #include #include #include static uint16_t nla_attr_minlen[LIBBPF_NLA_TYPE_MAX+1] = { [LIBBPF_NLA_U8] = sizeof(uint8_t), [LIBBPF_NLA_U16] = sizeof(uint16_t), [LIBBPF_NLA_U32] = sizeof(uint32_t), [LIBBPF_NLA_U64] = sizeof(uint64_t), [LIBBPF_NLA_STRING] = 1, [LIBBPF_NLA_FLAG] = 0, 
}; static struct nlattr *nla_next(const struct nlattr *nla, int *remaining) { int totlen = NLA_ALIGN(nla->nla_len); *remaining -= totlen; return (struct nlattr *) ((char *) nla + totlen); } static int nla_ok(const struct nlattr *nla, int remaining) { return remaining >= sizeof(*nla) && nla->nla_len >= sizeof(*nla) && nla->nla_len <= remaining; } static int nla_type(const struct nlattr *nla) { return nla->nla_type & NLA_TYPE_MASK; } static int validate_nla(struct nlattr *nla, int maxtype, struct libbpf_nla_policy *policy) { struct libbpf_nla_policy *pt; unsigned int minlen = 0; int type = nla_type(nla); if (type < 0 || type > maxtype) return 0; pt = &policy[type]; if (pt->type > LIBBPF_NLA_TYPE_MAX) return 0; if (pt->minlen) minlen = pt->minlen; else if (pt->type != LIBBPF_NLA_UNSPEC) minlen = nla_attr_minlen[pt->type]; if (libbpf_nla_len(nla) < minlen) return -1; if (pt->maxlen && libbpf_nla_len(nla) > pt->maxlen) return -1; if (pt->type == LIBBPF_NLA_STRING) { char *data = libbpf_nla_data(nla); if (data[libbpf_nla_len(nla) - 1] != '\0') return -1; } return 0; } static inline int nlmsg_len(const struct nlmsghdr *nlh) { return nlh->nlmsg_len - NLMSG_HDRLEN; } /** * Create attribute index based on a stream of attributes. * @arg tb Index array to be filled (maxtype+1 elements). * @arg maxtype Maximum attribute type expected and accepted. * @arg head Head of attribute stream. * @arg len Length of attribute stream. * @arg policy Attribute validation policy. * * Iterates over the stream of attributes and stores a pointer to each * attribute in the index array, using the attribute type as the index into * the array. Attributes with a type greater than the maximum type * specified will be silently ignored in order to maintain backwards * compatibility. If \a policy is not NULL, the attribute will be * validated using the specified policy. * * @see nla_validate * @return 0 on success or a negative error code. */ int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len, struct libbpf_nla_policy *policy) { struct nlattr *nla; int rem, err; memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); libbpf_nla_for_each_attr(nla, head, len, rem) { int type = nla_type(nla); if (type > maxtype) continue; if (policy) { err = validate_nla(nla, maxtype, policy); if (err < 0) goto errout; } if (tb[type]) fprintf(stderr, "Attribute of type %#x found multiple times in message, " "previous attribute is being ignored.\n", type); tb[type] = nla; } err = 0; errout: return err; } /** * Create attribute index based on a nested attribute * @arg tb Index array to be filled (maxtype+1 elements). * @arg maxtype Maximum attribute type expected and accepted. * @arg nla Nested attribute. * @arg policy Attribute validation policy. * * Feeds the stream of attributes nested into the specified attribute * to libbpf_nla_parse(). * * @see libbpf_nla_parse * @return 0 on success or a negative error code.
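 *
 * Example (a sketch, mirroring get_xdp_id() in netlink.c; `tb` is a
 * link-level index produced by libbpf_nla_parse() with tb[IFLA_XDP] set,
 * and `prog_id` is a __u32 out-variable):
 *
 *	struct nlattr *xdp_tb[IFLA_XDP_MAX + 1];
 *
 *	if (libbpf_nla_parse_nested(xdp_tb, IFLA_XDP_MAX,
 *				    tb[IFLA_XDP], NULL) == 0 &&
 *	    xdp_tb[IFLA_XDP_PROG_ID])
 *		prog_id = libbpf_nla_getattr_u32(xdp_tb[IFLA_XDP_PROG_ID]);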
*/ int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla, struct libbpf_nla_policy *policy) { return libbpf_nla_parse(tb, maxtype, libbpf_nla_data(nla), libbpf_nla_len(nla), policy); } /* dump netlink extended ack error message */ int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh) { struct libbpf_nla_policy extack_policy[NLMSGERR_ATTR_MAX + 1] = { [NLMSGERR_ATTR_MSG] = { .type = LIBBPF_NLA_STRING }, [NLMSGERR_ATTR_OFFS] = { .type = LIBBPF_NLA_U32 }, }; struct nlattr *tb[NLMSGERR_ATTR_MAX + 1], *attr; struct nlmsgerr *err; char *errmsg = NULL; int hlen, alen; /* no TLVs, nothing to do here */ if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS)) return 0; err = (struct nlmsgerr *)NLMSG_DATA(nlh); hlen = sizeof(*err); /* if NLM_F_CAPPED is set then the inner err msg was capped */ if (!(nlh->nlmsg_flags & NLM_F_CAPPED)) hlen += nlmsg_len(&err->msg); attr = (struct nlattr *) ((void *) err + hlen); alen = nlh->nlmsg_len - hlen; if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen, extack_policy) != 0) { fprintf(stderr, "Failed to parse extended error attributes\n"); return 0; } if (tb[NLMSGERR_ATTR_MSG]) errmsg = (char *) libbpf_nla_data(tb[NLMSGERR_ATTR_MSG]); fprintf(stderr, "Kernel error message: %s\n", errmsg); return 0; } dwarves-dfsg-1.15/lib/bpf/src/nlattr.h000066400000000000000000000052501344730411300176170ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* * NETLINK Netlink attributes * * Copyright (c) 2003-2013 Thomas Graf */ #ifndef __LIBBPF_NLATTR_H #define __LIBBPF_NLATTR_H #include <stdint.h> #include <linux/netlink.h> /* avoid multiple definition of netlink features */ #define __LINUX_NETLINK_H /** * Standard attribute types to specify validation policy */ enum { LIBBPF_NLA_UNSPEC, /**< Unspecified type, binary data chunk */ LIBBPF_NLA_U8, /**< 8 bit integer */ LIBBPF_NLA_U16, /**< 16 bit integer */ LIBBPF_NLA_U32, /**< 32 bit integer */ LIBBPF_NLA_U64, /**< 64 bit integer */ LIBBPF_NLA_STRING, /**< NUL terminated character string */ LIBBPF_NLA_FLAG, /**< Flag */ LIBBPF_NLA_MSECS, /**< Microseconds (64 bit) */ LIBBPF_NLA_NESTED, /**< Nested attributes */ __LIBBPF_NLA_TYPE_MAX, }; #define LIBBPF_NLA_TYPE_MAX (__LIBBPF_NLA_TYPE_MAX - 1) /** * @ingroup attr * Attribute validation policy. * * See section @core_doc{core_attr_parse,Attribute Parsing} for more details.
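 *
 * Example (a sketch; ATTR_NAME, ATTR_ID and ATTR_MAX are hypothetical
 * attribute types used only for illustration): accept a NUL-terminated
 * string of at most 16 payload bytes and a plain 32-bit integer:
 *
 *	struct libbpf_nla_policy policy[ATTR_MAX + 1] = {
 *		[ATTR_NAME] = { .type = LIBBPF_NLA_STRING, .maxlen = 16 },
 *		[ATTR_ID]   = { .type = LIBBPF_NLA_U32 },
 *	};
 *
 * An attribute stream parsed with this policy via libbpf_nla_parse() is
 * rejected if an attribute violates its stated type constraints.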
*/ struct libbpf_nla_policy { /** Type of attribute or LIBBPF_NLA_UNSPEC */ uint16_t type; /** Minimal length of payload required */ uint16_t minlen; /** Maximal length of payload allowed */ uint16_t maxlen; }; /** * @ingroup attr * Iterate over a stream of attributes * @arg pos loop counter, set to current attribute * @arg head head of attribute stream * @arg len length of attribute stream * @arg rem initialized to len, holds bytes currently remaining in stream */ #define libbpf_nla_for_each_attr(pos, head, len, rem) \ for (pos = head, rem = len; \ nla_ok(pos, rem); \ pos = nla_next(pos, &(rem))) /** * libbpf_nla_data - head of payload * @nla: netlink attribute */ static inline void *libbpf_nla_data(const struct nlattr *nla) { return (char *) nla + NLA_HDRLEN; } static inline uint8_t libbpf_nla_getattr_u8(const struct nlattr *nla) { return *(uint8_t *)libbpf_nla_data(nla); } static inline uint32_t libbpf_nla_getattr_u32(const struct nlattr *nla) { return *(uint32_t *)libbpf_nla_data(nla); } static inline const char *libbpf_nla_getattr_str(const struct nlattr *nla) { return (const char *)libbpf_nla_data(nla); } /** * libbpf_nla_len - length of payload * @nla: netlink attribute */ static inline int libbpf_nla_len(const struct nlattr *nla) { return nla->nla_len - NLA_HDRLEN; } int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len, struct libbpf_nla_policy *policy); int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla, struct libbpf_nla_policy *policy); int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh); #endif /* __LIBBPF_NLATTR_H */ dwarves-dfsg-1.15/lib/bpf/src/str_error.c000066400000000000000000000007711344730411300203320ustar00rootroot00000000000000// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) #undef _GNU_SOURCE #include #include #include "str_error.h" /* * Wrapper to allow for building in non-GNU systems such as Alpine Linux's musl * libc, while checking strerror_r() return to avoid having to check this in * all places calling it. */ char *libbpf_strerror_r(int err, char *dst, int len) { int ret = strerror_r(err, dst, len); if (ret) snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret); return dst; } dwarves-dfsg-1.15/lib/bpf/src/str_error.h000066400000000000000000000003151344730411300203310ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LIBBPF_STR_ERROR_H #define __LIBBPF_STR_ERROR_H char *libbpf_strerror_r(int err, char *dst, int len); #endif /* __LIBBPF_STR_ERROR_H */ dwarves-dfsg-1.15/lib/bpf/src/xsk.c000066400000000000000000000403061344730411300171140ustar00rootroot00000000000000// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * AF_XDP user-space access library. * * Copyright(c) 2018 - 2019 Intel Corporation. 
* * Author(s): Magnus Karlsson */ #include <errno.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <arpa/inet.h> #include <asm/barrier.h> #include <linux/compiler.h> #include <linux/ethtool.h> #include <linux/filter.h> #include <linux/if_ether.h> #include <linux/if_packet.h> #include <linux/if_xdp.h> #include <linux/sockios.h> #include <net/if.h> #include <sys/ioctl.h> #include <sys/mman.h> #include <sys/socket.h> #include <sys/types.h> #include "bpf.h" #include "libbpf.h" #include "libbpf_util.h" #include "xsk.h" #ifndef SOL_XDP #define SOL_XDP 283 #endif #ifndef AF_XDP #define AF_XDP 44 #endif #ifndef PF_XDP #define PF_XDP AF_XDP #endif struct xsk_umem { struct xsk_ring_prod *fill; struct xsk_ring_cons *comp; char *umem_area; struct xsk_umem_config config; int fd; int refcount; }; struct xsk_socket { struct xsk_ring_cons *rx; struct xsk_ring_prod *tx; __u64 outstanding_tx; struct xsk_umem *umem; struct xsk_socket_config config; int fd; int xsks_map; int ifindex; int prog_fd; int qidconf_map_fd; int xsks_map_fd; __u32 queue_id; char ifname[IFNAMSIZ]; }; struct xsk_nl_info { bool xdp_prog_attached; int ifindex; int fd; }; /* For 32-bit systems, we need to use mmap2 as the offsets are 64-bit. * Unfortunately, it is not part of glibc. */ static inline void *xsk_mmap(void *addr, size_t length, int prot, int flags, int fd, __u64 offset) { #ifdef __NR_mmap2 unsigned int page_shift = __builtin_ffs(getpagesize()) - 1; long ret = syscall(__NR_mmap2, addr, length, prot, flags, fd, (off_t)(offset >> page_shift)); return (void *)ret; #else return mmap(addr, length, prot, flags, fd, offset); #endif } int xsk_umem__fd(const struct xsk_umem *umem) { return umem ? umem->fd : -EINVAL; } int xsk_socket__fd(const struct xsk_socket *xsk) { return xsk ? xsk->fd : -EINVAL; } static bool xsk_page_aligned(void *buffer) { unsigned long addr = (unsigned long)buffer; return !(addr & (getpagesize() - 1)); } static void xsk_set_umem_config(struct xsk_umem_config *cfg, const struct xsk_umem_config *usr_cfg) { if (!usr_cfg) { cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS; cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM; return; } cfg->fill_size = usr_cfg->fill_size; cfg->comp_size = usr_cfg->comp_size; cfg->frame_size = usr_cfg->frame_size; cfg->frame_headroom = usr_cfg->frame_headroom; } static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg, const struct xsk_socket_config *usr_cfg) { if (!usr_cfg) { cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS; cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; cfg->libbpf_flags = 0; cfg->xdp_flags = 0; cfg->bind_flags = 0; return 0; } if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD) return -EINVAL; cfg->rx_size = usr_cfg->rx_size; cfg->tx_size = usr_cfg->tx_size; cfg->libbpf_flags = usr_cfg->libbpf_flags; cfg->xdp_flags = usr_cfg->xdp_flags; cfg->bind_flags = usr_cfg->bind_flags; return 0; } int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, const struct xsk_umem_config *usr_config) { struct xdp_mmap_offsets off; struct xdp_umem_reg mr; struct xsk_umem *umem; socklen_t optlen; void *map; int err; if (!umem_area || !umem_ptr || !fill || !comp) return -EFAULT; if (!size && !xsk_page_aligned(umem_area)) return -EINVAL; umem = calloc(1, sizeof(*umem)); if (!umem) return -ENOMEM; umem->fd = socket(AF_XDP, SOCK_RAW, 0); if (umem->fd < 0) { err = -errno; goto out_umem_alloc; } umem->umem_area = umem_area; xsk_set_umem_config(&umem->config, usr_config); mr.addr = (uintptr_t)umem_area; mr.len = size; mr.chunk_size = umem->config.frame_size; mr.headroom = umem->config.frame_headroom; err = setsockopt(umem->fd,
SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)); if (err) { err = -errno; goto out_socket; } err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_FILL_RING, &umem->config.fill_size, sizeof(umem->config.fill_size)); if (err) { err = -errno; goto out_socket; } err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &umem->config.comp_size, sizeof(umem->config.comp_size)); if (err) { err = -errno; goto out_socket; } optlen = sizeof(off); err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); if (err) { err = -errno; goto out_socket; } map = xsk_mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd, XDP_UMEM_PGOFF_FILL_RING); if (map == MAP_FAILED) { err = -errno; goto out_socket; } umem->fill = fill; fill->mask = umem->config.fill_size - 1; fill->size = umem->config.fill_size; fill->producer = map + off.fr.producer; fill->consumer = map + off.fr.consumer; fill->ring = map + off.fr.desc; fill->cached_cons = umem->config.fill_size; map = xsk_mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd, XDP_UMEM_PGOFF_COMPLETION_RING); if (map == MAP_FAILED) { err = -errno; goto out_mmap; } umem->comp = comp; comp->mask = umem->config.comp_size - 1; comp->size = umem->config.comp_size; comp->producer = map + off.cr.producer; comp->consumer = map + off.cr.consumer; comp->ring = map + off.cr.desc; *umem_ptr = umem; return 0; out_mmap: munmap(umem->fill, off.fr.desc + umem->config.fill_size * sizeof(__u64)); out_socket: close(umem->fd); out_umem_alloc: free(umem); return err; } static int xsk_load_xdp_prog(struct xsk_socket *xsk) { char bpf_log_buf[BPF_LOG_BUF_SIZE]; int err, prog_fd; /* This is the C program: * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx) * { * int *qidconf, index = ctx->rx_queue_index; * * // A set entry here means that the corresponding queue_id * // has an active AF_XDP socket bound to it.
* qidconf = bpf_map_lookup_elem(&qidconf_map, &index); * if (!qidconf) * return XDP_ABORTED; * * if (*qidconf) * return bpf_redirect_map(&xsks_map, index, 0); * * return XDP_PASS; * } */ struct bpf_insn prog[] = { /* r1 = *(u32 *)(r1 + 16) */ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 16), /* *(u32 *)(r10 - 4) = r1 */ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), BPF_LD_MAP_FD(BPF_REG_1, xsk->qidconf_map_fd), BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), BPF_MOV32_IMM(BPF_REG_0, 0), /* if r1 == 0 goto +8 */ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8), BPF_MOV32_IMM(BPF_REG_0, 2), /* r1 = *(u32 *)(r1 + 0) */ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0), /* if r1 == 0 goto +5 */ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5), /* r2 = *(u32 *)(r10 - 4) */ BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd), BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4), BPF_MOV32_IMM(BPF_REG_3, 0), BPF_EMIT_CALL(BPF_FUNC_redirect_map), /* The jumps are to this instruction */ BPF_EXIT_INSN(), }; size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, prog, insns_cnt, "LGPL-2.1 or BSD-2-Clause", 0, bpf_log_buf, BPF_LOG_BUF_SIZE); if (prog_fd < 0) { pr_warning("BPF log buffer:\n%s", bpf_log_buf); return prog_fd; } err = bpf_set_link_xdp_fd(xsk->ifindex, prog_fd, xsk->config.xdp_flags); if (err) { close(prog_fd); return err; } xsk->prog_fd = prog_fd; return 0; } static int xsk_get_max_queues(struct xsk_socket *xsk) { struct ethtool_channels channels; struct ifreq ifr; int fd, err, ret; fd = socket(AF_INET, SOCK_DGRAM, 0); if (fd < 0) return -errno; channels.cmd = ETHTOOL_GCHANNELS; ifr.ifr_data = (void *)&channels; strncpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ); err = ioctl(fd, SIOCETHTOOL, &ifr); if (err && errno != EOPNOTSUPP) { ret = -errno; goto out; } if (channels.max_combined == 0 || errno == EOPNOTSUPP) /* If the device says it has no channels, then all traffic * is sent to a single stream, so max queues = 1. 
*/ ret = 1; else ret = channels.max_combined; out: close(fd); return ret; } static int xsk_create_bpf_maps(struct xsk_socket *xsk) { int max_queues; int fd; max_queues = xsk_get_max_queues(xsk); if (max_queues < 0) return max_queues; fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "qidconf_map", sizeof(int), sizeof(int), max_queues, 0); if (fd < 0) return fd; xsk->qidconf_map_fd = fd; fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map", sizeof(int), sizeof(int), max_queues, 0); if (fd < 0) { close(xsk->qidconf_map_fd); return fd; } xsk->xsks_map_fd = fd; return 0; } static void xsk_delete_bpf_maps(struct xsk_socket *xsk) { close(xsk->qidconf_map_fd); close(xsk->xsks_map_fd); } static int xsk_update_bpf_maps(struct xsk_socket *xsk, int qidconf_value, int xsks_value) { bool qidconf_map_updated = false, xsks_map_updated = false; struct bpf_prog_info prog_info = {}; __u32 prog_len = sizeof(prog_info); struct bpf_map_info map_info; __u32 map_len = sizeof(map_info); __u32 *map_ids; int reset_value = 0; __u32 num_maps; unsigned int i; int err; err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len); if (err) return err; num_maps = prog_info.nr_map_ids; map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids)); if (!map_ids) return -ENOMEM; memset(&prog_info, 0, prog_len); prog_info.nr_map_ids = num_maps; prog_info.map_ids = (__u64)(unsigned long)map_ids; err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len); if (err) goto out_map_ids; for (i = 0; i < prog_info.nr_map_ids; i++) { int fd; fd = bpf_map_get_fd_by_id(map_ids[i]); if (fd < 0) { err = -errno; goto out_maps; } err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len); if (err) goto out_maps; if (!strcmp(map_info.name, "qidconf_map")) { err = bpf_map_update_elem(fd, &xsk->queue_id, &qidconf_value, 0); if (err) goto out_maps; qidconf_map_updated = true; xsk->qidconf_map_fd = fd; } else if (!strcmp(map_info.name, "xsks_map")) { err = bpf_map_update_elem(fd, &xsk->queue_id, &xsks_value, 0); if (err) goto out_maps; xsks_map_updated = true; xsk->xsks_map_fd = fd; } if (qidconf_map_updated && xsks_map_updated) break; } if (!(qidconf_map_updated && xsks_map_updated)) { err = -ENOENT; goto out_maps; } err = 0; goto out_success; out_maps: if (qidconf_map_updated) (void)bpf_map_update_elem(xsk->qidconf_map_fd, &xsk->queue_id, &reset_value, 0); if (xsks_map_updated) (void)bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id, &reset_value, 0); out_success: if (qidconf_map_updated) close(xsk->qidconf_map_fd); if (xsks_map_updated) close(xsk->xsks_map_fd); out_map_ids: free(map_ids); return err; } static int xsk_setup_xdp_prog(struct xsk_socket *xsk) { bool prog_attached = false; __u32 prog_id = 0; int err; err = bpf_get_link_xdp_id(xsk->ifindex, &prog_id, xsk->config.xdp_flags); if (err) return err; if (!prog_id) { prog_attached = true; err = xsk_create_bpf_maps(xsk); if (err) return err; err = xsk_load_xdp_prog(xsk); if (err) goto out_maps; } else { xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id); } err = xsk_update_bpf_maps(xsk, true, xsk->fd); if (err) goto out_load; return 0; out_load: if (prog_attached) close(xsk->prog_fd); out_maps: if (prog_attached) xsk_delete_bpf_maps(xsk); return err; } int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, __u32 queue_id, struct xsk_umem *umem, struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, const struct xsk_socket_config *usr_config) { struct sockaddr_xdp sxdp = {}; struct xdp_mmap_offsets off; struct xsk_socket *xsk; socklen_t optlen; void *map; int err; if (!umem || 
!xsk_ptr || !rx || !tx) return -EFAULT; if (umem->refcount) { pr_warning("Error: shared umems not supported by libbpf.\n"); return -EBUSY; } xsk = calloc(1, sizeof(*xsk)); if (!xsk) return -ENOMEM; if (umem->refcount++ > 0) { xsk->fd = socket(AF_XDP, SOCK_RAW, 0); if (xsk->fd < 0) { err = -errno; goto out_xsk_alloc; } } else { xsk->fd = umem->fd; } xsk->outstanding_tx = 0; xsk->queue_id = queue_id; xsk->umem = umem; xsk->ifindex = if_nametoindex(ifname); if (!xsk->ifindex) { err = -errno; goto out_socket; } strncpy(xsk->ifname, ifname, IFNAMSIZ); err = xsk_set_xdp_socket_config(&xsk->config, usr_config); if (err) goto out_socket; if (rx) { err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING, &xsk->config.rx_size, sizeof(xsk->config.rx_size)); if (err) { err = -errno; goto out_socket; } } if (tx) { err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING, &xsk->config.tx_size, sizeof(xsk->config.tx_size)); if (err) { err = -errno; goto out_socket; } } optlen = sizeof(off); err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); if (err) { err = -errno; goto out_socket; } if (rx) { map = xsk_mmap(NULL, off.rx.desc + xsk->config.rx_size * sizeof(struct xdp_desc), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, xsk->fd, XDP_PGOFF_RX_RING); if (map == MAP_FAILED) { err = -errno; goto out_socket; } rx->mask = xsk->config.rx_size - 1; rx->size = xsk->config.rx_size; rx->producer = map + off.rx.producer; rx->consumer = map + off.rx.consumer; rx->ring = map + off.rx.desc; } xsk->rx = rx; if (tx) { map = xsk_mmap(NULL, off.tx.desc + xsk->config.tx_size * sizeof(struct xdp_desc), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, xsk->fd, XDP_PGOFF_TX_RING); if (map == MAP_FAILED) { err = -errno; goto out_mmap_rx; } tx->mask = xsk->config.tx_size - 1; tx->size = xsk->config.tx_size; tx->producer = map + off.tx.producer; tx->consumer = map + off.tx.consumer; tx->ring = map + off.tx.desc; tx->cached_cons = xsk->config.tx_size; } xsk->tx = tx; sxdp.sxdp_family = PF_XDP; sxdp.sxdp_ifindex = xsk->ifindex; sxdp.sxdp_queue_id = xsk->queue_id; sxdp.sxdp_flags = xsk->config.bind_flags; err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp)); if (err) { err = -errno; goto out_mmap_tx; } if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) { err = xsk_setup_xdp_prog(xsk); if (err) goto out_mmap_tx; } *xsk_ptr = xsk; return 0; out_mmap_tx: if (tx) munmap(xsk->tx, off.tx.desc + xsk->config.tx_size * sizeof(struct xdp_desc)); out_mmap_rx: if (rx) munmap(xsk->rx, off.rx.desc + xsk->config.rx_size * sizeof(struct xdp_desc)); out_socket: if (--umem->refcount) close(xsk->fd); out_xsk_alloc: free(xsk); return err; } int xsk_umem__delete(struct xsk_umem *umem) { struct xdp_mmap_offsets off; socklen_t optlen; int err; if (!umem) return 0; if (umem->refcount) return -EBUSY; optlen = sizeof(off); err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); if (!err) { munmap(umem->fill->ring, off.fr.desc + umem->config.fill_size * sizeof(__u64)); munmap(umem->comp->ring, off.cr.desc + umem->config.comp_size * sizeof(__u64)); } close(umem->fd); free(umem); return 0; } void xsk_socket__delete(struct xsk_socket *xsk) { struct xdp_mmap_offsets off; socklen_t optlen; int err; if (!xsk) return; (void)xsk_update_bpf_maps(xsk, 0, 0); optlen = sizeof(off); err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); if (!err) { if (xsk->rx) munmap(xsk->rx->ring, off.rx.desc + xsk->config.rx_size * sizeof(struct xdp_desc)); if (xsk->tx) munmap(xsk->tx->ring, off.tx.desc + xsk->config.tx_size * 
sizeof(struct xdp_desc)); } xsk->umem->refcount--; /* Do not close an fd that also has an associated umem connected * to it. */ if (xsk->fd != xsk->umem->fd) close(xsk->fd); free(xsk); } dwarves-dfsg-1.15/lib/bpf/src/xsk.h000066400000000000000000000115401344730411300171170ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* * AF_XDP user-space access library. * * Copyright(c) 2018 - 2019 Intel Corporation. * * Author(s): Magnus Karlsson */ #ifndef __LIBBPF_XSK_H #define __LIBBPF_XSK_H #include <stdio.h> #include <stdint.h> #include <linux/if_xdp.h> #include "libbpf.h" #ifdef __cplusplus extern "C" { #endif /* Do not access these members directly. Use the functions below. */ #define DEFINE_XSK_RING(name) \ struct name { \ __u32 cached_prod; \ __u32 cached_cons; \ __u32 mask; \ __u32 size; \ __u32 *producer; \ __u32 *consumer; \ void *ring; \ } DEFINE_XSK_RING(xsk_ring_prod); DEFINE_XSK_RING(xsk_ring_cons); struct xsk_umem; struct xsk_socket; static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill, __u32 idx) { __u64 *addrs = (__u64 *)fill->ring; return &addrs[idx & fill->mask]; } static inline const __u64 * xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx) { const __u64 *addrs = (const __u64 *)comp->ring; return &addrs[idx & comp->mask]; } static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx, __u32 idx) { struct xdp_desc *descs = (struct xdp_desc *)tx->ring; return &descs[idx & tx->mask]; } static inline const struct xdp_desc * xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx) { const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring; return &descs[idx & rx->mask]; } static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb) { __u32 free_entries = r->cached_cons - r->cached_prod; if (free_entries >= nb) return free_entries; /* Refresh the local tail pointer. * cached_cons is r->size bigger than the real consumer pointer so * that this addition can be avoided in the more frequently * executed code that computes free_entries in the beginning of * this function. Without this optimization it would have been * free_entries = r->cached_cons - r->cached_prod + r->size. */ r->cached_cons = *r->consumer + r->size; return r->cached_cons - r->cached_prod; } static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb) { __u32 entries = r->cached_prod - r->cached_cons; if (entries == 0) { r->cached_prod = *r->producer; entries = r->cached_prod - r->cached_cons; } return (entries > nb) ? nb : entries; } static inline size_t xsk_ring_prod__reserve(struct xsk_ring_prod *prod, size_t nb, __u32 *idx) { if (unlikely(xsk_prod_nb_free(prod, nb) < nb)) return 0; *idx = prod->cached_prod; prod->cached_prod += nb; return nb; } static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, size_t nb) { /* Make sure everything has been written to the ring before signalling * this to the kernel. */ smp_wmb(); *prod->producer += nb; } static inline size_t xsk_ring_cons__peek(struct xsk_ring_cons *cons, size_t nb, __u32 *idx) { size_t entries = xsk_cons_nb_avail(cons, nb); if (likely(entries > 0)) { /* Make sure we do not speculatively read the data before * we have received the packet buffers from the ring.
*/ smp_rmb(); *idx = cons->cached_cons; cons->cached_cons += entries; } return entries; } static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, size_t nb) { *cons->consumer += nb; } static inline void *xsk_umem__get_data(void *umem_area, __u64 addr) { return &((char *)umem_area)[addr]; } LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem); LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk); #define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048 #define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048 #define XSK_UMEM__DEFAULT_FRAME_SHIFT 11 /* 2048 bytes */ #define XSK_UMEM__DEFAULT_FRAME_SIZE (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT) #define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0 struct xsk_umem_config { __u32 fill_size; __u32 comp_size; __u32 frame_size; __u32 frame_headroom; }; /* Flags for the libbpf_flags field. */ #define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0) struct xsk_socket_config { __u32 rx_size; __u32 tx_size; __u32 libbpf_flags; __u32 xdp_flags; __u16 bind_flags; }; /* Set config to NULL to get the default configuration. */ LIBBPF_API int xsk_umem__create(struct xsk_umem **umem, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, const struct xsk_umem_config *config); LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk, const char *ifname, __u32 queue_id, struct xsk_umem *umem, struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, const struct xsk_socket_config *config); /* Returns 0 for success and -EBUSY if the umem is still in use. */ LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem); LIBBPF_API void xsk_socket__delete(struct xsk_socket *xsk); #ifdef __cplusplus } /* extern "C" */ #endif #endif /* __LIBBPF_XSK_H */
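/*
 * Example: a minimal AF_XDP receive loop built on the xsk.h ring accessors
 * above (a sketch; `rx` is the xsk_ring_cons and `umem_area` the buffer
 * region registered through xsk_umem__create() and xsk_socket__create(),
 * with batching and wakeup handling elided):
 *
 *	__u32 idx = 0;
 *	size_t i, rcvd = xsk_ring_cons__peek(&rx, 64, &idx);
 *
 *	for (i = 0; i < rcvd; i++) {
 *		const struct xdp_desc *desc =
 *			xsk_ring_cons__rx_desc(&rx, idx + i);
 *		void *pkt = xsk_umem__get_data(umem_area, desc->addr);
 *
 *		// process desc->len bytes at pkt here
 *	}
 *	xsk_ring_cons__release(&rx, rcvd);
 */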