gobpf-0.2.0/.github/workflows/ci.yml

name: CI
on: [push, pull_request]
jobs:
  run-tests:
    runs-on: ubuntu-20.04
    steps:
    - name: Checkout code
      uses: actions/checkout@v2
    - name: Setup Go env
      uses: actions/setup-go@v2
    - name: Build bcc
      run: |
        set -x
        sudo apt-get update
        # Use release 9 of llvm etc. - later versions have an unfixed
        # bug on Ubuntu:
        # https://github.com/iovisor/bcc/issues/2915
        sudo apt-get -y install bison build-essential cmake flex git libelf-dev libfl-dev libedit-dev libllvm9 llvm-9-dev libclang-9-dev python zlib1g-dev
        pushd /tmp
        git clone --depth 1 --branch v0.19.0 https://github.com/iovisor/bcc.git
        mkdir -p bcc/build; cd bcc/build
        # Symlink /usr/lib/llvm to avoid "Unable to find clang libraries".
        # The directory appears only to be created when installing the
        # virtual llvm-dev package.
        # https://github.com/iovisor/bcc/issues/492
        sudo ln -s /usr/lib/llvm-9 /usr/local/llvm
        cmake ..
        make
        sudo make install
        popd
    - name: Print system info
      run: |
        cat /etc/os-release
        uname -a
    - name: Run integration tests
      run: |
        sudo go test -tags integration -v ./...

gobpf-0.2.0/CONTRIBUTING.md

Please consider the following points for your pull requests:

* We aim to follow https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
  for commit messages; please try to write and format yours accordingly.
* Try to put relevant information for a change into the commit message.
  If your change consists of multiple commits, it's OK to refer to the
  individual commits for context (i.e. no need to copy all information
  into the PR body).
* Prefix the subject line of your commit with the corresponding module
  (`bcc` or `elf`) if sensible.
* Don't mix different changes in a single commit (for example, a bug fix
  should not be mixed with a new feature).
* Rebase your branch to keep it up-to-date; don't merge master.
* Rebase your commits to keep the history clean and consistent; we don't
  merge "fixups" (for example, a commit "Fixes from review").

gobpf-0.2.0/COPYRIGHT.txt

Copyright 2016 PLUMgrid
Copyright 2016 Kinvolk

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
gobpf-0.2.0/Documentation/pinning.md

# Object pinning

BPF has a persistent view of maps and programs under its own filesystem,
`/sys/fs/bpf`. Users are able to make each object visible under the bpffs;
we call this `object pinning`. It is done by calling the syscall `bpf(2)`
with the command `BPF_OBJ_PIN`. After doing that, users are able to use the
object with commands such as `BPF_OBJ_GET`, or remove the object with an
ordinary VFS syscall `unlink(2)`. This way, maps and programs stay alive
across process terminations.

This mechanism provides a much more consistent way of sharing objects with
other processes, compared to other solutions such as `tc`, where objects
are shared via Unix domain sockets.

## Different pinning options

`C.bpf_map_def.pinning` (defined in
[bpf.h](https://github.com/iovisor/gobpf/blob/446e57e0e24e/elf/include/bpf.h#L616))
can be set to one of the following pinning options.

* `PIN_NONE` : object is not pinned
* `PIN_OBJECT_NS` : pinning that is local to an object (to-be-implemented)
* `PIN_GLOBAL_NS` : pinning with a global namespace under e.g. `/sys/fs/bpf/ns1/globals`
* `PIN_CUSTOM_NS` : pinning with a custom path given as section parameter

### Pinning with `PIN_CUSTOM_NS`

When loading a module with `C.bpf_map_def.pinning` set to `PIN_CUSTOM_NS`,
an additional path must be set in the `elf.SectionParams.PinPath` parameter
to `Load()`. For example:

(C source file for an ELF object)

```
struct bpf_map_def SEC("maps/dummy_array_custom") dummy_array_custom = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(unsigned int),
	.max_entries = 1024,
	.pinning = PIN_CUSTOM_NS,
};
```

(Go source file that actually uses the ELF object)

```
b := elf.NewModule(customELFFileName)

var secParams = map[string]elf.SectionParams{
	"maps/dummy_array_custom": elf.SectionParams{
		PinPath: "ns1/test1",
	},
}
if err := b.Load(secParams); err != nil {
	fmt.Println(err)
}
```

Then you can check that the object is pinned like below:

```
$ ls -l /sys/fs/bpf/ns1/test1
```

### Unpinning with `PIN_CUSTOM_NS`

To unpin a custom pinned map, we need an additional path
`elf.CloseOptions.PinPath` as parameter to `CloseExt()`. For example:

```
var closeOptions = map[string]elf.CloseOptions{
	"maps/dummy_array_custom": elf.CloseOptions{
		Unpin:   true,
		PinPath: "ns1/test1",
	},
}
if err := b.CloseExt(closeOptions); err != nil {
	fmt.Println(err)
}
```

Or you can also remove the file just like below:

```
os.Remove("/sys/fs/bpf/ns1/test1")
```

gobpf-0.2.0/LICENSE-bpf.txt

The file at /elf/include/bpf.h is a copy of the Linux kernel file
/include/uapi/linux/bpf.h, retrieved from version 4.17, Git commit
36f9814, available at
https://raw.githubusercontent.com/torvalds/linux/36f9814a494a874d5a0f44843544b4b2539022db/include/uapi/linux/bpf.h.
It is provided here in unmodified source code form. As indicated in the
file, it is licensed under the GNU General Public License, version 2.0,
with the Linux-syscall-note. Copies of the Linux-syscall-note and GPL-2.0
license text are included below. gobpf has included this header file in
this repository with the intention of using it solely in the manner
described in the Linux-syscall-note.

= = = = =

Linux-syscall-note:

NOTE!
This copyright does *not* cover user programs that use kernel services by normal system calls - this is merely considered normal use of the kernel, and does *not* fall under the heading of "derived work". Also note that the GPL below is copyrighted by the Free Software Foundation, but the instance of code that it refers to (the Linux kernel) is copyrighted by me and others who actually wrote it. Also note that the only valid version of the GPL as far as the kernel is concerned is _this_ particular version of the license (ie v2, not v2.2 or v3.x or whatever), unless explicitly otherwise stated. Linus Torvalds = = = = = GPL-2.0: GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. 
This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. 
Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. 
Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. 
For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. 
You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. gobpf-0.2.0/LICENSE.txt000066400000000000000000000261351404447410300144410ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) 
   The text should be enclosed in the appropriate
   comment syntax for the file format. We also recommend that a
   file or class name and description of purpose be included on the
   same "printed page" as the copyright notice for easier
   identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

gobpf-0.2.0/README.md

# gobpf

[![GoDoc](https://godoc.org/github.com/golang/gddo?status.svg)](http://godoc.org/github.com/iovisor/gobpf) [![CI](https://github.com/iovisor/gobpf/actions/workflows/ci.yml/badge.svg)](https://github.com/iovisor/gobpf/actions/workflows/ci.yml)

This repository provides Go bindings for the [bcc framework](https://github.com/iovisor/bcc) as well as low-level routines to load and use eBPF programs from .elf files.

Input and contributions are very welcome.

We recommend vendoring gobpf and pinning its version, as the API changes regularly following bcc and Linux updates and releases.

## Requirements

eBPF requires a recent Linux kernel. A good feature list can be found here:
https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md

### `github.com/iovisor/gobpf/bcc`

Install the latest released version of [libbcc](https://github.com/iovisor/bcc/blob/master/INSTALL.md) (either using a package manager or by building from source).

### `github.com/iovisor/gobpf/elf`

#### Building ELF Object Files

To build ELF object files for use with the elf package, you must use specific section names (`SEC("...")`). The following are currently supported:

* `kprobe/...`
* `cgroup/skb`
* `cgroup/sock`
* `maps/...`
* `socket...`
* `tracepoint...`
* `uprobe/...`
* `uretprobe/...`
* `xdp/...`

Map definitions must correspond to `bpf_map_def` from [the elf package](https://github.com/iovisor/gobpf/blob/master/elf/include/bpf_map.h). Otherwise, you will encounter an error like `only one map with size 280 bytes allowed per section (check bpf_map_def)`.

The [Cilium](https://github.com/cilium/cilium) BPF docs contain helpful info for using clang/LLVM to compile programs into elf object files:
https://cilium.readthedocs.io/en/latest/bpf/#llvm

See `tests/dummy.c` for a minimal dummy and https://github.com/weaveworks/tcptracer-bpf for a real world example.

## Examples

Sample code can be found in the `examples/` directory. Examples can be run as follows:

```
sudo -E go run examples/bcc/perf/perf.go
```

## Tests

```
go test -tags integration -v ./...
```

gobpf-0.2.0/bcc/module.go

// Copyright 2016 PLUMgrid
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bcc import ( "fmt" "regexp" "runtime" "strings" "sync" "syscall" "unsafe" "github.com/iovisor/gobpf/pkg/cpuonline" ) /* #cgo CFLAGS: -I/usr/include/bcc/compat #cgo LDFLAGS: -lbcc #include #include */ import "C" // Module type type Module struct { p unsafe.Pointer funcs map[string]int kprobes map[string]int uprobes map[string]int tracepoints map[string]int rawTracepoints map[string]int perfEvents map[string][]int } type compileRequest struct { code string cflags []string rspCh chan *Module } const ( BPF_PROBE_ENTRY = iota BPF_PROBE_RETURN ) const ( XDP_FLAGS_UPDATE_IF_NOEXIST = uint32(1) << iota XDP_FLAGS_SKB_MODE XDP_FLAGS_DRV_MODE XDP_FLAGS_HW_MODE XDP_FLAGS_MODES = XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE XDP_FLAGS_MASK = XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_MODES ) var ( defaultCflags []string compileCh chan compileRequest bpfInitOnce sync.Once ) func bpfInit() { defaultCflags = []string{ fmt.Sprintf("-DNUMCPUS=%d", runtime.NumCPU()), } compileCh = make(chan compileRequest) go compile() } // NewModule constructor func newModule(code string, cflags []string) *Module { cflagsC := make([]*C.char, len(defaultCflags)+len(cflags)) defer func() { for _, cflag := range cflagsC { C.free(unsafe.Pointer(cflag)) } }() for i, cflag := range cflags { cflagsC[i] = C.CString(cflag) } for i, cflag := range defaultCflags { cflagsC[len(cflags)+i] = C.CString(cflag) } cs := C.CString(code) defer C.free(unsafe.Pointer(cs)) c := C.bpf_module_create_c_from_string(cs, 2, (**C.char)(&cflagsC[0]), C.int(len(cflagsC)), (C.bool)(true), nil) if c == nil { return nil } return &Module{ p: c, funcs: make(map[string]int), kprobes: make(map[string]int), uprobes: make(map[string]int), tracepoints: make(map[string]int), rawTracepoints: make(map[string]int), perfEvents: make(map[string][]int), } } // NewModule asynchronously compiles the code, generates a new BPF // module and returns it. func NewModule(code string, cflags []string) *Module { bpfInitOnce.Do(bpfInit) ch := make(chan *Module) compileCh <- compileRequest{code, cflags, ch} return <-ch } func compile() { for { req := <-compileCh req.rspCh <- newModule(req.code, req.cflags) } } // Close takes care of closing all kprobes opened by this modules and // destroys the underlying libbpf module. 
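//
// A minimal, hedged usage sketch (the program source string is an
// assumption for illustration, not part of this package):
//
//	m := bcc.NewModule(source, []string{})
//	defer m.Close()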
func (bpf *Module) Close() { C.bpf_module_destroy(bpf.p) for k, v := range bpf.kprobes { C.bpf_close_perf_event_fd((C.int)(v)) evNameCS := C.CString(k) C.bpf_detach_kprobe(evNameCS) C.free(unsafe.Pointer(evNameCS)) } for k, v := range bpf.uprobes { C.bpf_close_perf_event_fd((C.int)(v)) evNameCS := C.CString(k) C.bpf_detach_uprobe(evNameCS) C.free(unsafe.Pointer(evNameCS)) } for k, v := range bpf.tracepoints { C.bpf_close_perf_event_fd((C.int)(v)) parts := strings.SplitN(k, ":", 2) tpCategoryCS := C.CString(parts[0]) tpNameCS := C.CString(parts[1]) C.bpf_detach_tracepoint(tpCategoryCS, tpNameCS) C.free(unsafe.Pointer(tpCategoryCS)) C.free(unsafe.Pointer(tpNameCS)) } for _, vs := range bpf.perfEvents { for _, v := range vs { C.bpf_close_perf_event_fd((C.int)(v)) } } for _, fd := range bpf.funcs { syscall.Close(fd) } } // GetProgramTag returns a tag for ebpf program under passed fd func (bpf *Module) GetProgramTag(fd int) (tag uint64, err error) { _, err = C.bpf_prog_get_tag(C.int(fd), (*C.ulonglong)(unsafe.Pointer(&tag))) return tag, err } // LoadNet loads a program of type BPF_PROG_TYPE_SCHED_ACT. func (bpf *Module) LoadNet(name string) (int, error) { return bpf.Load(name, C.BPF_PROG_TYPE_SCHED_ACT, 0, 0) } // LoadKprobe loads a program of type BPF_PROG_TYPE_KPROBE. func (bpf *Module) LoadKprobe(name string) (int, error) { return bpf.Load(name, C.BPF_PROG_TYPE_KPROBE, 0, 0) } // LoadTracepoint loads a program of type BPF_PROG_TYPE_TRACEPOINT func (bpf *Module) LoadTracepoint(name string) (int, error) { return bpf.Load(name, C.BPF_PROG_TYPE_TRACEPOINT, 0, 0) } // LoadRawTracepoint loads a program of type BPF_PROG_TYPE_RAW_TRACEPOINT func (bpf *Module) LoadRawTracepoint(name string) (int, error) { return bpf.Load(name, C.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, 0) } // LoadPerfEvent loads a program of type BPF_PROG_TYPE_PERF_EVENT func (bpf *Module) LoadPerfEvent(name string) (int, error) { return bpf.Load(name, C.BPF_PROG_TYPE_PERF_EVENT, 0, 0) } // LoadUprobe loads a program of type BPF_PROG_TYPE_KPROBE. func (bpf *Module) LoadUprobe(name string) (int, error) { return bpf.Load(name, C.BPF_PROG_TYPE_KPROBE, 0, 0) } // Load a program. 
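//
// A hedged sketch of loading by name via the typed wrapper above (the
// function name "kprobe__sys_clone" is an illustrative assumption):
//
//	fd, err := m.LoadKprobe("kprobe__sys_clone")
//	if err != nil {
//		// handle the load error
//	}
//	_ = fd // pass the fd to one of the Attach* helpers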
func (bpf *Module) Load(name string, progType int, logLevel, logSize uint) (int, error) { fd, ok := bpf.funcs[name] if ok { return fd, nil } fd, err := bpf.load(name, progType, logLevel, logSize) if err != nil { return -1, err } bpf.funcs[name] = fd return fd, nil } func (bpf *Module) load(name string, progType int, logLevel, logSize uint) (int, error) { nameCS := C.CString(name) defer C.free(unsafe.Pointer(nameCS)) start := (*C.struct_bpf_insn)(C.bpf_function_start(bpf.p, nameCS)) size := C.int(C.bpf_function_size(bpf.p, nameCS)) license := C.bpf_module_license(bpf.p) version := C.bpf_module_kern_version(bpf.p) if start == nil { return -1, fmt.Errorf("Module: unable to find %s", name) } var logBuf []byte var logBufP *C.char if logSize > 0 { logBuf = make([]byte, logSize) logBufP = (*C.char)(unsafe.Pointer(&logBuf[0])) } fd, err := C.bcc_func_load(bpf.p, C.int(uint32(progType)), nameCS, start, size, license, version, C.int(logLevel), logBufP, C.uint(len(logBuf)), nil) if fd < 0 { return -1, fmt.Errorf("error loading BPF program: %v", err) } return int(fd), nil } var kprobeRegexp = regexp.MustCompile("[+.]") var uprobeRegexp = regexp.MustCompile("[^a-zA-Z0-9_]") func (bpf *Module) attachProbe(evName string, attachType uint32, fnName string, fd int, maxActive int) error { if _, ok := bpf.kprobes[evName]; ok { return nil } evNameCS := C.CString(evName) fnNameCS := C.CString(fnName) res, err := C.bpf_attach_kprobe(C.int(fd), attachType, evNameCS, fnNameCS, (C.uint64_t)(0), C.int(maxActive)) C.free(unsafe.Pointer(evNameCS)) C.free(unsafe.Pointer(fnNameCS)) if res < 0 { return fmt.Errorf("failed to attach BPF kprobe: %v", err) } bpf.kprobes[evName] = int(res) return nil } func (bpf *Module) attachUProbe(evName string, attachType uint32, path string, addr uint64, fd, pid int) error { evNameCS := C.CString(evName) binaryPathCS := C.CString(path) res, err := C.bpf_attach_uprobe(C.int(fd), attachType, evNameCS, binaryPathCS, (C.uint64_t)(addr), (C.pid_t)(pid), 0) C.free(unsafe.Pointer(evNameCS)) C.free(unsafe.Pointer(binaryPathCS)) if res < 0 { return fmt.Errorf("failed to attach BPF uprobe: %v", err) } bpf.uprobes[evName] = int(res) return nil } // AttachKprobe attaches a kprobe fd to a function. func (bpf *Module) AttachKprobe(fnName string, fd int, maxActive int) error { evName := "p_" + kprobeRegexp.ReplaceAllString(fnName, "_") return bpf.attachProbe(evName, BPF_PROBE_ENTRY, fnName, fd, maxActive) } // AttachKretprobe attaches a kretprobe fd to a function. 
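//
// A hedged sketch combining LoadKprobe and AttachKretprobe (the kernel
// symbol name is an assumption; a maxActive of 0 leaves the limit to
// the kernel default):
//
//	fd, err := m.LoadKprobe("kretprobe__sys_clone")
//	if err == nil {
//		err = m.AttachKretprobe("sys_clone", fd, 0)
//	}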
func (bpf *Module) AttachKretprobe(fnName string, fd int, maxActive int) error { evName := "r_" + kprobeRegexp.ReplaceAllString(fnName, "_") return bpf.attachProbe(evName, BPF_PROBE_RETURN, fnName, fd, maxActive) } // AttachTracepoint attaches a tracepoint fd to a function // The 'name' argument is in the format 'category:name' func (bpf *Module) AttachTracepoint(name string, fd int) error { if _, ok := bpf.tracepoints[name]; ok { return nil } parts := strings.SplitN(name, ":", 2) if len(parts) < 2 { return fmt.Errorf("failed to parse tracepoint name, expected %q, got %q", "category:name", name) } tpCategoryCS := C.CString(parts[0]) tpNameCS := C.CString(parts[1]) res, err := C.bpf_attach_tracepoint(C.int(fd), tpCategoryCS, tpNameCS) C.free(unsafe.Pointer(tpCategoryCS)) C.free(unsafe.Pointer(tpNameCS)) if res < 0 { return fmt.Errorf("failed to attach BPF tracepoint: %v", err) } bpf.tracepoints[name] = int(res) return nil } // AttachRawTracepoint attaches a raw tracepoint fd to a function // The 'name' argument is in the format 'name', there is no category func (bpf *Module) AttachRawTracepoint(name string, fd int) error { if _, ok := bpf.rawTracepoints[name]; ok { return nil } tpNameCS := C.CString(name) res, err := C.bpf_attach_raw_tracepoint(C.int(fd), tpNameCS) C.free(unsafe.Pointer(tpNameCS)) if res < 0 { return fmt.Errorf("failed to attach BPF tracepoint: %v", err) } bpf.rawTracepoints[name] = int(res) return nil } // AttachPerfEvent attaches a perf event fd to a function // Argument 'evType' is a member of 'perf_type_id' enum in the kernel // header 'include/uapi/linux/perf_event.h'. Argument 'evConfig' // is one of PERF_COUNT_* constants in the same file. func (bpf *Module) AttachPerfEvent(evType, evConfig int, samplePeriod int, sampleFreq int, pid, cpu, groupFd, fd int) error { key := fmt.Sprintf("%d:%d", evType, evConfig) if _, ok := bpf.perfEvents[key]; ok { return nil } res := []int{} if cpu > 0 { r, err := C.bpf_attach_perf_event(C.int(fd), C.uint32_t(evType), C.uint32_t(evConfig), C.uint64_t(samplePeriod), C.uint64_t(sampleFreq), C.pid_t(pid), C.int(cpu), C.int(groupFd)) if r < 0 { return fmt.Errorf("failed to attach BPF perf event: %v", err) } res = append(res, int(r)) } else { cpus, err := cpuonline.Get() if err != nil { return fmt.Errorf("failed to determine online cpus: %v", err) } for _, i := range cpus { r, err := C.bpf_attach_perf_event(C.int(fd), C.uint32_t(evType), C.uint32_t(evConfig), C.uint64_t(samplePeriod), C.uint64_t(sampleFreq), C.pid_t(pid), C.int(i), C.int(groupFd)) if r < 0 { return fmt.Errorf("failed to attach BPF perf event: %v", err) } res = append(res, int(r)) } } bpf.perfEvents[key] = res return nil } // AttachUprobe attaches a uprobe fd to the symbol in the library or binary 'name' // The 'name' argument can be given as either a full library path (/usr/lib/..), // a library without the lib prefix, or as a binary with full path (/bin/bash) // A pid can be given to attach to, or -1 to attach to all processes // // Presently attempts to trace processes running in a different namespace // to the tracer will fail due to limitations around namespace-switching // in multi-threaded programs (such as Go programs) func (bpf *Module) AttachUprobe(name, symbol string, fd, pid int) error { path, addr, err := resolveSymbolPath(name, symbol, 0x0, pid) if err != nil { return err } evName := fmt.Sprintf("p_%s_0x%x", uprobeRegexp.ReplaceAllString(path, "_"), addr) return bpf.attachUProbe(evName, BPF_PROBE_ENTRY, path, addr, fd, pid) } // AttachMatchingUprobes attaches a 
uprobe fd to all symbols in the library or binary // 'name' that match a given pattern. // The 'name' argument can be given as either a full library path (/usr/lib/..), // a library without the lib prefix, or as a binary with full path (/bin/bash) // A pid can be given, or -1 to attach to all processes // // Presently attempts to trace processes running in a different namespace // to the tracer will fail due to limitations around namespace-switching // in multi-threaded programs (such as Go programs) func (bpf *Module) AttachMatchingUprobes(name, match string, fd, pid int) error { symbols, err := matchUserSymbols(name, match) if err != nil { return fmt.Errorf("unable to match symbols: %s", err) } if len(symbols) == 0 { return fmt.Errorf("no symbols matching %s for %s found", match, name) } for _, symbol := range symbols { if err := bpf.AttachUprobe(name, symbol.name, fd, pid); err != nil { return err } } return nil } // AttachUretprobe attaches a uretprobe fd to the symbol in the library or binary 'name' // The 'name' argument can be given as either a full library path (/usr/lib/..), // a library without the lib prefix, or as a binary with full path (/bin/bash) // A pid can be given to attach to, or -1 to attach to all processes // // Presently attempts to trace processes running in a different namespace // to the tracer will fail due to limitations around namespace-switching // in multi-threaded programs (such as Go programs) func (bpf *Module) AttachUretprobe(name, symbol string, fd, pid int) error { path, addr, err := resolveSymbolPath(name, symbol, 0x0, pid) if err != nil { return err } evName := fmt.Sprintf("r_%s_0x%x", uprobeRegexp.ReplaceAllString(path, "_"), addr) return bpf.attachUProbe(evName, BPF_PROBE_RETURN, path, addr, fd, pid) } // AttachMatchingUretprobes attaches a uretprobe fd to all symbols in the library or binary // 'name' that match a given pattern. // The 'name' argument can be given as either a full library path (/usr/lib/..), // a library without the lib prefix, or as a binary with full path (/bin/bash) // A pid can be given, or -1 to attach to all processes // // Presently attempts to trace processes running in a different namespace // to the tracer will fail due to limitations around namespace-switching // in multi-threaded programs (such as Go programs) func (bpf *Module) AttachMatchingUretprobes(name, match string, fd, pid int) error { symbols, err := matchUserSymbols(name, match) if err != nil { return fmt.Errorf("unable to match symbols: %s", err) } if len(symbols) == 0 { return fmt.Errorf("no symbols matching %s for %s found", match, name) } for _, symbol := range symbols { if err := bpf.AttachUretprobe(name, symbol.name, fd, pid); err != nil { return err } } return nil } // TableSize returns the number of tables in the module. func (bpf *Module) TableSize() uint64 { size := C.bpf_num_tables(bpf.p) return uint64(size) } // TableId returns the id of a table. func (bpf *Module) TableId(name string) C.size_t { cs := C.CString(name) defer C.free(unsafe.Pointer(cs)) return C.bpf_table_id(bpf.p, cs) } // TableDesc returns a map with table properties (name, fd, ...). 
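//
// A hedged sketch of inspecting a table description (the keys match the
// map literal below):
//
//	desc := m.TableDesc(0)
//	fmt.Printf("table %v: fd=%v key_size=%v\n",
//		desc["name"], desc["fd"], desc["key_size"])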
func (bpf *Module) TableDesc(id uint64) map[string]interface{} { i := C.size_t(id) return map[string]interface{}{ "name": C.GoString(C.bpf_table_name(bpf.p, i)), "fd": int(C.bpf_table_fd_id(bpf.p, i)), "key_size": uint64(C.bpf_table_key_size_id(bpf.p, i)), "leaf_size": uint64(C.bpf_table_leaf_size_id(bpf.p, i)), "key_desc": C.GoString(C.bpf_table_key_desc_id(bpf.p, i)), "leaf_desc": C.GoString(C.bpf_table_leaf_desc_id(bpf.p, i)), } } // TableIter returns a receveier channel to iterate over entries. func (bpf *Module) TableIter() <-chan map[string]interface{} { ch := make(chan map[string]interface{}) go func() { size := C.bpf_num_tables(bpf.p) for i := C.size_t(0); i < size; i++ { ch <- bpf.TableDesc(uint64(i)) } close(ch) }() return ch } func (bpf *Module) attachXDP(devName string, fd int, flags uint32) error { devNameCS := C.CString(devName) res, err := C.bpf_attach_xdp(devNameCS, C.int(fd), C.uint32_t(flags)) defer C.free(unsafe.Pointer(devNameCS)) if res != 0 || err != nil { return fmt.Errorf("failed to attach BPF xdp to device %v: %v", devName, err) } return nil } // AttachXDP attaches a xdp fd to a device. func (bpf *Module) AttachXDP(devName string, fd int) error { return bpf.attachXDP(devName, fd, 0) } // AttachXDPWithFlags attaches a xdp fd to a device with flags. func (bpf *Module) AttachXDPWithFlags(devName string, fd int, flags uint32) error { return bpf.attachXDP(devName, fd, flags) } // RemoveXDP removes any xdp from this device. func (bpf *Module) RemoveXDP(devName string) error { return bpf.attachXDP(devName, -1, 0) } func GetSyscallFnName(name string) string { return GetSyscallPrefix() + name } var syscallPrefix string func GetSyscallPrefix() string { if syscallPrefix == "" { _, err := bccResolveName("", "__x64_sys_bpf", -1) if err == nil { syscallPrefix = "__x64_sys_" } else { syscallPrefix = "sys_" } } return syscallPrefix } gobpf-0.2.0/bcc/perf.go000066400000000000000000000136501404447410300146260ustar00rootroot00000000000000// Copyright 2016 Kinvolk // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bcc import ( "encoding/binary" "fmt" "sync" "unsafe" "github.com/iovisor/gobpf/pkg/cpuonline" ) /* #cgo CFLAGS: -I/usr/include/bcc/compat #cgo LDFLAGS: -lbcc #include #include #include // perf_reader_raw_cb and perf_reader_lost_cb as defined in bcc libbpf.h // typedef void (*perf_reader_raw_cb)(void *cb_cookie, void *raw, int raw_size); extern void rawCallback(void*, void*, int); // typedef void (*perf_reader_lost_cb)(void *cb_cookie, uint64_t lost); extern void lostCallback(void*, uint64_t); */ import "C" type PerfMap struct { table *Table readers []*C.struct_perf_reader stop chan bool } type callbackData struct { receiverChan chan []byte lostChan chan uint64 } // BPF_PERF_READER_PAGE_CNT is the default page_cnt used per cpu ring buffer const BPF_PERF_READER_PAGE_CNT = 8 var byteOrder binary.ByteOrder var callbackRegister = make(map[uint64]*callbackData) var callbackIndex uint64 var mu sync.Mutex // In lack of binary.HostEndian ... 
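// the host byte order is determined once in init below and exposed via
// GetHostByteOrder. A hedged decoding sketch (the raw perf event slice
// and its field layout are illustrative assumptions):
//
//	pid := bcc.GetHostByteOrder().Uint32(raw[0:4])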
func init() { byteOrder = determineHostByteOrder() } func registerCallback(data *callbackData) uint64 { mu.Lock() defer mu.Unlock() callbackIndex++ for callbackRegister[callbackIndex] != nil { callbackIndex++ } callbackRegister[callbackIndex] = data return callbackIndex } func unregisterCallback(i uint64) { mu.Lock() defer mu.Unlock() delete(callbackRegister, i) } func lookupCallback(i uint64) *callbackData { return callbackRegister[i] } // Gateway function as required with CGO Go >= 1.6 // "If a C-program wants a function pointer, a gateway function has to // be written. This is because we can't take the address of a Go // function and give that to C-code since the cgo tool will generate a // stub in C that should be called." //export rawCallback func rawCallback(cbCookie unsafe.Pointer, raw unsafe.Pointer, rawSize C.int) { callbackData := lookupCallback(uint64(uintptr(cbCookie))) callbackData.receiverChan <- C.GoBytes(raw, rawSize) } //export lostCallback func lostCallback(cbCookie unsafe.Pointer, lost C.uint64_t) { callbackData := lookupCallback(uint64(uintptr(cbCookie))) if callbackData.lostChan != nil { callbackData.lostChan <- uint64(lost) } } // GetHostByteOrder returns the current byte-order. func GetHostByteOrder() binary.ByteOrder { return byteOrder } func determineHostByteOrder() binary.ByteOrder { var i int32 = 0x01020304 u := unsafe.Pointer(&i) pb := (*byte)(u) b := *pb if b == 0x04 { return binary.LittleEndian } return binary.BigEndian } // InitPerfMap initializes a perf map with a receiver channel, with a default page_cnt. func InitPerfMap(table *Table, receiverChan chan []byte, lostChan chan uint64) (*PerfMap, error) { return InitPerfMapWithPageCnt(table, receiverChan, lostChan, BPF_PERF_READER_PAGE_CNT) } // InitPerfMapWithPageCnt initializes a perf map with a receiver channel with a specified page_cnt. func InitPerfMapWithPageCnt(table *Table, receiverChan chan []byte, lostChan chan uint64, pageCnt int) (*PerfMap, error) { fd := table.Config()["fd"].(int) keySize := table.Config()["key_size"].(uint64) leafSize := table.Config()["leaf_size"].(uint64) if keySize != 4 || leafSize != 4 { return nil, fmt.Errorf("passed table has wrong size") } callbackDataIndex := registerCallback(&callbackData{ receiverChan, lostChan, }) key := make([]byte, keySize) leaf := make([]byte, leafSize) keyP := unsafe.Pointer(&key[0]) leafP := unsafe.Pointer(&leaf[0]) readers := []*C.struct_perf_reader{} cpus, err := cpuonline.Get() if err != nil { return nil, fmt.Errorf("failed to determine online cpus: %v", err) } for _, cpu := range cpus { reader, err := bpfOpenPerfBuffer(cpu, callbackDataIndex, pageCnt) if err != nil { return nil, fmt.Errorf("failed to open perf buffer: %v", err) } perfFd := C.perf_reader_fd((*C.struct_perf_reader)(reader)) readers = append(readers, (*C.struct_perf_reader)(reader)) byteOrder.PutUint32(leaf, uint32(perfFd)) r, err := C.bpf_update_elem(C.int(fd), keyP, leafP, 0) if r != 0 { return nil, fmt.Errorf("unable to initialize perf map: %v", err) } r = C.bpf_get_next_key(C.int(fd), keyP, keyP) if r != 0 { break } } return &PerfMap{ table, readers, make(chan bool), }, nil } // Start to poll the perf map reader and send back event data // over the connected channel. func (pm *PerfMap) Start() { go pm.poll(500) } // Stop to poll the perf map readers after a maximum of 500ms // (the timeout we use for perf_reader_poll). Ideally we would // have a way to cancel the poll, but perf_reader_poll doesn't // support that yet. 
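//
// A hedged lifecycle sketch (table construction is omitted; passing a
// nil lost-events channel is tolerated by lostCallback above):
//
//	channel := make(chan []byte)
//	perfMap, err := bcc.InitPerfMap(table, channel, nil)
//	if err == nil {
//		perfMap.Start()
//		// ... read events from channel ...
//		perfMap.Stop()
//	}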
func (pm *PerfMap) Stop() { pm.stop <- true } func (pm *PerfMap) poll(timeout int) { for { select { case <-pm.stop: return default: C.perf_reader_poll(C.int(len(pm.readers)), &pm.readers[0], C.int(timeout)) } } } func bpfOpenPerfBuffer(cpu uint, callbackDataIndex uint64, pageCnt int) (unsafe.Pointer, error) { if (pageCnt & (pageCnt - 1)) != 0 { return nil, fmt.Errorf("pageCnt must be a power of 2: %d", pageCnt) } cpuC := C.int(cpu) pageCntC := C.int(pageCnt) reader, err := C.bpf_open_perf_buffer( (C.perf_reader_raw_cb)(unsafe.Pointer(C.rawCallback)), (C.perf_reader_lost_cb)(unsafe.Pointer(C.lostCallback)), unsafe.Pointer(uintptr(callbackDataIndex)), -1, cpuC, pageCntC) if reader == nil { return nil, fmt.Errorf("failed to open perf buffer: %v", err) } return reader, nil } gobpf-0.2.0/bcc/symbol.go000066400000000000000000000114361404447410300151770ustar00rootroot00000000000000// Copyright 2017 Louis McCormack // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bcc import ( "fmt" "regexp" "sync" "unsafe" ) /* #cgo CFLAGS: -I/usr/include/bcc/compat #cgo LDFLAGS: -lbcc #include #include #include extern void foreach_symbol_callback(char*, uint64_t); */ import "C" type symbolAddress struct { name string addr uint64 } //symbolCache will cache module lookups var symbolCache = struct { cache map[string][]*symbolAddress currentModule string lock *sync.Mutex }{ cache: map[string][]*symbolAddress{}, currentModule: "", lock: &sync.Mutex{}, } type bccSymbol struct { name *C.char demangleName *C.char module *C.char offset C.ulonglong } type bccSymbolOption struct { useDebugFile int checkDebugFileCrc int useSymbolType uint32 } // resolveSymbolPath returns the file and offset to locate symname in module func resolveSymbolPath(module string, symname string, addr uint64, pid int) (string, uint64, error) { if pid == -1 { pid = 0 } modname, offset, err := bccResolveSymname(module, symname, addr, pid) if err != nil { return "", 0, fmt.Errorf("unable to locate symbol %s in module %s: %v", symname, module, err) } return modname, offset, nil } func bccResolveSymname(module string, symname string, addr uint64, pid int) (string, uint64, error) { symbol := &bccSymbol{} symbolC := (*C.struct_bcc_symbol)(unsafe.Pointer(symbol)) moduleCS := C.CString(module) defer C.free(unsafe.Pointer(moduleCS)) symnameCS := C.CString(symname) defer C.free(unsafe.Pointer(symnameCS)) res, err := C.bcc_resolve_symname(moduleCS, symnameCS, (C.uint64_t)(addr), C.int(pid), nil, symbolC) if res < 0 { return "", 0, fmt.Errorf("unable to locate symbol %s in module %s: %v", symname, module, err) } return C.GoString(symbolC.module), (uint64)(symbolC.offset), nil } func bccResolveName(module, symname string, pid int) (uint64, error) { symbol := &bccSymbolOption{} symbolC := (*C.struct_bcc_symbol_option)(unsafe.Pointer(symbol)) pidC := C.int(pid) cache := C.bcc_symcache_new(pidC, symbolC) defer C.bcc_free_symcache(cache, pidC) moduleCS := C.CString(module) defer C.free(unsafe.Pointer(moduleCS)) nameCS := C.CString(symname) defer 
C.free(unsafe.Pointer(nameCS))

	var addr uint64
	addrC := C.uint64_t(addr)
	res := C.bcc_symcache_resolve_name(cache, moduleCS, nameCS, &addrC)
	if res < 0 {
		return 0, fmt.Errorf("unable to locate symbol %s in module %s", symname, module)
	}
	return uint64(addrC), nil
}

// getUserSymbolsAndAddresses finds a list of symbols associated with a module,
// along with their addresses. The results are cached in the symbolCache and
// returned.
func getUserSymbolsAndAddresses(module string) ([]*symbolAddress, error) {
	symbolCache.lock.Lock()
	defer symbolCache.lock.Unlock()
	// return previously cached list if it exists
	if _, ok := symbolCache.cache[module]; ok {
		return symbolCache.cache[module], nil
	}

	symbolCache.cache[module] = []*symbolAddress{}
	symbolCache.currentModule = module

	if err := bccForeachSymbol(module); err != nil {
		return nil, err
	}

	return symbolCache.cache[module], nil
}

func matchUserSymbols(module, match string) ([]*symbolAddress, error) {
	r, err := regexp.Compile(match)
	if err != nil {
		return nil, fmt.Errorf("invalid regex %s : %s", match, err)
	}
	matchedSymbols := []*symbolAddress{}
	symbols, err := getUserSymbolsAndAddresses(module)
	if err != nil {
		return nil, err
	}
	for _, sym := range symbols {
		if r.MatchString(sym.name) {
			matchedSymbols = append(matchedSymbols, sym)
		}
	}
	return matchedSymbols, nil
}

// foreach_symbol_callback is a gateway function that will be exported to C
// so that it can be referenced as a function pointer
//export foreach_symbol_callback
func foreach_symbol_callback(symname *C.char, addr C.uint64_t) {
	symbolCache.cache[symbolCache.currentModule] =
		append(symbolCache.cache[symbolCache.currentModule], &symbolAddress{C.GoString(symname), (uint64)(addr)})
}

func bccForeachSymbol(module string) error {
	moduleCS := C.CString(module)
	defer C.free(unsafe.Pointer(moduleCS))
	res := C.bcc_foreach_function_symbol(moduleCS, (C.SYM_CB)(unsafe.Pointer(C.foreach_symbol_callback)))
	if res < 0 {
		return fmt.Errorf("unable to list symbols for %s", module)
	}
	return nil
}
gobpf-0.2.0/bcc/table.go000066400000000000000000000231561404447410300147630ustar00rootroot00000000000000// Copyright 2016 PLUMgrid
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bcc

import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"unsafe"

	"github.com/iovisor/gobpf/pkg/cpupossible"
)

/*
#cgo CFLAGS: -I/usr/include/bcc/compat
#cgo LDFLAGS: -lbcc
#include <bcc/bcc_common.h>
#include <bcc/libbpf.h>
#include <stdlib.h>
*/
import "C"

var errIterationFailed = errors.New("table.Iter: leaf for next key not found")

// Table references a BPF table. The zero value cannot be used.
type Table struct {
	id     C.size_t
	module *Module
}

// NewTable returns a reference to a BPF table.
func NewTable(id C.size_t, module *Module) *Table {
	return &Table{
		id:     id,
		module: module,
	}
}

// ID returns the table id.
func (table *Table) ID() string {
	return C.GoString(C.bpf_table_name(table.module.p, table.id))
}

// Name returns the table name.
func (table *Table) Name() string {
	return C.GoString(C.bpf_table_name(table.module.p, table.id))
}

// Config returns the table properties (name, fd, ...).
func (table *Table) Config() map[string]interface{} {
	mod := table.module.p
	return map[string]interface{}{
		"name":      C.GoString(C.bpf_table_name(mod, table.id)),
		"fd":        int(C.bpf_table_fd_id(mod, table.id)),
		"key_size":  uint64(C.bpf_table_key_size_id(mod, table.id)),
		"leaf_size": uint64(C.bpf_table_leaf_size_id(mod, table.id)),
		"key_desc":  C.GoString(C.bpf_table_key_desc_id(mod, table.id)),
		"leaf_desc": C.GoString(C.bpf_table_leaf_desc_id(mod, table.id)),
	}
}

// LeafStrToBytes converts a leaf value, given as a string, into bytes using
// the bcc-table's leaf scanner.
func (table *Table) LeafStrToBytes(leafStr string) ([]byte, error) {
	mod := table.module.p

	leafSize := C.bpf_table_leaf_size_id(mod, table.id)
	leaf := make([]byte, leafSize)
	leafP := unsafe.Pointer(&leaf[0])

	leafCS := C.CString(leafStr)
	defer C.free(unsafe.Pointer(leafCS))

	r := C.bpf_table_leaf_sscanf(mod, table.id, leafCS, leafP)
	if r != 0 {
		return nil, fmt.Errorf("error scanning leaf (%v) from string", leafStr)
	}
	return leaf, nil
}

// KeyStrToBytes converts a key, given as a string, into bytes using the
// bcc-table's key scanner.
func (table *Table) KeyStrToBytes(keyStr string) ([]byte, error) {
	mod := table.module.p

	keySize := C.bpf_table_key_size_id(mod, table.id)
	key := make([]byte, keySize)
	keyP := unsafe.Pointer(&key[0])

	keyCS := C.CString(keyStr)
	defer C.free(unsafe.Pointer(keyCS))

	r := C.bpf_table_key_sscanf(mod, table.id, keyCS, keyP)
	if r != 0 {
		return nil, fmt.Errorf("error scanning key (%v) from string", keyStr)
	}
	return key, nil
}

// KeyBytesToStr returns the given key value formatted using the bcc-table's key string printer.
func (table *Table) KeyBytesToStr(key []byte) (string, error) {
	keySize := len(key)
	keyP := unsafe.Pointer(&key[0])

	keyStr := make([]byte, keySize*8)
	keyStrP := (*C.char)(unsafe.Pointer(&keyStr[0]))

	if res := C.bpf_table_key_snprintf(table.module.p, table.id, keyStrP, C.size_t(len(keyStr)), keyP); res != 0 {
		return "", fmt.Errorf("formatting table-key: %d", res)
	}

	return string(keyStr[:bytes.IndexByte(keyStr, 0)]), nil
}

// LeafBytesToStr returns the given leaf value formatted using the bcc-table's leaf string printer.
func (table *Table) LeafBytesToStr(leaf []byte) (string, error) {
	leafSize := len(leaf)
	leafP := unsafe.Pointer(&leaf[0])

	leafStr := make([]byte, leafSize*8)
	leafStrP := (*C.char)(unsafe.Pointer(&leafStr[0]))

	if res := C.bpf_table_leaf_snprintf(table.module.p, table.id, leafStrP, C.size_t(len(leafStr)), leafP); res != 0 {
		return "", fmt.Errorf("formatting table-leaf: %d", res)
	}

	return string(leafStr[:bytes.IndexByte(leafStr, 0)]), nil
}

// Get takes a key and returns its value, or an error if the lookup fails or
// the key is not present.
func (table *Table) Get(key []byte) ([]byte, error) {
	mod := table.module.p
	fd := C.bpf_table_fd_id(mod, table.id)

	keyP := unsafe.Pointer(&key[0])

	leafSize := C.bpf_table_leaf_size_id(mod, table.id)
	mapType := C.bpf_table_type_id(mod, table.id)
	switch mapType {
	case C.BPF_MAP_TYPE_PERCPU_HASH, C.BPF_MAP_TYPE_PERCPU_ARRAY:
		cpus, err := cpupossible.Get()
		if err != nil {
			return nil, fmt.Errorf("get possible cpus: %w", err)
		}
		leafSize *= C.size_t(len(cpus))
	}
	leaf := make([]byte, leafSize)
	leafP := unsafe.Pointer(&leaf[0])

	r, err := C.bpf_lookup_elem(fd, keyP, leafP)
	if r != 0 {
		keyStr, errK := table.KeyBytesToStr(key)
		if errK != nil {
			keyStr = fmt.Sprintf("%v", key)
		}
		return nil, fmt.Errorf("Table.Get: key %v: %v", keyStr, err)
	}

	return leaf, nil
}

// GetP takes a key and returns the value or nil.
func (table *Table) GetP(key unsafe.Pointer) (unsafe.Pointer, error) {
	fd := C.bpf_table_fd_id(table.module.p, table.id)

	leafSize := C.bpf_table_leaf_size_id(table.module.p, table.id)
	mapType := C.bpf_table_type_id(table.module.p, table.id)
	switch mapType {
	case C.BPF_MAP_TYPE_PERCPU_HASH, C.BPF_MAP_TYPE_PERCPU_ARRAY:
		cpus, err := cpupossible.Get()
		if err != nil {
			return nil, fmt.Errorf("get possible cpus: %w", err)
		}
		leafSize *= C.size_t(len(cpus))
	}
	leaf := make([]byte, leafSize)
	leafP := unsafe.Pointer(&leaf[0])

	_, err := C.bpf_lookup_elem(fd, key, leafP)
	if err != nil {
		return nil, err
	}
	return leafP, nil
}

// Set sets the given key to the given value.
func (table *Table) Set(key, leaf []byte) error {
	fd := C.bpf_table_fd_id(table.module.p, table.id)

	keyP := unsafe.Pointer(&key[0])
	leafP := unsafe.Pointer(&leaf[0])

	r, err := C.bpf_update_elem(fd, keyP, leafP, 0)
	if r != 0 {
		keyStr, errK := table.KeyBytesToStr(key)
		if errK != nil {
			keyStr = fmt.Sprintf("%v", key)
		}
		leafStr, errL := table.LeafBytesToStr(leaf)
		if errL != nil {
			leafStr = fmt.Sprintf("%v", leaf)
		}

		return fmt.Errorf("Table.Set: update %v to %v: %v", keyStr, leafStr, err)
	}

	return nil
}

// SetP sets a key to a value, both given as unsafe.Pointer.
func (table *Table) SetP(key, leaf unsafe.Pointer) error {
	fd := C.bpf_table_fd_id(table.module.p, table.id)

	_, err := C.bpf_update_elem(fd, key, leaf, 0)
	if err != nil {
		return err
	}

	return nil
}

// Delete deletes the given key.
func (table *Table) Delete(key []byte) error {
	fd := C.bpf_table_fd_id(table.module.p, table.id)
	keyP := unsafe.Pointer(&key[0])

	r, err := C.bpf_delete_elem(fd, keyP)
	if r != 0 {
		keyStr, errK := table.KeyBytesToStr(key)
		if errK != nil {
			keyStr = fmt.Sprintf("%v", key)
		}
		return fmt.Errorf("Table.Delete: key %v: %v", keyStr, err)
	}
	return nil
}

// DeleteP deletes a key given as unsafe.Pointer.
func (table *Table) DeleteP(key unsafe.Pointer) error {
	fd := C.bpf_table_fd_id(table.module.p, table.id)

	_, err := C.bpf_delete_elem(fd, key)
	if err != nil {
		return err
	}

	return nil
}

// DeleteAll deletes all entries from the table
func (table *Table) DeleteAll() error {
	mod := table.module.p
	fd := C.bpf_table_fd_id(mod, table.id)

	keySize := C.bpf_table_key_size_id(mod, table.id)
	key := make([]byte, keySize)
	keyP := unsafe.Pointer(&key[0])
	for res := C.bpf_get_first_key(fd, keyP, keySize); res == 0; res = C.bpf_get_next_key(fd, keyP, keyP) {
		r, err := C.bpf_delete_elem(fd, keyP)
		if r != 0 {
			return fmt.Errorf("Table.DeleteAll: unable to delete element: %v", err)
		}
	}
	return nil
}

// TableIterator contains the current position for iteration over a *bcc.Table and provides methods for iteration.
type TableIterator struct {
	table *Table
	fd    C.int

	err error

	key  []byte
	leaf []byte
}

// Iter returns an iterator to list all table entries available as raw bytes.
func (table *Table) Iter() *TableIterator {
	fd := C.bpf_table_fd_id(table.module.p, table.id)
	return &TableIterator{
		table: table,
		fd:    fd,
	}
}

// Next looks up the next element and returns true if one is available.
func (it *TableIterator) Next() bool {
	if it.err != nil {
		return false
	}

	if it.key == nil {
		keySize := C.bpf_table_key_size_id(it.table.module.p, it.table.id)

		key := make([]byte, keySize)
		keyP := unsafe.Pointer(&key[0])
		if res, err := C.bpf_get_first_key(it.fd, keyP, keySize); res != 0 {
			if !os.IsNotExist(err) {
				it.err = err
			}
			return false
		}

		leafSize := C.bpf_table_leaf_size_id(it.table.module.p, it.table.id)
		mapType := C.bpf_table_type_id(it.table.module.p, it.table.id)
		switch mapType {
		case C.BPF_MAP_TYPE_PERCPU_HASH, C.BPF_MAP_TYPE_PERCPU_ARRAY:
			cpus, err := cpupossible.Get()
			if err != nil {
				it.err = fmt.Errorf("get possible cpus: %w", err)
				return false
			}
			leafSize *= C.size_t(len(cpus))
		}
		leaf := make([]byte, leafSize)

		it.key = key
		it.leaf = leaf
	} else {
		keyP := unsafe.Pointer(&it.key[0])
		if res, err := C.bpf_get_next_key(it.fd, keyP, keyP); res != 0 {
			if !os.IsNotExist(err) {
				it.err = err
			}
			return false
		}
	}

	keyP := unsafe.Pointer(&it.key[0])
	leafP := unsafe.Pointer(&it.leaf[0])
	if res, err := C.bpf_lookup_elem(it.fd, keyP, leafP); res != 0 {
		it.err = errIterationFailed
		if !os.IsNotExist(err) {
			it.err = err
		}
		return false
	}

	return true
}

// Key returns the current key value of the iterator, if the most recent call to Next returned true.
// The slice is valid only until the next call to Next.
func (it *TableIterator) Key() []byte {
	return it.key
}

// Leaf returns the current leaf value of the iterator, if the most recent call to Next returned true.
// The slice is valid only until the next call to Next.
func (it *TableIterator) Leaf() []byte {
	return it.leaf
}

// Err returns the last error that occurred during table.Iter or iter.Next.
func (it *TableIterator) Err() error {
	return it.err
}
gobpf-0.2.0/bpf.go000066400000000000000000000011211404447410300137010ustar00rootroot00000000000000// Copyright 2016 Kinvolk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bpf
gobpf-0.2.0/bpf_test.go000066400000000000000000000414231404447410300147500ustar00rootroot00000000000000// +build integration

// Copyright 2016 PLUMgrid
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
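// As an illustrative sketch (not part of the test suite), iterating a bcc
// table with the TableIterator defined in bcc/table.go typically looks like
// this; the module source and the map name "counts" are hypothetical:
//
//	module := bcc.NewModule(source, []string{})
//	defer module.Close()
//	table := bcc.NewTable(module.TableId("counts"), module)
//	iter := table.Iter()
//	for iter.Next() {
//		// Key/Leaf are only valid until the next call to Next.
//		key, leaf := iter.Key(), iter.Leaf()
//		fmt.Printf("%v -> %v\n", key, leaf)
//	}
//	if err := iter.Err(); err != nil {
//		log.Fatalf("iteration failed: %v", err)
//	}
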
package bpf import ( "bytes" "encoding/binary" "fmt" "os" "path/filepath" "strconv" "syscall" "testing" "unsafe" "github.com/iovisor/gobpf/bcc" "github.com/iovisor/gobpf/elf" "github.com/iovisor/gobpf/pkg/bpffs" "github.com/iovisor/gobpf/pkg/progtestrun" ) // redefine flags here as cgo in test is not supported const ( BPF_ANY = 0 /* create new element or update existing */ BPF_NOEXIST = 1 /* create new element if it didn't exist */ BPF_EXIST = 2 ) var simple1 string = ` BPF_TABLE("hash", int, int, table1, 10); int func1(void *ctx) { return 0; } ` var simple2 = ` struct key { int key; }; struct leaf { int value; }; BPF_HASH(table2, struct key, struct leaf, 10); int func2(void *ctx) { return 0; } ` type key struct { key uint32 } type leaf struct { value uint32 } var kernelVersion uint32 var ( kernelVersion46 uint32 kernelVersion47 uint32 kernelVersion48 uint32 kernelVersion410 uint32 kernelVersion412 uint32 kernelVersion414 uint32 ) func init() { kernelVersion46, _ = elf.KernelVersionFromReleaseString("4.6.0") kernelVersion47, _ = elf.KernelVersionFromReleaseString("4.7.0") kernelVersion48, _ = elf.KernelVersionFromReleaseString("4.8.0") kernelVersion410, _ = elf.KernelVersionFromReleaseString("4.10.0") kernelVersion412, _ = elf.KernelVersionFromReleaseString("4.12.0") kernelVersion414, _ = elf.KernelVersionFromReleaseString("4.14.0") } func TestModuleLoadBCC(t *testing.T) { b := bcc.NewModule(simple1, []string{}) if b == nil { t.Fatal("prog is nil") } defer b.Close() _, err := b.LoadKprobe("func1") if err != nil { t.Fatal(err) } } func fillTable1(b *bcc.Module) (*bcc.Table, error) { table := bcc.NewTable(b.TableId("table1"), b) key, _ := table.KeyStrToBytes("1") leaf, _ := table.LeafStrToBytes("11") if err := table.Set(key, leaf); err != nil { return nil, fmt.Errorf("table.Set key 1 failed: %v", err) } key, leaf = make([]byte, 4), make([]byte, 4) bcc.GetHostByteOrder().PutUint32(key, 2) bcc.GetHostByteOrder().PutUint32(leaf, 22) if err := table.Set(key, leaf); err != nil { return nil, fmt.Errorf("table.Set key 2 failed: %v", err) } return table, nil } func TestBCCIterTable(t *testing.T) { b := bcc.NewModule(simple1, []string{}) if b == nil { t.Fatal("prog is nil") } defer b.Close() table, err := fillTable1(b) if err != nil { t.Fatalf("fill table1 failed: %v", err) } hostEndian := bcc.GetHostByteOrder() resIter := make(map[int32]int32) iter := table.Iter() for iter.Next() { key, leaf := iter.Key(), iter.Leaf() keyStr, err := table.KeyBytesToStr(key) if err != nil { t.Fatalf("table.Iter/KeyBytesToStr failed: cannot print value: %v", err) } leafStr, err := table.LeafBytesToStr(leaf) if err != nil { t.Fatalf("table.Iter/LeafBytesToStr failed: cannot print value: %v", err) } var k, v int32 if err := binary.Read(bytes.NewBuffer(key), hostEndian, &k); err != nil { t.Fatalf("table.Iter failed: cannot decode key: %v", err) } if err := binary.Read(bytes.NewBuffer(leaf), hostEndian, &v); err != nil { t.Fatalf("table.Iter failed: cannot decode value: %v", err) } resIter[k] = v kS, err := strconv.ParseInt(keyStr[2:], 16, 32) if err != nil { t.Fatalf("table.Iter failed: non-number key: %v", err) } vS, err := strconv.ParseInt(leafStr[2:], 16, 32) if err != nil { t.Fatalf("table.Iter failed: non-number value: %v", err) } if int32(kS) != k || int32(vS) != v { t.Errorf("table.iter.Values() inconsistent with string values: (%v, %v) vs (%v, %v)", k, v, kS, vS) } } if iter.Err() != nil { t.Fatalf("table.Iter failed: iteration finished with unexpected error: %v", iter.Err()) } if count := len(resIter); count != 
2 { t.Fatalf("expected 2 entries in Iter table, not %d", count) } for _, te := range [][]int32{{1, 11}, {2, 22}} { res := resIter[te[0]] if res != te[1] { t.Fatalf("expected entry %d in Iter table to contain %d, but got %d", te[0], te[1], res) } } } func TestBCCTableSetPGetPDeleteP(t *testing.T) { b := bcc.NewModule(simple2, []string{}) if b == nil { t.Fatal("prog is nil") } defer b.Close() table := bcc.NewTable(b.TableId("table2"), b) k := &key{0} l := &leaf{1} err := table.SetP(unsafe.Pointer(k), unsafe.Pointer(l)) if err != nil { t.Fatal(err) } p, err := table.GetP(unsafe.Pointer(k)) if err != nil { t.Fatal(err) } v := (*leaf)(p) if v.value != 1 { t.Fatalf("expected 1, not %d", v.value) } err = table.DeleteP(unsafe.Pointer(k)) if err != nil { t.Fatal(err) } _, err = table.GetP(unsafe.Pointer(k)) if !os.IsNotExist(err) { t.Fatal(err) } } func TestBCCTableDeleteAll(t *testing.T) { b := bcc.NewModule(simple1, []string{}) if b == nil { t.Fatal("prog is nil") } defer b.Close() table, err := fillTable1(b) if err != nil { t.Fatalf("fill table1 failed: %v", err) } count := 0 for it := table.Iter(); it.Next(); { count++ } if count != 2 { t.Fatalf("expected 2 entries in table, not %d", count) } if err := table.DeleteAll(); err != nil { t.Fatalf("table.DeleteAll failed: %v", err) } count = 0 for it := table.Iter(); it.Next(); { count++ } if count != 0 { t.Fatalf("expected 0 entries in table, not %d", count) } } func containsMap(maps []*elf.Map, name string) bool { for _, m := range maps { if m.Name == name { return true } } return false } func containsProbe(probes []*elf.Kprobe, name string) bool { for _, k := range probes { if k.Name == name { return true } } return false } func containsUprobe(uprobes []*elf.Uprobe, name string) bool { for _, u := range uprobes { if u.Name == name { return true } } return false } func containsCgroupProg(cgroupProgs []*elf.CgroupProgram, name string) bool { for _, c := range cgroupProgs { if c.Name == name { return true } } return false } func containsTracepointProg(tracepointProgs []*elf.TracepointProgram, name string) bool { for _, c := range tracepointProgs { if c.Name == name { return true } } return false } func containsSocketFilter(socketFilters []*elf.SocketFilter, name string) bool { for _, c := range socketFilters { if c.Name == name { return true } } return false } func checkMaps(t *testing.T, b *elf.Module) { var expectedMaps = []string{ "dummy_hash", "dummy_array", "dummy_prog_array", "dummy_perf", "dummy_array_custom", } if kernelVersion >= kernelVersion46 { kernel46Maps := []string{ "dummy_percpu_hash", "dummy_percpu_array", "dummy_stack_trace", } expectedMaps = append(expectedMaps, kernel46Maps...) } else { t.Logf("kernel doesn't support percpu maps and stacktrace maps. Skipping...") } if kernelVersion >= kernelVersion48 { kernel48Maps := []string{ "dummy_cgroup_array", } expectedMaps = append(expectedMaps, kernel48Maps...) } else { t.Logf("kernel doesn't support cgroup array maps. Skipping...") } var maps []*elf.Map for m := range b.IterMaps() { maps = append(maps, m) } if len(maps) != len(expectedMaps) { t.Fatalf("unexpected number of maps. 
Got %d, expected %d", len(maps), len(expectedMaps)) } for _, em := range expectedMaps { if !containsMap(maps, em) { t.Fatalf("map %q not found", em) } } } func checkProbes(t *testing.T, b *elf.Module) { var expectedProbes = []string{ "kprobe/dummy", "kretprobe/dummy", } var probes []*elf.Kprobe for p := range b.IterKprobes() { probes = append(probes, p) } if len(probes) != len(expectedProbes) { t.Fatalf("unexpected number of probes. Got %d, expected %d", len(probes), len(expectedProbes)) } for _, ek := range expectedProbes { if !containsProbe(probes, ek) { t.Fatalf("probe %q not found", ek) } } } func checkUprobes(t *testing.T, b *elf.Module) { var expectedUprobes = []string{ "uprobe/dummy", "uretprobe/dummy", } var uprobes []*elf.Uprobe for p := range b.IterUprobes() { uprobes = append(uprobes, p) } if len(uprobes) != len(expectedUprobes) { t.Fatalf("unexpected number of uprobes. Got %d, expected %d", len(uprobes), len(expectedUprobes)) } for _, ek := range expectedUprobes { if !containsUprobe(uprobes, ek) { t.Fatalf("uprobe %q not found", ek) } } } func checkCgroupProgs(t *testing.T, b *elf.Module) { if kernelVersion < kernelVersion410 { t.Logf("kernel doesn't support cgroup-bpf. Skipping...") return } var expectedCgroupProgs = []string{ "cgroup/skb", "cgroup/sock", } var cgroupProgs []*elf.CgroupProgram for p := range b.IterCgroupProgram() { cgroupProgs = append(cgroupProgs, p) } if len(cgroupProgs) != len(expectedCgroupProgs) { t.Fatalf("unexpected number of cgroup programs. Got %d, expected %v", len(cgroupProgs), len(expectedCgroupProgs)) } for _, cp := range expectedCgroupProgs { if !containsCgroupProg(cgroupProgs, cp) { t.Fatalf("cgroup program %q not found", cp) } } } func checkXDPProgs(t *testing.T, b *elf.Module) { if kernelVersion < kernelVersion48 { t.Logf("kernel doesn't support XDP. Skipping...") t.Skip() } var expectedXDPProgs = []string{ "xdp/prog1", "xdp/prog2", } var xdpProgs []*elf.XDPProgram for p := range b.IterXDPProgram() { xdpProgs = append(xdpProgs, p) } if len(xdpProgs) != len(expectedXDPProgs) { t.Fatalf("unexpected number of XDP programs. Got %d, expected %v", len(xdpProgs), len(expectedXDPProgs)) } } func checkTracepointProgs(t *testing.T, b *elf.Module) { if kernelVersion < kernelVersion47 { t.Logf("kernel doesn't support bpf programs for tracepoints. Skipping...") return } var expectedTracepointProgs = []string{ "tracepoint/raw_syscalls/sys_enter", } var tracepointProgs []*elf.TracepointProgram for p := range b.IterTracepointProgram() { tracepointProgs = append(tracepointProgs, p) } if len(tracepointProgs) != len(expectedTracepointProgs) { t.Fatalf("unexpected number of tracepoint programs. Got %d, expected %v", len(tracepointProgs), len(expectedTracepointProgs)) } for _, p := range expectedTracepointProgs { if !containsTracepointProg(tracepointProgs, p) { t.Fatalf("tracepoint program %q not found", p) } } } func checkSocketFilters(t *testing.T, b *elf.Module) { var expectedSocketFilters = []string{ "socket/dummy", } var socketFilters []*elf.SocketFilter for sf := range b.IterSocketFilter() { socketFilters = append(socketFilters, sf) } if len(socketFilters) != len(expectedSocketFilters) { t.Fatalf("unexpected number of socket filters. 
Got %d, expected %d", len(socketFilters), len(expectedSocketFilters)) } for _, sf := range expectedSocketFilters { if !containsSocketFilter(socketFilters, sf) { t.Fatalf("socket filter %q not found", sf) } } fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, syscall.ETH_P_ALL) if err != nil { t.Fatalf("unable to open a raw socket: %s", err) } defer syscall.Close(fd) socketFilter := b.SocketFilter("socket/dummy") if socketFilter == nil { t.Fatal("socket filter dummy not found") } if err := elf.AttachSocketFilter(socketFilter, fd); err != nil { t.Fatalf("failed trying to attach socket filter: %s", err) } if err := elf.DetachSocketFilter(socketFilter, fd); err != nil { t.Fatalf("failed trying to detach socket filter: %s", err) } } func checkPinConfig(t *testing.T, expectedPaths []string) { for _, p := range expectedPaths { if fi, err := os.Stat(p); os.IsNotExist(err) || !fi.Mode().IsRegular() { t.Fatalf("pinned object %q not found", p) } } } func checkPinConfigCleanup(t *testing.T, expectedPaths []string) { for _, p := range expectedPaths { if _, err := os.Stat(p); !os.IsNotExist(err) { t.Fatalf("pinned object %q is not cleaned up", p) } } } func checkUpdateDeleteElement(t *testing.T, b *elf.Module) { mp := b.Map("dummy_hash") if mp == nil { t.Fatal("unable to find dummy_hash map") } key := 1000 value := 1000 if err := b.UpdateElement(mp, unsafe.Pointer(&key), unsafe.Pointer(&value), BPF_ANY); err != nil { t.Fatal("failed trying to update an element with BPF_ANY") } if err := b.UpdateElement(mp, unsafe.Pointer(&key), unsafe.Pointer(&value), BPF_NOEXIST); err == nil { t.Fatal("succeeded updating element with BPF_NOEXIST, but an element with the same key was added to the map before") } if err := b.UpdateElement(mp, unsafe.Pointer(&key), unsafe.Pointer(&value), BPF_EXIST); err != nil { t.Fatal("failed trying to update an element with BPF_EXIST while the key was added to the map before") } if err := b.DeleteElement(mp, unsafe.Pointer(&key)); err != nil { t.Fatal("failed to delete an element") } if err := b.UpdateElement(mp, unsafe.Pointer(&key), unsafe.Pointer(&value), BPF_EXIST); err == nil { t.Fatal("succeeded updating element with BPF_EXIST, but the element was deleted from the map before") } } func checkLookupElement(t *testing.T, b *elf.Module) { mp := b.Map("dummy_hash") if mp == nil { t.Fatal("unable to find dummy_hash map") } key := 2000 value := 2000 if err := b.UpdateElement(mp, unsafe.Pointer(&key), unsafe.Pointer(&value), BPF_ANY); err != nil { t.Fatal("failed trying to update an element with BPF_ANY") } var lvalue int if err := b.LookupElement(mp, unsafe.Pointer(&key), unsafe.Pointer(&lvalue)); err != nil { t.Fatal("failed trying to lookup an element previously added") } if value != lvalue { t.Fatalf("wrong value returned, expected %d, got %d", value, lvalue) } key = 3000 if err := b.LookupElement(mp, unsafe.Pointer(&key), unsafe.Pointer(&lvalue)); err == nil { t.Fatalf("succeeded to find an element which wasn't added previously") } found := map[int]bool{2000: false} for i := 4000; i != 4010; i++ { key = i value = i if err := b.UpdateElement(mp, unsafe.Pointer(&key), unsafe.Pointer(&value), BPF_ANY); err != nil { t.Fatal("failed trying to update an element with BPF_ANY") } found[key] = false } key = 0 nextKey := 0 for { f, err := b.LookupNextElement(mp, unsafe.Pointer(&key), unsafe.Pointer(&nextKey), unsafe.Pointer(&lvalue)) if err != nil { t.Fatalf("failed trying to lookup the next element: %s", err) } if !f { break } if nextKey != lvalue { t.Fatalf("key %d not 
corresponding to value %d", nextKey, lvalue) } if _, ok := found[nextKey]; !ok { t.Fatalf("key %d found", nextKey) } found[nextKey] = true key = nextKey } for key, f := range found { if !f { t.Fatalf("expected key %d not found", key) } } } func checkProgTestRun(t *testing.T, b *elf.Module) { if kernelVersion < kernelVersion412 { t.Logf("kernel doesn't support BPF_PROG_TEST_RUN. Skipping...") return } prog := b.CgroupProgram("cgroup/skb") if prog == nil { t.Fatal("unable to find prog") } // minimum amount of input data, but unused data := make([]byte, 14) returnValue, _, _, err := progtestrun.Run(prog.Fd(), 1, data, nil) if err != nil { t.Fatalf("bpf_prog_test_run failed: %v", err) } if returnValue != 1 { t.Fatalf("expected return value 1, got %d", returnValue) } } func TestModuleLoadELF(t *testing.T) { var err error kernelVersion, err = elf.CurrentKernelVersion() if err != nil { t.Fatalf("error getting current kernel version: %v", err) } dummyELF := "./tests/dummy.o" if kernelVersion > kernelVersion414 { dummyELF = "./tests/dummy-414.o" } else if kernelVersion > kernelVersion410 { dummyELF = "./tests/dummy-410.o" } else if kernelVersion > kernelVersion48 { dummyELF = "./tests/dummy-48.o" } else if kernelVersion > kernelVersion46 { dummyELF = "./tests/dummy-46.o" } var secParams = map[string]elf.SectionParams{ "maps/dummy_array_custom": elf.SectionParams{ PinPath: filepath.Join("gobpf-test", "testgroup1"), }, } var closeOptions = map[string]elf.CloseOptions{ "maps/dummy_array_custom": elf.CloseOptions{ Unpin: true, PinPath: filepath.Join("gobpf-test", "testgroup1"), }, } if err := bpffs.Mount(); err != nil { t.Fatalf("error mounting bpf fs: %v", err) } b := elf.NewModule(dummyELF) if b == nil { t.Fatal("prog is nil") } if err := b.Load(secParams); err != nil { t.Fatal(err) } defer func() { if err := b.CloseExt(closeOptions); err != nil { t.Fatal(err) } checkPinConfigCleanup(t, []string{"/sys/fs/bpf/gobpf-test/testgroup1"}) }() checkMaps(t, b) checkProbes(t, b) checkUprobes(t, b) checkCgroupProgs(t, b) checkSocketFilters(t, b) checkTracepointProgs(t, b) checkXDPProgs(t, b) checkPinConfig(t, []string{"/sys/fs/bpf/gobpf-test/testgroup1"}) checkUpdateDeleteElement(t, b) checkLookupElement(t, b) checkProgTestRun(t, b) } gobpf-0.2.0/elf/000077500000000000000000000000001404447410300133555ustar00rootroot00000000000000gobpf-0.2.0/elf/compat.go000066400000000000000000000042071404447410300151720ustar00rootroot00000000000000// +build linux // (c) 2018 Suchakra Sharma // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package elf import ( "errors" "io/ioutil" "regexp" "runtime" ) const defaultSymFile = "/proc/kallsyms" // Returns the qualified syscall named by going through '/proc/kallsyms' on the // system on which its executed. 
It allows BPF programs that may have been compiled
// for older syscall functions to run on newer kernels.
func GetSyscallFnName(name string) (string, error) {
	// Get kernel symbols
	syms, err := ioutil.ReadFile(defaultSymFile)
	if err != nil {
		return "", err
	}
	return getSyscallFnNameWithKallsyms(name, string(syms))
}

func getSyscallFnNameWithKallsyms(name string, kallsymsContent string) (string, error) {
	var arch string
	switch runtime.GOARCH {
	case "386":
		arch = "ia32"
	default:
		arch = "x64"
	}

	// Search for the new-style syscall function first, e.g. "__x64_sys_open".
	// Note the word boundary at the start; this should match exactly one symbol.
	regexStr := `(\b__` + arch + `_[Ss]y[sS]_` + name + `\b)`
	fnRegex := regexp.MustCompile(regexStr)

	match := fnRegex.FindAllString(kallsymsContent, -1)

	// If nothing is found, fall back to the old-style syscall function name.
	if len(match) == 0 {
		oldRegexStr := `(\b[Ss]y[sS]_` + name + `\b)`
		fnRegex = regexp.MustCompile(oldRegexStr)
		oldMatch := fnRegex.FindAllString(kallsymsContent, -1)

		// If we get something like 'sys_open' or 'SyS_open', return either
		// (they have the same address); otherwise report failure.
		if len(oldMatch) >= 1 {
			return oldMatch[0], nil
		}
		return "", errors.New("could not find a valid syscall name")
	}

	return match[0], nil
}
gobpf-0.2.0/elf/compat_test.go000066400000000000000000000050251404447410300162300ustar00rootroot00000000000000// +build linux

// (c) 2018 ShiftLeft GmbH
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
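// As a usage sketch for GetSyscallFnName above (the kprobe handling around it
// is assumed, not shown): qualify a syscall name before attaching a kprobe so
// that the same BPF object works on kernels with and without the syscall
// symbol prefix:
//
//	fnName, err := elf.GetSyscallFnName("open")
//	if err != nil {
//		log.Fatalf("could not qualify syscall name: %v", err)
//	}
//	// fnName is "__x64_sys_open" on prefixed kernels, "sys_open" otherwise.
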
package elf

import (
	"testing"
)

const prefixedKallsymsSymbols = `
0000000000000000 W __x32_compat_sys_open_by_handle_at
0000000000000000 T do_sys_open
0000000000000000 T __x64_sys_open
0000000000000000 T __ia32_sys_open
0000000000000000 T __x64_sys_openat
0000000000000000 T __ia32_sys_openat
0000000000000000 T __ia32_compat_sys_open
0000000000000000 T __ia32_compat_sys_openat
0000000000000000 T __x64_sys_open_by_handle_at
0000000000000000 T __ia32_sys_open_by_handle_at
0000000000000000 T __ia32_compat_sys_open_by_handle_at
0000000000000000 t proc_sys_open
0000000000000000 t _eil_addr___ia32_compat_sys_openat
0000000000000000 t _eil_addr___ia32_compat_sys_open
0000000000000000 t _eil_addr___ia32_sys_openat
0000000000000000 t _eil_addr___x64_sys_openat
0000000000000000 t _eil_addr___ia32_sys_open
0000000000000000 t _eil_addr___x64_sys_open
0000000000000000 t _eil_addr___ia32_compat_sys_open_by_handle_at
0000000000000000 t _eil_addr___ia32_sys_open_by_handle_at
0000000000000000 t _eil_addr___x64_sys_open_by_handle_at
`

const kallsymsSymbols = `
0000000000000000 T dentry_open
0000000000000000 T filp_clone_open
0000000000000000 T file_open_name
0000000000000000 T filp_open
0000000000000000 T do_sys_open
0000000000000000 T SyS_open
0000000000000000 T sys_open
0000000000000000 T SyS_openat
0000000000000000 T sys_openat
0000000000000000 T compat_SyS_open
0000000000000000 T compat_sys_open
0000000000000000 T compat_SyS_openat
0000000000000000 T compat_sys_openat
0000000000000000 T SyS_creat
0000000000000000 T sys_creat
0000000000000000 T sys_vhangup
`

func TestGetSyscallFnName(t *testing.T) {
	fnName, err := getSyscallFnNameWithKallsyms("open", prefixedKallsymsSymbols)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if fnName != "__x64_sys_open" {
		t.Errorf("expected __x64_sys_open, got %q", fnName)
	}

	fnName, err = getSyscallFnNameWithKallsyms("open", kallsymsSymbols)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if fnName != "SyS_open" {
		t.Errorf("expected SyS_open, got %q", fnName)
	}
}
gobpf-0.2.0/elf/elf.go000066400000000000000000000626031404447410300144610ustar00rootroot00000000000000// +build linux

// Copyright 2016 Cilium Project
// Copyright 2016 Sylvain Afchain
// Copyright 2016 Kinvolk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
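// A minimal loading sketch for the Module implemented below; the object file
// path and the section names are hypothetical:
//
//	b := elf.NewModule("./program.o")
//	err := b.Load(map[string]elf.SectionParams{
//		"maps/events": {PerfRingBufferPageCount: 64}, // must be a power of 2
//		"maps/counts": {MapMaxEntries: 4096},         // override compiled-in size
//	})
//	if err != nil {
//		log.Fatalf("error loading module: %v", err)
//	}
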
package elf import ( "bytes" "debug/elf" "encoding/binary" "errors" "fmt" "io" "os" "path/filepath" "strings" "syscall" "unsafe" _ "github.com/iovisor/gobpf/elf/include" _ "github.com/iovisor/gobpf/elf/include/uapi/linux" "github.com/iovisor/gobpf/pkg/bpffs" "github.com/iovisor/gobpf/pkg/cpuonline" ) /* #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include #include #include #include #include "bpf_map.h" #include #include #include typedef struct bpf_map { int fd; bpf_map_def def; } bpf_map; // from https://github.com/safchain/goebpf // Apache License, Version 2.0 extern int bpf_pin_object(int fd, const char *pathname); __u64 ptr_to_u64(void *ptr) { return (__u64) (unsigned long) ptr; } static void bpf_apply_relocation(int fd, struct bpf_insn *insn) { insn->src_reg = BPF_PSEUDO_MAP_FD; insn->imm = fd; } static int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, int max_entries, int map_flags) { int ret; union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.map_type = map_type; attr.key_size = key_size; attr.value_size = value_size; attr.max_entries = max_entries; attr.map_flags = map_flags; ret = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)); if (ret < 0 && errno == EPERM) { // When EPERM is returned, two reasons are possible: // 1. user has no permissions for bpf() // 2. user has insufficent rlimit for locked memory // Unfortunately, there is no api to inspect the current usage of locked // mem for the user, so an accurate calculation of how much memory to lock // for this new program is difficult to calculate. As a hack, bump the limit // to unlimited. If program load fails again, return the error. struct rlimit rl = {}; if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0) { rl.rlim_max = RLIM_INFINITY; rl.rlim_cur = rl.rlim_max; if (setrlimit(RLIMIT_MEMLOCK, &rl) == 0) { ret = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)); } else { printf("setrlimit() failed with errno=%d\n", errno); return -1; } } } return ret; } void create_bpf_obj_get(const char *pathname, void *attr) { union bpf_attr *ptr_bpf_attr; ptr_bpf_attr = (union bpf_attr *)attr; ptr_bpf_attr->pathname = ptr_to_u64((void *) pathname); } int get_pinned_obj_fd(const char *path) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); create_bpf_obj_get(path, &attr); return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr)); } static bpf_map *bpf_load_map(bpf_map_def *map_def, const char *path) { bpf_map *map; struct stat st; int ret, do_pin = 0; map = calloc(1, sizeof(bpf_map)); if (map == NULL) return NULL; memcpy(&map->def, map_def, sizeof(bpf_map_def)); switch (map_def->pinning) { case 1: // PIN_OBJECT_NS // TODO to be implemented free(map); return 0; case 2: // PIN_GLOBAL_NS case 3: // PIN_CUSTOM_NS if (stat(path, &st) == 0) { ret = get_pinned_obj_fd(path); if (ret < 0) { free(map); return 0; } map->fd = ret; return map; } do_pin = 1; } map->fd = bpf_create_map(map_def->type, map_def->key_size, map_def->value_size, map_def->max_entries, map_def->map_flags ); if (map->fd < 0) { free(map); return 0; } if (do_pin) { ret = bpf_pin_object(map->fd, path); if (ret < 0) { free(map); return 0; } } return map; } static int bpf_prog_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, int prog_len, const char *license, int kern_version, char *log_buf, int log_size) { int ret; union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.prog_type = prog_type; attr.insn_cnt = prog_len / sizeof(struct bpf_insn); attr.insns = ptr_to_u64((void 
*) insns); attr.license = ptr_to_u64((void *) license); attr.log_buf = ptr_to_u64(log_buf); attr.log_size = log_size; attr.log_level = 1; attr.kern_version = kern_version; ret = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr)); if (ret < 0 && errno == EPERM) { // When EPERM is returned, two reasons are possible: // 1. user has no permissions for bpf() // 2. user has insufficent rlimit for locked memory // Unfortunately, there is no api to inspect the current usage of locked // mem for the user, so an accurate calculation of how much memory to lock // for this new program is difficult to calculate. As a hack, bump the limit // to unlimited. If program load fails again, return the error. struct rlimit rl = {}; if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0) { rl.rlim_max = RLIM_INFINITY; rl.rlim_cur = rl.rlim_max; if (setrlimit(RLIMIT_MEMLOCK, &rl) == 0) { ret = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr)); } else { printf("setrlimit() failed with errno=%d\n", errno); return -1; } } } return ret; } static int bpf_update_element(int fd, void *key, void *value, unsigned long long flags) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.map_fd = fd; attr.key = ptr_to_u64(key); attr.value = ptr_to_u64(value); attr.flags = flags; return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)); } static int perf_event_open_map(int pid, int cpu, int group_fd, unsigned long flags, int backward) { struct perf_event_attr attr = {0,}; attr.type = PERF_TYPE_SOFTWARE; attr.sample_type = PERF_SAMPLE_RAW; attr.wakeup_events = 1; attr.write_backward = !!backward; attr.size = sizeof(struct perf_event_attr); attr.config = 10; // PERF_COUNT_SW_BPF_OUTPUT return syscall(__NR_perf_event_open, &attr, pid, cpu, group_fd, flags); } */ import "C" const ( useCurrentKernelVersion = 0xFFFFFFFE // Object pin settings should correspond to those of other projects, e.g.: // https://git.kernel.org/pub/scm/linux/kernel/git/shemminger/iproute2.git/tree/include/bpf_elf.h#n25 // Also it should be self-consistent with `elf/include/bpf.h` in the same repository. 
PIN_NONE = 0 PIN_OBJECT_NS = 1 PIN_GLOBAL_NS = 2 PIN_CUSTOM_NS = 3 ) // Based on https://github.com/safchain/goebpf // Apache License func elfReadLicense(file *elf.File) (string, error) { if lsec := file.Section("license"); lsec != nil { data, err := lsec.Data() if err != nil { return "", err } return string(data), nil } return "", nil } func elfReadVersion(file *elf.File) (uint32, error) { if vsec := file.Section("version"); vsec != nil { data, err := vsec.Data() if err != nil { return 0, err } if len(data) != 4 { return 0, errors.New("version is not a __u32") } version := *(*C.uint32_t)(unsafe.Pointer(&data[0])) return uint32(version), nil } return 0, nil } func createPinPath(path string) (string, error) { if err := bpffs.Mount(); err != nil { return "", err } if err := os.MkdirAll(filepath.Dir(path), syscall.S_IRWXU); err != nil { return "", fmt.Errorf("error creating map directory %q: %v", filepath.Dir(path), err) } return path, nil } func validateMapPath(path string) bool { if !strings.HasPrefix(path, BPFFSPath) { return false } return filepath.Clean(path) == path } func getMapNamespace(mapDef *C.bpf_map_def) string { namespacePtr := &mapDef.namespace[0] return C.GoStringN(namespacePtr, C.int(C.strnlen(namespacePtr, C.BUF_SIZE_MAP_NS))) } func getMapPath(mapDef *C.bpf_map_def, mapName, pinPath string) (string, error) { var mapPath string switch mapDef.pinning { case PIN_OBJECT_NS: return "", fmt.Errorf("not implemented yet") case PIN_GLOBAL_NS: namespace := getMapNamespace(mapDef) if namespace == "" { return "", fmt.Errorf("map %q has empty namespace", mapName) } mapPath = filepath.Join(BPFFSPath, namespace, BPFDirGlobals, mapName) case PIN_CUSTOM_NS: if pinPath == "" { return "", fmt.Errorf("no pin path given for map %q with PIN_CUSTOM_NS", mapName) } mapPath = filepath.Join(BPFFSPath, pinPath) default: // map is not pinned return "", nil } return mapPath, nil } func createMapPath(mapDef *C.bpf_map_def, mapName string, params SectionParams) (string, error) { mapPath, err := getMapPath(mapDef, mapName, params.PinPath) if err != nil || mapPath == "" { return "", err } if !validateMapPath(mapPath) { return "", fmt.Errorf("invalid path %q", mapPath) } return createPinPath(mapPath) } func elfReadMaps(file *elf.File, params map[string]SectionParams) (map[string]*Map, error) { maps := make(map[string]*Map) for _, section := range file.Sections { if !strings.HasPrefix(section.Name, "maps/") { continue } name := strings.TrimPrefix(section.Name, "maps/") if oldMap, ok := maps[name]; ok { return nil, fmt.Errorf("duplicate map: %q and %q", oldMap.Name, name) } data, err := section.Data() if err != nil { return nil, err } if len(data) != C.sizeof_struct_bpf_map_def { return nil, fmt.Errorf("only one map with size %d bytes allowed per section (check bpf_map_def)", C.sizeof_struct_bpf_map_def) } mapDef := (*C.bpf_map_def)(unsafe.Pointer(&data[0])) // check if the map size has to be changed if p, ok := params[section.Name]; ok { if p.MapMaxEntries != 0 { mapDef.max_entries = C.uint(p.MapMaxEntries) } } mapPath, err := createMapPath(mapDef, name, params[section.Name]) if err != nil { return nil, err } mapPathC := C.CString(mapPath) defer C.free(unsafe.Pointer(mapPathC)) cm, err := C.bpf_load_map(mapDef, mapPathC) if cm == nil { return nil, fmt.Errorf("error while loading map %q: %v", section.Name, err) } maps[name] = &Map{ Name: name, m: cm, } } return maps, nil } func (b *Module) relocate(data []byte, rdata []byte) error { var symbol elf.Symbol var offset uint64 symbols, err := b.file.Symbols() if 
err != nil { return err } br := bytes.NewReader(data) for { switch b.file.Class { case elf.ELFCLASS64: var rel elf.Rel64 err := binary.Read(br, b.file.ByteOrder, &rel) if err != nil { if err == io.EOF { return nil } return err } symNo := rel.Info >> 32 symbol = symbols[symNo-1] offset = rel.Off case elf.ELFCLASS32: var rel elf.Rel32 err := binary.Read(br, b.file.ByteOrder, &rel) if err != nil { if err == io.EOF { return nil } return err } symNo := rel.Info >> 8 symbol = symbols[symNo-1] offset = uint64(rel.Off) default: return errors.New("architecture not supported") } rinsn := (*C.struct_bpf_insn)(unsafe.Pointer(&rdata[offset])) if rinsn.code != (C.BPF_LD | C.BPF_IMM | C.BPF_DW) { symbolSec := b.file.Sections[symbol.Section] return fmt.Errorf("invalid relocation: insn code=%#x, symbol name=%s\nsymbol section: Name=%s, Type=%s, Flags=%s", *(*C.uchar)(unsafe.Pointer(&rinsn.code)), symbol.Name, symbolSec.Name, symbolSec.Type.String(), symbolSec.Flags.String(), ) } symbolSec := b.file.Sections[symbol.Section] if !strings.HasPrefix(symbolSec.Name, "maps/") { return fmt.Errorf("map location not supported: map %q is in section %q instead of \"maps/%s\"", symbol.Name, symbolSec.Name, symbol.Name) } name := strings.TrimPrefix(symbolSec.Name, "maps/") m := b.Map(name) if m == nil { return fmt.Errorf("relocation error, symbol %q not found in section %q", symbol.Name, symbolSec.Name) } C.bpf_apply_relocation(m.m.fd, rinsn) } } type SectionParams struct { PerfRingBufferPageCount int SkipPerfMapInitialization bool PinPath string // path to be pinned, relative to "/sys/fs/bpf" MapMaxEntries int // Used to override bpf map entries size PerfRingBufferBackward bool PerfRingBufferOverwritable bool } // Load loads the BPF programs and BPF maps in the module. Each ELF section // can optionally have parameters that changes how it is configured. 
func (b *Module) Load(parameters map[string]SectionParams) error { if b.fileName != "" { fileReader, err := os.Open(b.fileName) if err != nil { return err } defer fileReader.Close() b.fileReader = fileReader } var err error b.file, err = elf.NewFile(b.fileReader) if err != nil { return err } license, err := elfReadLicense(b.file) if err != nil { return err } lp := unsafe.Pointer(C.CString(license)) defer C.free(lp) version, err := elfReadVersion(b.file) if err != nil { return err } if version == useCurrentKernelVersion { version, err = CurrentKernelVersion() if err != nil { return err } } maps, err := elfReadMaps(b.file, parameters) if err != nil { return err } b.maps = maps processed := make([]bool, len(b.file.Sections)) for i, section := range b.file.Sections { if processed[i] { continue } data, err := section.Data() if err != nil { return err } if len(data) == 0 { continue } if section.Type == elf.SHT_REL { rsection := b.file.Sections[section.Info] processed[i] = true processed[section.Info] = true secName := rsection.Name isKprobe := strings.HasPrefix(secName, "kprobe/") isKretprobe := strings.HasPrefix(secName, "kretprobe/") isUprobe := strings.HasPrefix(secName, "uprobe/") isUretprobe := strings.HasPrefix(secName, "uretprobe/") isCgroupSkb := strings.HasPrefix(secName, "cgroup/skb") isCgroupSock := strings.HasPrefix(secName, "cgroup/sock") isSocketFilter := strings.HasPrefix(secName, "socket") isTracepoint := strings.HasPrefix(secName, "tracepoint/") isSchedCls := strings.HasPrefix(secName, "sched_cls/") isSchedAct := strings.HasPrefix(secName, "sched_act/") isXDP := strings.HasPrefix(secName, "xdp/") var progType uint32 switch { case isKprobe: fallthrough case isKretprobe: fallthrough case isUprobe: fallthrough case isUretprobe: progType = uint32(C.BPF_PROG_TYPE_KPROBE) case isCgroupSkb: progType = uint32(C.BPF_PROG_TYPE_CGROUP_SKB) case isCgroupSock: progType = uint32(C.BPF_PROG_TYPE_CGROUP_SOCK) case isSocketFilter: progType = uint32(C.BPF_PROG_TYPE_SOCKET_FILTER) case isTracepoint: progType = uint32(C.BPF_PROG_TYPE_TRACEPOINT) case isSchedCls: progType = uint32(C.BPF_PROG_TYPE_SCHED_CLS) case isSchedAct: progType = uint32(C.BPF_PROG_TYPE_SCHED_ACT) case isXDP: progType = uint32(C.BPF_PROG_TYPE_XDP) } // If Kprobe or Kretprobe for a syscall, use correct syscall prefix in section name if b.compatProbe && (isKprobe || isKretprobe) { str := strings.Split(secName, "/") if (strings.HasPrefix(str[1], "SyS_")) || (strings.HasPrefix(str[1], "sys_")) { name := strings.TrimPrefix(str[1], "SyS_") name = strings.TrimPrefix(name, "sys_") syscallFnName, err := GetSyscallFnName(name) if err == nil { secName = fmt.Sprintf("%s/%s", str[0], syscallFnName) } } } if isKprobe || isKretprobe || isUprobe || isUretprobe || isCgroupSkb || isCgroupSock || isSocketFilter || isTracepoint || isSchedCls || isSchedAct || isXDP { rdata, err := rsection.Data() if err != nil { return err } if len(rdata) == 0 { continue } err = b.relocate(data, rdata) if err != nil { return err } insns := (*C.struct_bpf_insn)(unsafe.Pointer(&rdata[0])) progFd, err := C.bpf_prog_load(progType, insns, C.int(rsection.Size), (*C.char)(lp), C.int(version), (*C.char)(unsafe.Pointer(&b.log[0])), C.int(len(b.log))) if progFd < 0 { return fmt.Errorf("error while loading %q (%v):\n%s", secName, err, b.log) } switch { case isKprobe: fallthrough case isKretprobe: b.probes[secName] = &Kprobe{ Name: secName, insns: insns, fd: int(progFd), efd: -1, } case isUprobe: fallthrough case isUretprobe: b.uprobes[secName] = &Uprobe{ Name: secName, insns: 
insns, fd: int(progFd), efds: make(map[string]int), } case isCgroupSkb: fallthrough case isCgroupSock: b.cgroupPrograms[secName] = &CgroupProgram{ Name: secName, insns: insns, fd: int(progFd), } case isSocketFilter: b.socketFilters[secName] = &SocketFilter{ Name: secName, insns: insns, fd: int(progFd), } case isTracepoint: b.tracepointPrograms[secName] = &TracepointProgram{ Name: secName, insns: insns, fd: int(progFd), efd: -1, } case isSchedCls: fallthrough case isSchedAct: b.schedPrograms[secName] = &SchedProgram{ Name: secName, insns: insns, fd: int(progFd), } case isXDP: b.xdpPrograms[secName] = &XDPProgram{ Name: secName, insns: insns, fd: int(progFd), } } } } } for i, section := range b.file.Sections { if processed[i] { continue } secName := section.Name isKprobe := strings.HasPrefix(secName, "kprobe/") isKretprobe := strings.HasPrefix(secName, "kretprobe/") isUprobe := strings.HasPrefix(secName, "uprobe/") isUretprobe := strings.HasPrefix(secName, "uretprobe/") isCgroupSkb := strings.HasPrefix(secName, "cgroup/skb") isCgroupSock := strings.HasPrefix(secName, "cgroup/sock") isSocketFilter := strings.HasPrefix(secName, "socket") isTracepoint := strings.HasPrefix(secName, "tracepoint/") isSchedCls := strings.HasPrefix(secName, "sched_cls/") isSchedAct := strings.HasPrefix(secName, "sched_act/") isXDP := strings.HasPrefix(secName, "xdp/") var progType uint32 switch { case isKprobe: fallthrough case isKretprobe: fallthrough case isUprobe: fallthrough case isUretprobe: progType = uint32(C.BPF_PROG_TYPE_KPROBE) case isCgroupSkb: progType = uint32(C.BPF_PROG_TYPE_CGROUP_SKB) case isCgroupSock: progType = uint32(C.BPF_PROG_TYPE_CGROUP_SOCK) case isSocketFilter: progType = uint32(C.BPF_PROG_TYPE_SOCKET_FILTER) case isTracepoint: progType = uint32(C.BPF_PROG_TYPE_TRACEPOINT) case isSchedCls: progType = uint32(C.BPF_PROG_TYPE_SCHED_CLS) case isSchedAct: progType = uint32(C.BPF_PROG_TYPE_SCHED_ACT) case isXDP: progType = uint32(C.BPF_PROG_TYPE_XDP) } // If Kprobe or Kretprobe for a syscall, use correct syscall prefix in section name if b.compatProbe && (isKprobe || isKretprobe) { str := strings.Split(secName, "/") if (strings.HasPrefix(str[1], "SyS_")) || (strings.HasPrefix(str[1], "sys_")) { name := strings.TrimPrefix(str[1], "SyS_") name = strings.TrimPrefix(name, "sys_") syscallFnName, err := GetSyscallFnName(name) if err == nil { secName = fmt.Sprintf("%s/%s", str[0], syscallFnName) } } } if isKprobe || isKretprobe || isUprobe || isUretprobe || isCgroupSkb || isCgroupSock || isSocketFilter || isTracepoint || isSchedCls || isSchedAct || isXDP { data, err := section.Data() if err != nil { return err } if len(data) == 0 { continue } insns := (*C.struct_bpf_insn)(unsafe.Pointer(&data[0])) progFd, err := C.bpf_prog_load(progType, insns, C.int(section.Size), (*C.char)(lp), C.int(version), (*C.char)(unsafe.Pointer(&b.log[0])), C.int(len(b.log))) if progFd < 0 { return fmt.Errorf("error while loading %q (%v):\n%s", section.Name, err, b.log) } switch { case isKprobe: fallthrough case isKretprobe: b.probes[secName] = &Kprobe{ Name: secName, insns: insns, fd: int(progFd), efd: -1, } case isUprobe: fallthrough case isUretprobe: b.uprobes[secName] = &Uprobe{ Name: secName, insns: insns, fd: int(progFd), efds: make(map[string]int), } case isCgroupSkb: fallthrough case isCgroupSock: b.cgroupPrograms[secName] = &CgroupProgram{ Name: secName, insns: insns, fd: int(progFd), } case isSocketFilter: b.socketFilters[secName] = &SocketFilter{ Name: secName, insns: insns, fd: int(progFd), } case isTracepoint: 
b.tracepointPrograms[secName] = &TracepointProgram{ Name: secName, insns: insns, fd: int(progFd), efd: -1, } case isSchedCls: fallthrough case isSchedAct: b.schedPrograms[secName] = &SchedProgram{ Name: secName, insns: insns, fd: int(progFd), } case isXDP: b.xdpPrograms[secName] = &XDPProgram{ Name: secName, insns: insns, fd: int(progFd), } } } } return b.initializePerfMaps(parameters) } func createPerfRingBuffer(backward bool, overwriteable bool, pageCount int) ([]C.int, []*C.struct_perf_event_mmap_page, [][]byte, error) { pageSize := os.Getpagesize() cpus, err := cpuonline.Get() if err != nil { return nil, nil, nil, fmt.Errorf("failed to determine online cpus: %v", err) } pmuFds := make([]C.int, len(cpus)) headers := make([]*C.struct_perf_event_mmap_page, len(cpus)) bases := make([][]byte, len(cpus)) for i, cpu := range cpus { cpuC := C.int(cpu) backwardC := C.int(0) if backward { backwardC = 1 } pmuFD, err := C.perf_event_open_map(-1 /* pid */, cpuC /* cpu */, -1 /* group_fd */, C.PERF_FLAG_FD_CLOEXEC, backwardC) if pmuFD < 0 { return nil, nil, nil, fmt.Errorf("perf_event_open for map error: %v", err) } // mmap mmapSize := pageSize * (pageCount + 1) // The 'overwritable' bit is set via PROT_WRITE, see: // https://github.com/torvalds/linux/commit/9ecda41acb971ebd07c8fb35faf24005c0baea12 // "By mapping without 'PROT_WRITE', an overwritable ring buffer is created." var prot int if overwriteable { prot = syscall.PROT_READ } else { prot = syscall.PROT_READ | syscall.PROT_WRITE } base, err := syscall.Mmap(int(pmuFD), 0, mmapSize, prot, syscall.MAP_SHARED) if err != nil { return nil, nil, nil, fmt.Errorf("mmap error: %v", err) } // enable _, _, err2 := syscall.Syscall(syscall.SYS_IOCTL, uintptr(pmuFD), C.PERF_EVENT_IOC_ENABLE, 0) if err2 != 0 { return nil, nil, nil, fmt.Errorf("error enabling perf event: %v", err2) } pmuFds[i] = pmuFD headers[i] = (*C.struct_perf_event_mmap_page)(unsafe.Pointer(&base[0])) bases[i] = base } return pmuFds, headers, bases, nil } func (b *Module) initializePerfMaps(parameters map[string]SectionParams) error { for name, m := range b.maps { if m.m != nil && m.m.def._type != C.BPF_MAP_TYPE_PERF_EVENT_ARRAY { continue } b.maps[name].pageCount = 8 // reasonable default backward := false overwriteable := false sectionName := "maps/" + name if params, ok := parameters[sectionName]; ok { if params.SkipPerfMapInitialization { continue } if params.PerfRingBufferPageCount > 0 { if (params.PerfRingBufferPageCount & (params.PerfRingBufferPageCount - 1)) != 0 { return fmt.Errorf("number of pages (%d) must be stricly positive and a power of 2", params.PerfRingBufferPageCount) } b.maps[name].pageCount = params.PerfRingBufferPageCount } if params.PerfRingBufferBackward { backward = true } if params.PerfRingBufferOverwritable { overwriteable = true } } pmuFds, headers, bases, err := createPerfRingBuffer(backward, overwriteable, b.maps[name].pageCount) if err != nil { return fmt.Errorf("cannot create perfring map %v", err) } cpus, err := cpuonline.Get() if err != nil { return fmt.Errorf("failed to determine online cpus: %v", err) } for index, cpu := range cpus { // assign perf fd to map ret, err := C.bpf_update_element(C.int(b.maps[name].m.fd), unsafe.Pointer(&cpu), unsafe.Pointer(&pmuFds[index]), C.BPF_ANY) if ret != 0 { return fmt.Errorf("cannot assign perf fd to map %q: %v (cpu %d)", name, err, index) } } b.maps[name].pmuFDs = pmuFds b.maps[name].headers = headers b.maps[name].bases = bases } return nil } // PerfMapStop stops the BPF program from writing into the perf ring 
// buffers. However, the userspace program can still read the ring buffers.
func (b *Module) PerfMapStop(mapName string) error {
	m, ok := b.maps[mapName]
	if !ok {
		return fmt.Errorf("map %q not found", mapName)
	}
	if m.m.def._type != C.BPF_MAP_TYPE_PERF_EVENT_ARRAY {
		return fmt.Errorf("%q is not a perf map", mapName)
	}

	cpus, err := cpuonline.Get()
	if err != nil {
		return fmt.Errorf("failed to determine online cpus: %v", err)
	}

	for _, cpu := range cpus {
		err = b.DeleteElement(m, unsafe.Pointer(&cpu))
		if err != nil {
			return err
		}
	}

	return nil
}

// Map represents an eBPF map. An eBPF map has to be declared in the
// C file.
type Map struct {
	Name string
	m    *C.bpf_map

	// only for perf maps
	pmuFDs  []C.int
	headers []*C.struct_perf_event_mmap_page
	bases   [][]byte

	pageCount int
}

func (b *Module) IterMaps() <-chan *Map {
	ch := make(chan *Map)
	go func() {
		for name := range b.maps {
			ch <- b.maps[name]
		}
		close(ch)
	}()
	return ch
}

func (b *Module) Map(name string) *Map {
	return b.maps[name]
}

func (m *Map) Fd() int {
	return int(m.m.fd)
}

// GetProgFd returns the fd for a pinned bpf program at the given path
func GetProgFd(pinPath string) int {
	pathC := C.CString(pinPath)
	defer C.free(unsafe.Pointer(pathC))

	return int(C.get_pinned_obj_fd(pathC))
}
gobpf-0.2.0/elf/elf_test.go000066400000000000000000000022461404447410300155150ustar00rootroot00000000000000// Copyright 2017 Kinvolk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
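// The Module perf-map API above is typically driven as in the following
// sketch. It is illustrative only: the object file name "./program.o" and
// the map name "events" are assumptions, not taken from these sources.
//
//	b := elf.NewModule("./program.o")
//	if err := b.Load(nil); err != nil {
//		log.Fatal(err)
//	}
//	for m := range b.IterMaps() {
//		fmt.Println(m.Name, m.Fd())
//	}
//	// Ask the BPF side to stop writing into the perf ring buffers.
//	if err := b.PerfMapStop("events"); err != nil {
//		log.Fatal(err)
//	}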
// +build linux

package elf

import (
	"testing"
)

func TestValidateMapPath(t *testing.T) {
	tests := []struct {
		input    string
		expected bool
	}{
		{
			input:    "/sys/fs/bpf/good/path",
			expected: true,
		},
		{
			input:    "/sys/fs/bpf/../../bad/path",
			expected: false,
		},
		{
			input:    "/sys/fs/bpf/./bad/path",
			expected: false,
		},
		{
			input:    "/bad/path",
			expected: false,
		},
	}

	for i, tt := range tests {
		if isValid := validateMapPath(tt.input); isValid != tt.expected {
			t.Fatalf("test %d (%s) expected %t but got %t", i, tt.input, tt.expected, isValid)
		}
	}
}
gobpf-0.2.0/elf/elf_unsupported.go000066400000000000000000000012211404447410300171260ustar00rootroot00000000000000// +build !linux

package elf

// not supported; dummy struct
type BPFKProbePerf struct{}
type SectionParams struct{}
type Map struct{}

func (b *Module) Load(parameters map[string]SectionParams) error {
	return errNotSupported
}

func NewBpfPerfEvent(fileName string) *BPFKProbePerf {
	// not supported
	return nil
}

func (b *BPFKProbePerf) Load() error {
	return errNotSupported
}

func (b *BPFKProbePerf) PollStart(mapName string, receiverChan chan []byte, lostChan chan uint64) {
	// not supported
	return
}

func (b *BPFKProbePerf) PollStop(mapName string) {
	// not supported
	return
}

func (m *Map) Fd() int {
	// not supported
	return -1
}
gobpf-0.2.0/elf/errno.go000066400000000000000000000001321404447410300150270ustar00rootroot00000000000000package elf

import (
	"errors"
)

var (
	errNotSupported = errors.New("not supported")
)
gobpf-0.2.0/elf/include/000077500000000000000000000000001404447410300150005ustar00rootroot00000000000000gobpf-0.2.0/elf/include/bpf_map.h000066400000000000000000000005231404447410300165550ustar00rootroot00000000000000#define BUF_SIZE_MAP_NS 256

typedef struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
	unsigned int pinning;
	char namespace[BUF_SIZE_MAP_NS];
} bpf_map_def;

enum bpf_pin_type {
	PIN_NONE = 0,
	PIN_OBJECT_NS,
	PIN_GLOBAL_NS,
	PIN_CUSTOM_NS,
};
gobpf-0.2.0/elf/include/doc.go000066400000000000000000000000201404447410300160650ustar00rootroot00000000000000package include
gobpf-0.2.0/elf/include/libbpf.h000066400000000000000000000026341404447410300164140ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov
 * Copyright (C) 2015 Wang Nan
 * Copyright (C) 2015 Huawei Inc.
 */
#ifndef __LIBBPF_LIBBPF_H
#define __LIBBPF_LIBBPF_H

/* The include arguments were lost in extraction; <linux/types.h> (for __u32)
 * and <linux/netlink.h> (for struct nlattr) are assumptions. */
#include <linux/types.h>
#include <linux/netlink.h>

int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);

enum libbpf_errno {
	__LIBBPF_ERRNO__START = 4000,

	/* Something wrong in libelf */
	LIBBPF_ERRNO__LIBELF = __LIBBPF_ERRNO__START,
	LIBBPF_ERRNO__FORMAT,	/* BPF object format invalid */
	LIBBPF_ERRNO__KVERSION,	/* Incorrect or no 'version' section */
	LIBBPF_ERRNO__ENDIAN,	/* Endian mismatch */
	LIBBPF_ERRNO__INTERNAL,	/* Internal error in libbpf */
	LIBBPF_ERRNO__RELOC,	/* Relocation failed */
	LIBBPF_ERRNO__LOAD,	/* Load program failure for unknown reason */
	LIBBPF_ERRNO__VERIFY,	/* Kernel verifier blocks program loading */
	LIBBPF_ERRNO__PROG2BIG,	/* Program too big */
	LIBBPF_ERRNO__KVER,	/* Incorrect kernel version */
	LIBBPF_ERRNO__PROGTYPE,	/* Kernel doesn't support this program type */
	LIBBPF_ERRNO__WRNGPID,	/* Wrong pid in netlink message */
	LIBBPF_ERRNO__INVSEQ,	/* Invalid netlink sequence */
	LIBBPF_ERRNO__NLPARSE,	/* netlink parsing error */
	__LIBBPF_ERRNO__END,
};

typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);

#endif /* __LIBBPF_LIBBPF_H */
gobpf-0.2.0/elf/include/nlattr.h000066400000000000000000000052471404447410300164630ustar00rootroot00000000000000/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * NETLINK	Netlink attributes
 *
 * Copyright (c) 2003-2013 Thomas Graf
 */
#ifndef __LIBBPF_NLATTR_H
#define __LIBBPF_NLATTR_H

#include <stdint.h>
#include <linux/netlink.h>
/* avoid multiple definition of netlink features */
#define __LINUX_NETLINK_H

/**
 * Standard attribute types to specify validation policy
 */
enum {
	LIBBPF_NLA_UNSPEC,	/**< Unspecified type, binary data chunk */
	LIBBPF_NLA_U8,		/**< 8 bit integer */
	LIBBPF_NLA_U16,		/**< 16 bit integer */
	LIBBPF_NLA_U32,		/**< 32 bit integer */
	LIBBPF_NLA_U64,		/**< 64 bit integer */
	LIBBPF_NLA_STRING,	/**< NUL terminated character string */
	LIBBPF_NLA_FLAG,	/**< Flag */
	LIBBPF_NLA_MSECS,	/**< Micro seconds (64bit) */
	LIBBPF_NLA_NESTED,	/**< Nested attributes */
	__LIBBPF_NLA_TYPE_MAX,
};

#define LIBBPF_NLA_TYPE_MAX (__LIBBPF_NLA_TYPE_MAX - 1)

/**
 * @ingroup attr
 * Attribute validation policy.
 *
 * See section @core_doc{core_attr_parse,Attribute Parsing} for more details.
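 *
 * As an illustration only (this example is not part of the original
 * header; the attribute indices and the length limit are assumptions),
 * a policy array might look like:
 *
 *	struct libbpf_nla_policy policy[] = {
 *		[1] = { .type = LIBBPF_NLA_U32 },
 *		[2] = { .type = LIBBPF_NLA_STRING, .maxlen = 16 },
 *	};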
 */
struct libbpf_nla_policy {
	/** Type of attribute or LIBBPF_NLA_UNSPEC */
	uint16_t	type;

	/** Minimal length of payload required */
	uint16_t	minlen;

	/** Maximal length of payload allowed */
	uint16_t	maxlen;
};

/**
 * @ingroup attr
 * Iterate over a stream of attributes
 * @arg pos	loop counter, set to current attribute
 * @arg head	head of attribute stream
 * @arg len	length of attribute stream
 * @arg rem	initialized to len, holds bytes currently remaining in stream
 */
#define libbpf_nla_for_each_attr(pos, head, len, rem) \
	for (pos = head, rem = len; \
	     nla_ok(pos, rem); \
	     pos = nla_next(pos, &(rem)))

/**
 * libbpf_nla_data - head of payload
 * @nla: netlink attribute
 */
static inline void *libbpf_nla_data(const struct nlattr *nla)
{
	return (char *) nla + NLA_HDRLEN;
}

static inline uint8_t libbpf_nla_getattr_u8(const struct nlattr *nla)
{
	return *(uint8_t *)libbpf_nla_data(nla);
}

static inline uint32_t libbpf_nla_getattr_u32(const struct nlattr *nla)
{
	return *(uint32_t *)libbpf_nla_data(nla);
}

static inline const char *libbpf_nla_getattr_str(const struct nlattr *nla)
{
	return (const char *)libbpf_nla_data(nla);
}

/**
 * libbpf_nla_len - length of payload
 * @nla: netlink attribute
 */
static inline int libbpf_nla_len(const struct nlattr *nla)
{
	return nla->nla_len - NLA_HDRLEN;
}

int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head,
		     int len, struct libbpf_nla_policy *policy);
int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype,
			    struct nlattr *nla,
			    struct libbpf_nla_policy *policy);

int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh);

#endif /* __LIBBPF_NLATTR_H */
gobpf-0.2.0/elf/include/uapi/000077500000000000000000000000001404447410300157365ustar00rootroot00000000000000gobpf-0.2.0/elf/include/uapi/linux/000077500000000000000000000000001404447410300170755ustar00rootroot00000000000000gobpf-0.2.0/elf/include/uapi/linux/bpf.h000066400000000000000000003443751404447410300200330ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
*/ #ifndef _UAPI__LINUX_BPF_H__ #define _UAPI__LINUX_BPF_H__ #include #include /* Extended instruction set based on top of classic BPF */ /* instruction classes */ #define BPF_ALU64 0x07 /* alu mode in double word width */ /* ld/ldx fields */ #define BPF_DW 0x18 /* double word (64-bit) */ #define BPF_XADD 0xc0 /* exclusive add */ /* alu/jmp fields */ #define BPF_MOV 0xb0 /* mov reg to reg */ #define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ /* change endianness of a register */ #define BPF_END 0xd0 /* flags for endianness conversion: */ #define BPF_TO_LE 0x00 /* convert to little-endian */ #define BPF_TO_BE 0x08 /* convert to big-endian */ #define BPF_FROM_LE BPF_TO_LE #define BPF_FROM_BE BPF_TO_BE /* jmp encodings */ #define BPF_JNE 0x50 /* jump != */ #define BPF_JLT 0xa0 /* LT is unsigned, '<' */ #define BPF_JLE 0xb0 /* LE is unsigned, '<=' */ #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ #define BPF_JSLT 0xc0 /* SLT is signed, '<' */ #define BPF_JSLE 0xd0 /* SLE is signed, '<=' */ #define BPF_CALL 0x80 /* function call */ #define BPF_EXIT 0x90 /* function return */ /* Register numbers */ enum { BPF_REG_0 = 0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5, BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9, BPF_REG_10, __MAX_BPF_REG, }; /* BPF has 10 general purpose 64-bit registers and stack frame. */ #define MAX_BPF_REG __MAX_BPF_REG struct bpf_insn { __u8 code; /* opcode */ __u8 dst_reg:4; /* dest register */ __u8 src_reg:4; /* source register */ __s16 off; /* signed offset */ __s32 imm; /* signed immediate constant */ }; /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ struct bpf_lpm_trie_key { __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ __u8 data[0]; /* Arbitrary size */ }; struct bpf_cgroup_storage_key { __u64 cgroup_inode_id; /* cgroup inode id */ __u32 attach_type; /* program attach type */ }; /* BPF syscall commands, see bpf(2) man-page for details. */ enum bpf_cmd { BPF_MAP_CREATE, BPF_MAP_LOOKUP_ELEM, BPF_MAP_UPDATE_ELEM, BPF_MAP_DELETE_ELEM, BPF_MAP_GET_NEXT_KEY, BPF_PROG_LOAD, BPF_OBJ_PIN, BPF_OBJ_GET, BPF_PROG_ATTACH, BPF_PROG_DETACH, BPF_PROG_TEST_RUN, BPF_PROG_GET_NEXT_ID, BPF_MAP_GET_NEXT_ID, BPF_PROG_GET_FD_BY_ID, BPF_MAP_GET_FD_BY_ID, BPF_OBJ_GET_INFO_BY_FD, BPF_PROG_QUERY, BPF_RAW_TRACEPOINT_OPEN, BPF_BTF_LOAD, BPF_BTF_GET_FD_BY_ID, BPF_TASK_FD_QUERY, BPF_MAP_LOOKUP_AND_DELETE_ELEM, }; enum bpf_map_type { BPF_MAP_TYPE_UNSPEC, BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PROG_ARRAY, BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_MAP_TYPE_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_ARRAY, BPF_MAP_TYPE_STACK_TRACE, BPF_MAP_TYPE_CGROUP_ARRAY, BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_LPM_TRIE, BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_DEVMAP, BPF_MAP_TYPE_SOCKMAP, BPF_MAP_TYPE_CPUMAP, BPF_MAP_TYPE_XSKMAP, BPF_MAP_TYPE_SOCKHASH, BPF_MAP_TYPE_CGROUP_STORAGE, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, BPF_MAP_TYPE_QUEUE, BPF_MAP_TYPE_STACK, }; /* Note that tracing related programs such as * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT} * are not subject to a stable API since kernel internal data * structures can change from release to release and may * therefore break existing tracing BPF programs. Tracing BPF * programs correspond to /a/ specific kernel which is to be * analyzed, and not /a/ specific kernel /and/ all future ones. 
*/ enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC, BPF_PROG_TYPE_SOCKET_FILTER, BPF_PROG_TYPE_KPROBE, BPF_PROG_TYPE_SCHED_CLS, BPF_PROG_TYPE_SCHED_ACT, BPF_PROG_TYPE_TRACEPOINT, BPF_PROG_TYPE_XDP, BPF_PROG_TYPE_PERF_EVENT, BPF_PROG_TYPE_CGROUP_SKB, BPF_PROG_TYPE_CGROUP_SOCK, BPF_PROG_TYPE_LWT_IN, BPF_PROG_TYPE_LWT_OUT, BPF_PROG_TYPE_LWT_XMIT, BPF_PROG_TYPE_SOCK_OPS, BPF_PROG_TYPE_SK_SKB, BPF_PROG_TYPE_CGROUP_DEVICE, BPF_PROG_TYPE_SK_MSG, BPF_PROG_TYPE_RAW_TRACEPOINT, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_PROG_TYPE_LWT_SEG6LOCAL, BPF_PROG_TYPE_LIRC_MODE2, BPF_PROG_TYPE_SK_REUSEPORT, BPF_PROG_TYPE_FLOW_DISSECTOR, }; enum bpf_attach_type { BPF_CGROUP_INET_INGRESS, BPF_CGROUP_INET_EGRESS, BPF_CGROUP_INET_SOCK_CREATE, BPF_CGROUP_SOCK_OPS, BPF_SK_SKB_STREAM_PARSER, BPF_SK_SKB_STREAM_VERDICT, BPF_CGROUP_DEVICE, BPF_SK_MSG_VERDICT, BPF_CGROUP_INET4_BIND, BPF_CGROUP_INET6_BIND, BPF_CGROUP_INET4_CONNECT, BPF_CGROUP_INET6_CONNECT, BPF_CGROUP_INET4_POST_BIND, BPF_CGROUP_INET6_POST_BIND, BPF_CGROUP_UDP4_SENDMSG, BPF_CGROUP_UDP6_SENDMSG, BPF_LIRC_MODE2, BPF_FLOW_DISSECTOR, __MAX_BPF_ATTACH_TYPE }; #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command * * NONE(default): No further bpf programs allowed in the subtree. * * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program, * the program in this cgroup yields to sub-cgroup program. * * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program, * that cgroup program gets run in addition to the program in this cgroup. * * Only one program is allowed to be attached to a cgroup with * NONE or BPF_F_ALLOW_OVERRIDE flag. * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will * release old program and attach the new one. Attach flags has to match. * * Multiple programs are allowed to be attached to a cgroup with * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order * (those that were attached first, run first) * The programs of sub-cgroup are executed first, then programs of * this cgroup and then programs of parent cgroup. * When children program makes decision (like picking TCP CA or sock bind) * parent program has a chance to override it. * * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups. * A cgroup with NONE doesn't allow any programs in sub-cgroups. * Ex1: * cgrp1 (MULTI progs A, B) -> * cgrp2 (OVERRIDE prog C) -> * cgrp3 (MULTI prog D) -> * cgrp4 (OVERRIDE prog E) -> * cgrp5 (NONE prog F) * the event in cgrp5 triggers execution of F,D,A,B in that order. * if prog F is detached, the execution is E,D,A,B * if prog F and D are detached, the execution is E,A,B * if prog F, E and D are detached, the execution is C,A,B * * All eligible programs are executed regardless of return code from * earlier programs. */ #define BPF_F_ALLOW_OVERRIDE (1U << 0) #define BPF_F_ALLOW_MULTI (1U << 1) /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the * verifier will perform strict alignment checking as if the kernel * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, * and NET_IP_ALIGN defined to 2. */ #define BPF_F_STRICT_ALIGNMENT (1U << 0) /* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the * verifier will allow any alignment whatsoever. On platforms * with strict alignment requirements for loads ands stores (such * as sparc and mips) the verifier validates that all loads and * stores provably follow this requirement. This flag turns that * checking and enforcement off. 
* * It is mostly used for testing when we want to validate the * context and memory access aspects of the verifier, but because * of an unaligned access the alignment check would trigger before * the one we are interested in. */ #define BPF_F_ANY_ALIGNMENT (1U << 1) /* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */ #define BPF_PSEUDO_MAP_FD 1 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative * offset to another bpf function */ #define BPF_PSEUDO_CALL 1 /* flags for BPF_MAP_UPDATE_ELEM command */ #define BPF_ANY 0 /* create new element or update existing */ #define BPF_NOEXIST 1 /* create new element if it didn't exist */ #define BPF_EXIST 2 /* update existing element */ /* flags for BPF_MAP_CREATE command */ #define BPF_F_NO_PREALLOC (1U << 0) /* Instead of having one common LRU list in the * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list * which can scale and perform better. * Note, the LRU nodes (including free nodes) cannot be moved * across different LRU lists. */ #define BPF_F_NO_COMMON_LRU (1U << 1) /* Specify numa node during map creation */ #define BPF_F_NUMA_NODE (1U << 2) #define BPF_OBJ_NAME_LEN 16U /* Flags for accessing BPF object */ #define BPF_F_RDONLY (1U << 3) #define BPF_F_WRONLY (1U << 4) /* Flag for stack_map, store build_id+offset instead of pointer */ #define BPF_F_STACK_BUILD_ID (1U << 5) /* Zero-initialize hash function seed. This should only be used for testing. */ #define BPF_F_ZERO_SEED (1U << 6) /* flags for BPF_PROG_QUERY */ #define BPF_F_QUERY_EFFECTIVE (1U << 0) enum bpf_stack_build_id_status { /* user space need an empty entry to identify end of a trace */ BPF_STACK_BUILD_ID_EMPTY = 0, /* with valid build_id and offset */ BPF_STACK_BUILD_ID_VALID = 1, /* couldn't get build_id, fallback to ip */ BPF_STACK_BUILD_ID_IP = 2, }; #define BPF_BUILD_ID_SIZE 20 struct bpf_stack_build_id { __s32 status; unsigned char build_id[BPF_BUILD_ID_SIZE]; union { __u64 offset; __u64 ip; }; }; union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ __u32 map_type; /* one of enum bpf_map_type */ __u32 key_size; /* size of key in bytes */ __u32 value_size; /* size of value in bytes */ __u32 max_entries; /* max number of entries in a map */ __u32 map_flags; /* BPF_MAP_CREATE related * flags defined above. */ __u32 inner_map_fd; /* fd pointing to the inner map */ __u32 numa_node; /* numa node (effective only if * BPF_F_NUMA_NODE is set). */ char map_name[BPF_OBJ_NAME_LEN]; __u32 map_ifindex; /* ifindex of netdev to create on */ __u32 btf_fd; /* fd pointing to a BTF type data */ __u32 btf_key_type_id; /* BTF type_id of the key */ __u32 btf_value_type_id; /* BTF type_id of the value */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ __u32 map_fd; __aligned_u64 key; union { __aligned_u64 value; __aligned_u64 next_key; }; __u64 flags; }; struct { /* anonymous struct used by BPF_PROG_LOAD command */ __u32 prog_type; /* one of enum bpf_prog_type */ __u32 insn_cnt; __aligned_u64 insns; __aligned_u64 license; __u32 log_level; /* verbosity level of verifier */ __u32 log_size; /* size of user buffer */ __aligned_u64 log_buf; /* user supplied buffer */ __u32 kern_version; /* not used */ __u32 prog_flags; char prog_name[BPF_OBJ_NAME_LEN]; __u32 prog_ifindex; /* ifindex of netdev to prep for */ /* For some prog types expected attach type must be known at * load time to verify attach type specific parts of prog * (context accesses, allowed helpers, etc). 
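		 *
		 * As an illustration only (this snippet is not part of the
		 * original header), a loader for a cgroup connect4 program
		 * would pair the two fields like this; both constants are
		 * defined earlier in this file:
		 *
		 *	union bpf_attr attr = {};
		 *	attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
		 *	attr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;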
*/ __u32 expected_attach_type; __u32 prog_btf_fd; /* fd pointing to BTF type data */ __u32 func_info_rec_size; /* userspace bpf_func_info size */ __aligned_u64 func_info; /* func info */ __u32 func_info_cnt; /* number of bpf_func_info records */ __u32 line_info_rec_size; /* userspace bpf_line_info size */ __aligned_u64 line_info; /* line info */ __u32 line_info_cnt; /* number of bpf_line_info records */ }; struct { /* anonymous struct used by BPF_OBJ_* commands */ __aligned_u64 pathname; __u32 bpf_fd; __u32 file_flags; }; struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ __u32 target_fd; /* container object to attach to */ __u32 attach_bpf_fd; /* eBPF program to attach */ __u32 attach_type; __u32 attach_flags; }; struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ __u32 prog_fd; __u32 retval; __u32 data_size_in; /* input: len of data_in */ __u32 data_size_out; /* input/output: len of data_out * returns ENOSPC if data_out * is too small. */ __aligned_u64 data_in; __aligned_u64 data_out; __u32 repeat; __u32 duration; } test; struct { /* anonymous struct used by BPF_*_GET_*_ID */ union { __u32 start_id; __u32 prog_id; __u32 map_id; __u32 btf_id; }; __u32 next_id; __u32 open_flags; }; struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ __u32 bpf_fd; __u32 info_len; __aligned_u64 info; } info; struct { /* anonymous struct used by BPF_PROG_QUERY command */ __u32 target_fd; /* container object to query */ __u32 attach_type; __u32 query_flags; __u32 attach_flags; __aligned_u64 prog_ids; __u32 prog_cnt; } query; struct { __u64 name; __u32 prog_fd; } raw_tracepoint; struct { /* anonymous struct for BPF_BTF_LOAD */ __aligned_u64 btf; __aligned_u64 btf_log_buf; __u32 btf_size; __u32 btf_log_size; __u32 btf_log_level; }; struct { __u32 pid; /* input: pid */ __u32 fd; /* input: fd */ __u32 flags; /* input: flags */ __u32 buf_len; /* input/output: buf len */ __aligned_u64 buf; /* input/output: * tp_name for tracepoint * symbol for kprobe * filename for uprobe */ __u32 prog_id; /* output: prod_id */ __u32 fd_type; /* output: BPF_FD_TYPE_* */ __u64 probe_offset; /* output: probe_offset */ __u64 probe_addr; /* output: probe_addr */ } task_fd_query; } __attribute__((aligned(8))); /* The description below is an attempt at providing documentation to eBPF * developers about the multiple available eBPF helper functions. It can be * parsed and used to produce a manual page. The workflow is the following, * and requires the rst2man utility: * * $ ./scripts/bpf_helpers_doc.py \ * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7 * $ man /tmp/bpf-helpers.7 * * Note that in order to produce this external documentation, some RST * formatting is used in the descriptions to get "bold" and "italics" in * manual pages. Also note that the few trailing white spaces are * intentional, removing them would break paragraphs for rst2man. * * Start of BPF helper function descriptions: * * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key) * Description * Perform a lookup in *map* for an entry associated to *key*. * Return * Map value associated to *key*, or **NULL** if no entry was * found. * * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) * Description * Add or update the value of the entry associated to *key* in * *map* with *value*. *flags* is one of: * * **BPF_NOEXIST** * The entry for *key* must not exist in the map. 
* **BPF_EXIST** * The entry for *key* must already exist in the map. * **BPF_ANY** * No condition on the existence of the entry for *key*. * * Flag value **BPF_NOEXIST** cannot be used for maps of types * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all * elements always exist), the helper would return an error. * Return * 0 on success, or a negative error in case of failure. * * int bpf_map_delete_elem(struct bpf_map *map, const void *key) * Description * Delete entry with *key* from *map*. * Return * 0 on success, or a negative error in case of failure. * * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) * Description * Push an element *value* in *map*. *flags* is one of: * * **BPF_EXIST** * If the queue/stack is full, the oldest element is removed to * make room for this. * Return * 0 on success, or a negative error in case of failure. * * int bpf_probe_read(void *dst, u32 size, const void *src) * Description * For tracing programs, safely attempt to read *size* bytes from * address *src* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_ktime_get_ns(void) * Description * Return the time elapsed since system boot, in nanoseconds. * Return * Current *ktime*. * * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...) * Description * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if * available. It can take up to three additional **u64** * arguments (as an eBPF helpers, the total number of arguments is * limited to five). * * Each time the helper is called, it appends a line to the trace. * The format of the trace is customizable, and the exact output * one will get depends on the options set in * *\/sys/kernel/debug/tracing/trace_options* (see also the * *README* file under the same directory). However, it usually * defaults to something like: * * :: * * telnet-470 [001] .N.. 419421.045894: 0x00000001: * * In the above: * * * ``telnet`` is the name of the current task. * * ``470`` is the PID of the current task. * * ``001`` is the CPU number on which the task is * running. * * In ``.N..``, each character refers to a set of * options (whether irqs are enabled, scheduling * options, whether hard/softirqs are running, level of * preempt_disabled respectively). **N** means that * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED** * are set. * * ``419421.045894`` is a timestamp. * * ``0x00000001`` is a fake value used by BPF for the * instruction pointer register. * * ```` is the message formatted with * *fmt*. * * The conversion specifiers supported by *fmt* are similar, but * more limited than for printk(). They are **%d**, **%i**, * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**, * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size * of field, padding with zeroes, etc.) is available, and the * helper will return **-EINVAL** (but print nothing) if it * encounters an unknown specifier. * * Also, note that **bpf_trace_printk**\ () is slow, and should * only be used for debugging purposes. For this reason, a notice * bloc (spanning several lines) is printed to kernel logs and * states that the helper should not be used "for production use" * the first time this helper is used (or more precisely, when * **trace_printk**\ () buffers are allocated). For passing values * to user space, perf events should be preferred. 
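 *
 *		As an illustration only (this snippet is not part of the
 *		original header), a typical call from a tracing program,
 *		with *fmt* held on the stack, looks like:
 *
 *		::
 *
 *			char fmt[] = "value: %d\n";
 *			bpf_trace_printk(fmt, sizeof(fmt), value);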
* Return * The number of bytes written to the buffer, or a negative error * in case of failure. * * u32 bpf_get_prandom_u32(void) * Description * Get a pseudo-random number. * * From a security point of view, this helper uses its own * pseudo-random internal state, and cannot be used to infer the * seed of other random functions in the kernel. However, it is * essential to note that the generator used by the helper is not * cryptographically secure. * Return * A random 32-bit unsigned value. * * u32 bpf_get_smp_processor_id(void) * Description * Get the SMP (symmetric multiprocessing) processor id. Note that * all programs run with preemption disabled, which means that the * SMP processor id is stable during all the execution of the * program. * Return * The SMP id of the processor running the program. * * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. *flags* are a combination of * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the * checksum for the packet after storing the bytes) and * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ * **->swhash** and *skb*\ **->l4hash** to 0). * * A call to this helper is susceptible to change the underlaying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) * Description * Recompute the layer 3 (e.g. IP) checksum for the packet * associated to *skb*. Computation is incremental, so the helper * must know the former value of the header field that was * modified (*from*), the new value of this field (*to*), and the * number of bytes (2 or 4) for this field, stored in *size*. * Alternatively, it is possible to store the difference between * the previous and the new values of the header field in *to*, by * setting *from* and *size* to 0. For both methods, *offset* * indicates the location of the IP checksum within the packet. * * This helper works in combination with **bpf_csum_diff**\ (), * which does not update the checksum in-place, but offers more * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. * * A call to this helper is susceptible to change the underlaying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) * Description * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the * packet associated to *skb*. Computation is incremental, so the * helper must know the former value of the header field that was * modified (*from*), the new value of this field (*to*), and the * number of bytes (2 or 4) for this field, stored on the lowest * four bits of *flags*. Alternatively, it is possible to store * the difference between the previous and the new values of the * header field in *to*, by setting *from* and the four lowest * bits of *flags* to 0. 
For both methods, *offset* indicates the * location of the IP checksum within the packet. In addition to * the size of the field, *flags* can be added (bitwise OR) actual * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and * for updates resulting in a null checksum the value is set to * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates * the checksum is to be computed against a pseudo-header. * * This helper works in combination with **bpf_csum_diff**\ (), * which does not update the checksum in-place, but offers more * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. * * A call to this helper is susceptible to change the underlaying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) * Description * This special helper is used to trigger a "tail call", or in * other words, to jump into another eBPF program. The same stack * frame is used (but values on stack and in registers for the * caller are not accessible to the callee). This mechanism allows * for program chaining, either for raising the maximum number of * available eBPF instructions, or to execute given programs in * conditional blocks. For security reasons, there is an upper * limit to the number of successive tail calls that can be * performed. * * Upon call of this helper, the program attempts to jump into a * program referenced at index *index* in *prog_array_map*, a * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes * *ctx*, a pointer to the context. * * If the call succeeds, the kernel immediately runs the first * instruction of the new program. This is not a function call, * and it never returns to the previous program. If the call * fails, then the helper has no effect, and the caller continues * to run its subsequent instructions. A call can fail if the * destination program for the jump does not exist (i.e. *index* * is superior to the number of entries in *prog_array_map*), or * if the maximum number of tail calls has been reached for this * chain of programs. This limit is defined in the kernel by the * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), * which is currently set to 32. * Return * 0 on success, or a negative error in case of failure. * * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) * Description * Clone and redirect the packet associated to *skb* to another * net device of index *ifindex*. Both ingress and egress * interfaces can be used for redirection. The **BPF_F_INGRESS** * value in *flags* is used to make the distinction (ingress path * is selected if the flag is present, egress path otherwise). * This is the only flag supported for now. * * In comparison with **bpf_redirect**\ () helper, * **bpf_clone_redirect**\ () has the associated cost of * duplicating the packet buffer, but this can be executed out of * the eBPF program. Conversely, **bpf_redirect**\ () is more * efficient, but it is handled through an action code where the * redirection happens only after the eBPF program has returned. * * A call to this helper is susceptible to change the underlaying * packet buffer. 
Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_get_current_pid_tgid(void) * Return * A 64-bit integer containing the current tgid and pid, and * created as such: * *current_task*\ **->tgid << 32 \|** * *current_task*\ **->pid**. * * u64 bpf_get_current_uid_gid(void) * Return * A 64-bit integer containing the current GID and UID, and * created as such: *current_gid* **<< 32 \|** *current_uid*. * * int bpf_get_current_comm(char *buf, u32 size_of_buf) * Description * Copy the **comm** attribute of the current task into *buf* of * *size_of_buf*. The **comm** attribute contains the name of * the executable (excluding the path) for the current task. The * *size_of_buf* must be strictly positive. On success, the * helper makes sure that the *buf* is NUL-terminated. On failure, * it is filled with zeroes. * Return * 0 on success, or a negative error in case of failure. * * u32 bpf_get_cgroup_classid(struct sk_buff *skb) * Description * Retrieve the classid for the current task, i.e. for the net_cls * cgroup to which *skb* belongs. * * This helper can be used on TC egress path, but not on ingress. * * The net_cls cgroup provides an interface to tag network packets * based on a user-provided identifier for all traffic coming from * the tasks belonging to the related cgroup. See also the related * kernel documentation, available from the Linux sources in file * *Documentation/cgroup-v1/net_cls.txt*. * * The Linux kernel has two versions for cgroups: there are * cgroups v1 and cgroups v2. Both are available to users, who can * use a mixture of them, but note that the net_cls cgroup is for * cgroup v1 only. This makes it incompatible with BPF programs * run on cgroups, which is a cgroup-v2-only feature (a socket can * only hold data for one version of cgroups at a time). * * This helper is only available is the kernel was compiled with * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to * "**y**" or to "**m**". * Return * The classid, or 0 for the default unconfigured classid. * * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) * Description * Push a *vlan_tci* (VLAN tag control information) of protocol * *vlan_proto* to the packet associated to *skb*, then update * the checksum. Note that if *vlan_proto* is different from * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to * be **ETH_P_8021Q**. * * A call to this helper is susceptible to change the underlaying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_vlan_pop(struct sk_buff *skb) * Description * Pop a VLAN header from the packet associated to *skb*. * * A call to this helper is susceptible to change the underlaying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Get tunnel metadata. 
This helper takes a pointer *key* to an * empty **struct bpf_tunnel_key** of **size**, that will be * filled with tunnel metadata for the packet associated to *skb*. * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which * indicates that the tunnel is based on IPv6 protocol instead of * IPv4. * * The **struct bpf_tunnel_key** is an object that generalizes the * principal parameters used by various tunneling protocols into a * single struct. This way, it can be used to easily make a * decision based on the contents of the encapsulation header, * "summarized" in this struct. In particular, it holds the IP * address of the remote end (IPv4 or IPv6, depending on the case) * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also, * this struct exposes the *key*\ **->tunnel_id**, which is * generally mapped to a VNI (Virtual Network Identifier), making * it programmable together with the **bpf_skb_set_tunnel_key**\ * () helper. * * Let's imagine that the following code is part of a program * attached to the TC ingress interface, on one end of a GRE * tunnel, and is supposed to filter out all messages coming from * remote ends with IPv4 address other than 10.0.0.1: * * :: * * int ret; * struct bpf_tunnel_key key = {}; * * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); * if (ret < 0) * return TC_ACT_SHOT; // drop packet * * if (key.remote_ipv4 != 0x0a000001) * return TC_ACT_SHOT; // drop packet * * return TC_ACT_OK; // accept packet * * This interface can also be used with all encapsulation devices * that can operate in "collect metadata" mode: instead of having * one network device per specific configuration, the "collect * metadata" mode only requires a single device where the * configuration can be extracted from this helper. * * This can be used together with various tunnels such as VXLan, * Geneve, GRE or IP in IP (IPIP). * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Populate tunnel metadata for packet associated to *skb.* The * tunnel metadata is set to the contents of *key*, of *size*. The * *flags* can be set to a combination of the following values: * * **BPF_F_TUNINFO_IPV6** * Indicate that the tunnel is based on IPv6 protocol * instead of IPv4. * **BPF_F_ZERO_CSUM_TX** * For IPv4 packets, add a flag to tunnel metadata * indicating that checksum computation should be skipped * and checksum set to zeroes. * **BPF_F_DONT_FRAGMENT** * Add a flag to tunnel metadata indicating that the * packet should not be fragmented. * **BPF_F_SEQ_NUMBER** * Add a flag to tunnel metadata indicating that a * sequence number should be added to tunnel header before * sending the packet. This flag was added for GRE * encapsulation, but might be used with other protocols * as well in the future. * * Here is a typical usage on the transmit path: * * :: * * struct bpf_tunnel_key key; * populate key ... * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0); * * See also the description of the **bpf_skb_get_tunnel_key**\ () * helper for additional information. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags) * Description * Read the value of a perf event counter. This helper relies on a * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. 
The nature of * the perf event counter is selected when *map* is updated with * perf event file descriptors. The *map* is an array whose size * is the number of available CPUs, and each cell contains a value * relative to one CPU. The value to retrieve is indicated by * *flags*, that contains the index of the CPU to look up, masked * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to * **BPF_F_CURRENT_CPU** to indicate that the value for the * current CPU should be retrieved. * * Note that before Linux 4.13, only hardware perf event can be * retrieved. * * Also, be aware that the newer helper * **bpf_perf_event_read_value**\ () is recommended over * **bpf_perf_event_read**\ () in general. The latter has some ABI * quirks where error and counter value are used as a return code * (which is wrong to do since ranges may overlap). This issue is * fixed with **bpf_perf_event_read_value**\ (), which at the same * time provides more features over the **bpf_perf_event_read**\ * () interface. Please refer to the description of * **bpf_perf_event_read_value**\ () for details. * Return * The value of the perf event counter read from the map, or a * negative error code in case of failure. * * int bpf_redirect(u32 ifindex, u64 flags) * Description * Redirect the packet to another net device of index *ifindex*. * This helper is somewhat similar to **bpf_clone_redirect**\ * (), except that the packet is not cloned, which provides * increased performance. * * Except for XDP, both ingress and egress interfaces can be used * for redirection. The **BPF_F_INGRESS** value in *flags* is used * to make the distinction (ingress path is selected if the flag * is present, egress path otherwise). Currently, XDP only * supports redirection to the egress interface, and accepts no * flag at all. * * The same effect can be attained with the more generic * **bpf_redirect_map**\ (), which requires specific maps to be * used but offers better performance. * Return * For XDP, the helper returns **XDP_REDIRECT** on success or * **XDP_ABORTED** on error. For other program types, the values * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on * error. * * u32 bpf_get_route_realm(struct sk_buff *skb) * Description * Retrieve the realm or the route, that is to say the * **tclassid** field of the destination for the *skb*. The * indentifier retrieved is a user-provided tag, similar to the * one used with the net_cls cgroup (see description for * **bpf_get_cgroup_classid**\ () helper), but here this tag is * held by a route (a destination entry), not by a task. * * Retrieving this identifier works with the clsact TC egress hook * (see also **tc-bpf(8)**), or alternatively on conventional * classful egress qdiscs, but not on TC ingress path. In case of * clsact TC egress hook, this has the advantage that, internally, * the destination entry has not been dropped yet in the transmit * path. Therefore, the destination entry does not need to be * artificially held via **netif_keep_dst**\ () for a classful * qdisc until the *skb* is freed. * * This helper is available only if the kernel was compiled with * **CONFIG_IP_ROUTE_CLASSID** configuration option. * Return * The realm of the route for the packet associated to *skb*, or 0 * if none was found. * * int bpf_perf_event_output(struct pt_reg *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. 
This perf * event must have the following attributes: **PERF_SAMPLE_RAW** * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. * * The *flags* are used to indicate the index in *map* for which * the value must be put, masked with **BPF_F_INDEX_MASK**. * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** * to indicate that the index of the current CPU core should be * used. * * The value to write, of *size*, is passed through eBPF stack and * pointed by *data*. * * The context of the program *ctx* needs also be passed to the * helper. * * On user space, a program willing to read the values needs to * call **perf_event_open**\ () on the perf event (either for * one or for all CPUs) and to store the file descriptor into the * *map*. This must be done before the eBPF program can send data * into it. An example is available in file * *samples/bpf/trace_output_user.c* in the Linux kernel source * tree (the eBPF program counterpart is in * *samples/bpf/trace_output_kern.c*). * * **bpf_perf_event_output**\ () achieves better performance * than **bpf_trace_printk**\ () for sharing data with user * space, and is much better suitable for streaming data from eBPF * programs. * * Note that this helper is not restricted to tracing use cases * and can be used with programs attached to TC or XDP as well, * where it allows for passing data to user space listeners. Data * can be: * * * Only custom structs, * * Only the packet payload, or * * A combination of both. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len) * Description * This helper was provided as an easy way to load data from a * packet. It can be used to load *len* bytes from *offset* from * the packet associated to *skb*, into the buffer pointed by * *to*. * * Since Linux 4.7, usage of this helper has mostly been replaced * by "direct packet access", enabling packet data to be * manipulated with *skb*\ **->data** and *skb*\ **->data_end** * pointing respectively to the first byte of packet data and to * the byte after the last byte of packet data. However, it * remains useful if one wishes to read large quantities of data * at once from a packet into the eBPF stack. * Return * 0 on success, or a negative error in case of failure. * * int bpf_get_stackid(struct pt_reg *ctx, struct bpf_map *map, u64 flags) * Description * Walk a user or a kernel stack and return its id. To achieve * this, the helper needs *ctx*, which is a pointer to the context * on which the tracing program is executed, and a pointer to a * *map* of type **BPF_MAP_TYPE_STACK_TRACE**. * * The last argument, *flags*, holds the number of stack frames to * skip (from 0 to 255), masked with * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set * a combination of the following flags: * * **BPF_F_USER_STACK** * Collect a user space stack instead of a kernel stack. * **BPF_F_FAST_STACK_CMP** * Compare stacks by hash only. * **BPF_F_REUSE_STACKID** * If two different stacks hash into the same *stackid*, * discard the old one. * * The stack id retrieved is a 32 bit long integer handle which * can be further combined with other data (including other stack * ids) and used as a key into maps. This can be useful for * generating a variety of graphs (such as flame graphs or off-cpu * graphs). 
* * For walking a stack, this helper is an improvement over * **bpf_probe_read**\ (), which can be used with unrolled loops * but is not efficient and consumes a lot of eBPF instructions. * Instead, **bpf_get_stackid**\ () can collect up to * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that * this limit can be controlled with the **sysctl** program, and * that it should be manually increased in order to profile long * user stacks (such as stacks for Java programs). To do so, use: * * :: * * # sysctl kernel.perf_event_max_stack= * Return * The positive or null stack id on success, or a negative error * in case of failure. * * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed) * Description * Compute a checksum difference, from the raw buffer pointed by * *from*, of length *from_size* (that must be a multiple of 4), * towards the raw buffer pointed by *to*, of size *to_size* * (same remark). An optional *seed* can be added to the value * (this can be cascaded, the seed may come from a previous call * to the helper). * * This is flexible enough to be used in several ways: * * * With *from_size* == 0, *to_size* > 0 and *seed* set to * checksum, it can be used when pushing new data. * * With *from_size* > 0, *to_size* == 0 and *seed* set to * checksum, it can be used when removing data from a packet. * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it * can be used to compute a diff. Note that *from_size* and * *to_size* do not need to be equal. * * This helper can be used in combination with * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to * which one can feed in the difference computed with * **bpf_csum_diff**\ (). * Return * The checksum result, or a negative error code in case of * failure. * * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size) * Description * Retrieve tunnel options metadata for the packet associated to * *skb*, and store the raw tunnel option data to the buffer *opt* * of *size*. * * This helper can be used with encapsulation devices that can * operate in "collect metadata" mode (please refer to the related * note in the description of **bpf_skb_get_tunnel_key**\ () for * more details). A particular example where this can be used is * in combination with the Geneve encapsulation protocol, where it * allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper) * and retrieving arbitrary TLVs (Type-Length-Value headers) from * the eBPF program. This allows for full customization of these * headers. * Return * The size of the option data retrieved. * * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size) * Description * Set tunnel options metadata for the packet associated to *skb* * to the option data contained in the raw buffer *opt* of *size*. * * See also the description of the **bpf_skb_get_tunnel_opt**\ () * helper for additional information. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) * Description * Change the protocol of the *skb* to *proto*. Currently * supported are transition from IPv4 to IPv6, and from IPv6 to * IPv4. The helper takes care of the groundwork for the * transition, including resizing the socket buffer. The eBPF * program is expected to fill the new headers, if any, via * **skb_store_bytes**\ () and to recompute the checksums with * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ * (). 
The main case for this helper is to perform NAT64 * operations out of an eBPF program. * * Internally, the GSO type is marked as dodgy so that headers are * checked and segments are recalculated by the GSO/GRO engine. * The size for GSO target is adapted as well. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * A call to this helper is susceptible to change the underlaying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_change_type(struct sk_buff *skb, u32 type) * Description * Change the packet type for the packet associated to *skb*. This * comes down to setting *skb*\ **->pkt_type** to *type*, except * the eBPF program does not have a write access to *skb*\ * **->pkt_type** beside this helper. Using a helper here allows * for graceful handling of errors. * * The major use case is to change incoming *skb*s to * **PACKET_HOST** in a programmatic way instead of having to * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for * example. * * Note that *type* only allows certain values. At this time, they * are: * * **PACKET_HOST** * Packet is for us. * **PACKET_BROADCAST** * Send packet to all. * **PACKET_MULTICAST** * Send packet to group. * **PACKET_OTHERHOST** * Send packet to someone else. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) * Description * Check whether *skb* is a descendant of the cgroup2 held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. * Return * The return value depends on the result of the test, and can be: * * * 0, if the *skb* failed the cgroup2 descendant test. * * 1, if the *skb* succeeded the cgroup2 descendant test. * * A negative error code, if an error occurred. * * u32 bpf_get_hash_recalc(struct sk_buff *skb) * Description * Retrieve the hash of the packet, *skb*\ **->hash**. If it is * not set, in particular if the hash was cleared due to mangling, * recompute this hash. Later accesses to the hash can be done * directly with *skb*\ **->hash**. * * Calling **bpf_set_hash_invalid**\ (), changing a packet * prototype with **bpf_skb_change_proto**\ (), or calling * **bpf_skb_store_bytes**\ () with the * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear * the hash and to trigger a new computation for the next call to * **bpf_get_hash_recalc**\ (). * Return * The 32-bit hash. * * u64 bpf_get_current_task(void) * Return * A pointer to the current task struct. * * int bpf_probe_write_user(void *dst, const void *src, u32 len) * Description * Attempt in a safe way to write *len* bytes from the buffer * *src* to *dst* in memory. It only works for threads that are in * user context, and *dst* must be a valid user space address. * * This helper should not be used to implement any kind of * security mechanism because of TOC-TOU attacks, but rather to * debug, divert, and manipulate execution of semi-cooperative * processes. * * Keep in mind that this feature is meant for experiments, and it * has a risk of crashing the system and running programs. * Therefore, when an eBPF program using this helper is attached, * a warning including PID and process name is printed to kernel * logs. * Return * 0 on success, or a negative error in case of failure. 
* * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) * Description * Check whether the probe is being run is the context of a given * subset of the cgroup2 hierarchy. The cgroup2 to test is held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. * Return * The return value depends on the result of the test, and can be: * * * 0, if the *skb* task belongs to the cgroup2. * * 1, if the *skb* task does not belong to the cgroup2. * * A negative error code, if an error occurred. * * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) * Description * Resize (trim or grow) the packet associated to *skb* to the * new *len*. The *flags* are reserved for future usage, and must * be left at zero. * * The basic idea is that the helper performs the needed work to * change the size of the packet, then the eBPF program rewrites * the rest via helpers like **bpf_skb_store_bytes**\ (), * **bpf_l3_csum_replace**\ (), **bpf_l3_csum_replace**\ () * and others. This helper is a slow path utility intended for * replies with control messages. And because it is targeted for * slow path, the helper itself can afford to be slow: it * implicitly linearizes, unclones and drops offloads from the * *skb*. * * A call to this helper is susceptible to change the underlaying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_pull_data(struct sk_buff *skb, u32 len) * Description * Pull in non-linear data in case the *skb* is non-linear and not * all of *len* are part of the linear section. Make *len* bytes * from *skb* readable and writable. If a zero value is passed for * *len*, then the whole length of the *skb* is pulled. * * This helper is only needed for reading and writing with direct * packet access. * * For direct packet access, testing that offsets to access * are within packet boundaries (test on *skb*\ **->data_end**) is * susceptible to fail if offsets are invalid, or if the requested * data is in non-linear parts of the *skb*. On failure the * program can just bail out, or in the case of a non-linear * buffer, use a helper to make the data available. The * **bpf_skb_load_bytes**\ () helper is a first solution to access * the data. Another one consists in using **bpf_skb_pull_data** * to pull in once the non-linear parts, then retesting and * eventually access the data. * * At the same time, this also makes sure the *skb* is uncloned, * which is a necessary condition for direct write. As this needs * to be an invariant for the write part only, the verifier * detects writes and adds a prologue that is calling * **bpf_skb_pull_data()** to effectively unclone the *skb* from * the very beginning in case it is indeed cloned. * * A call to this helper is susceptible to change the underlaying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum) * Description * Add the checksum *csum* into *skb*\ **->csum** in case the * driver has supplied a checksum for the entire packet into that * field. Return an error otherwise. 
This helper is intended to be * used in combination with **bpf_csum_diff**\ (), in particular * when the checksum needs to be updated after data has been * written into the packet through direct packet access. * Return * The checksum on success, or a negative error code in case of * failure. * * void bpf_set_hash_invalid(struct sk_buff *skb) * Description * Invalidate the current *skb*\ **->hash**. It can be used after * mangling on headers through direct packet access, in order to * indicate that the hash is outdated and to trigger a * recalculation the next time the kernel tries to access this * hash or when the **bpf_get_hash_recalc**\ () helper is called. * * int bpf_get_numa_node_id(void) * Description * Return the id of the current NUMA node. The primary use case * for this helper is the selection of sockets for the local NUMA * node, when the program is attached to sockets using the * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**), * but the helper is also available to other eBPF program types, * similarly to **bpf_get_smp_processor_id**\ (). * Return * The id of the current NUMA node. * * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) * Description * Grows headroom of packet associated to *skb* and adjusts the * offset of the MAC header accordingly, adding *len* bytes of * space. It automatically extends and reallocates memory as * required. * * This helper can be used on a layer 3 *skb* to push a MAC header * for redirection into a layer 2 device. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that * it is possible to use a negative value for *delta*. This helper * can be used to prepare the packet for pushing or popping * headers. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe address * *unsafe_ptr* to *dst*. The *size* should include the * terminating NUL byte. In case the string length is smaller than * *size*, the target is not padded with further NUL bytes. If the * string length is larger than *size*, just *size*-1 bytes are * copied and the last byte is set to NUL. * * On success, the length of the copied string is returned. This * makes this helper useful in tracing programs for reading * strings, and more importantly to get its length at runtime.
See * the following snippet: * * :: * * SEC("kprobe/sys_open") * void bpf_sys_open(struct pt_regs *ctx) * { * char buf[PATHLEN]; // PATHLEN is defined to 256 * int res = bpf_probe_read_str(buf, sizeof(buf), * ctx->di); * * // Consume buf, for example push it to * // userspace via bpf_perf_event_output(); we * // can use res (the string length) as event * // size, after checking its boundaries. * } * * In comparison, using **bpf_probe_read()** helper here instead * to read the string would require estimating the length at * compile time, and would often result in copying more memory * than necessary. * * Another useful use case is when parsing individual process * arguments or individual environment variables navigating * *current*\ **->mm->arg_start** and *current*\ * **->mm->env_start**: using this helper and the return value, * one can quickly iterate at the right offset of the memory area. * Return * On success, the strictly positive length of the string, * including the trailing NUL character. On error, a negative * value. * * u64 bpf_get_socket_cookie(struct sk_buff *skb) * Description * If the **struct sk_buff** pointed to by *skb* has a known socket, * retrieve the cookie (generated by the kernel) of this socket. * If no cookie has been set yet, generate a new cookie. Once * generated, the socket cookie remains stable for the life of the * socket. This helper can be useful for monitoring per socket * networking traffic statistics as it provides a unique socket * identifier per namespace. * Return * An 8-byte long non-decreasing number on success, or 0 if the * socket field is missing inside *skb*. * * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) * Description * Equivalent to bpf_get_socket_cookie() helper that accepts * *skb*, but gets socket from **struct bpf_sock_addr** context. * Return * An 8-byte long non-decreasing number. * * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) * Description * Equivalent to bpf_get_socket_cookie() helper that accepts * *skb*, but gets socket from **struct bpf_sock_ops** context. * Return * An 8-byte long non-decreasing number. * * u32 bpf_get_socket_uid(struct sk_buff *skb) * Return * The owner UID of the socket associated to *skb*. If the socket * is **NULL**, or if it is not a full socket (i.e. if it is a * time-wait or a request socket instead), **overflowuid** value * is returned (note that **overflowuid** might also be the actual * UID value for the socket). * * u32 bpf_set_hash(struct sk_buff *skb, u32 hash) * Description * Set the full hash for *skb* (set the field *skb*\ **->hash**) * to value *hash*. * Return * 0 * * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen) * Description * Emulate a call to **setsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at * which the option resides and the name *optname* of the option * must be specified, see **setsockopt(2)** for more information. * The option value of length *optlen* is pointed to by *optval*. * * This helper actually implements a subset of **setsockopt()**. * It supports the following *level*\ s: * * * **SOL_SOCKET**, which supports the following *optname*\ s: * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**. * * **IPPROTO_TCP**, which supports the following *optname*\ s: * **TCP_CONGESTION**, **TCP_BPF_IW**, * **TCP_BPF_SNDCWND_CLAMP**. * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
* * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) * Description * Grow or shrink the room for data in the packet associated to * *skb* by *len_diff*, and according to the selected *mode*. * * There is a single supported mode at this time: * * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer * (room space is added or removed below the layer 3 header). * * All values for *flags* are reserved for future usage, and must * be left at zero. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the endpoint referenced by *map* at * index *key*. Depending on its type, this *map* can contain * references to net devices (for forwarding packets through other * ports), or to CPUs (for redirecting XDP frames to another CPU; * but this is only implemented for native XDP (with driver * support) as of this writing). * * All values for *flags* are reserved for future usage, and must * be left at zero. * * When used to redirect packets to net devices, this helper * provides a significant performance increase over * **bpf_redirect**\ (). This is due to various implementation * details of the underlying mechanisms, one of which is the fact * that **bpf_redirect_map**\ () tries to send packets as a "bulk" * to the device. * Return * **XDP_REDIRECT** on success, or **XDP_ABORTED** on error. * * int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress path otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. * * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a *map* referencing sockets. The * *skops* is used as a new value for the entry associated to * *key*. *flags* is one of: * * **BPF_NOEXIST** * The entry for *key* must not exist in the map. * **BPF_EXIST** * The entry for *key* must already exist in the map. * **BPF_ANY** * No condition on the existence of the entry for *key*. * * If the *map* has eBPF programs (parser and verdict), those will * be inherited by the socket being added. If the socket is * already attached to eBPF programs, this results in an error. * Return * 0 on success, or a negative error in case of failure. * * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) * Description * Adjust the address pointed to by *xdp_md*\ **->data_meta** by * *delta* (which can be positive or negative). Note that this * operation modifies the address stored in *xdp_md*\ **->data**, * so the latter must be loaded only after the helper has been * called. * * The use of *xdp_md*\ **->data_meta** is optional and programs * are not required to use it.
The rationale is that when the * packet is processed with XDP (e.g. as DoS filter), it is * possible to push further meta data along with it before passing * to the stack, and to give the guarantee that an ingress eBPF * program attached as a TC classifier on the same device can pick * this up for further post-processing. Since TC works with socket * buffers, it remains possible to set from XDP the **mark** or * **priority** pointers, or other pointers for the socket buffer. * Having this scratch space generic and programmable allows for * more flexibility as the user is free to store whatever meta * data they need. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) * Description * Read the value of a perf event counter, and store it into *buf* * of size *buf_size*. This helper relies on a *map* of type * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event * counter is selected when *map* is updated with perf event file * descriptors. The *map* is an array whose size is the number of * available CPUs, and each cell contains a value relative to one * CPU. The value to retrieve is indicated by *flags*, that * contains the index of the CPU to look up, masked with * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to * **BPF_F_CURRENT_CPU** to indicate that the value for the * current CPU should be retrieved. * * This helper behaves in a way close to * **bpf_perf_event_read**\ () helper, save that instead of * just returning the value observed, it fills the *buf* * structure. This allows for additional data to be retrieved: in * particular, the enabled and running times (in *buf*\ * **->enabled** and *buf*\ **->running**, respectively) are * copied. In general, **bpf_perf_event_read_value**\ () is * recommended over **bpf_perf_event_read**\ (), which has some * ABI issues and provides fewer functionalities. * * These values are interesting, because hardware PMU (Performance * Monitoring Unit) counters are limited resources. When there are * more PMU based perf events opened than available counters, * kernel will multiplex these events so each event gets a certain * percentage (but not all) of the PMU time. When multiplexing * happens, the number of samples or the counter value will not * reflect what they would be when no multiplexing occurs. This * makes comparison between different runs difficult. * Typically, the counter value should be normalized before * comparing to other experiments. The usual normalization is done * as follows. * * :: * * normalized_counter = counter * t_enabled / t_running * * Where t_enabled is the time enabled for the event and t_running * is the time running for the event since the last normalization. * The enabled and running times are accumulated since the perf * event open. To achieve a scaling factor between two invocations * of an eBPF program, users can use the CPU id as the key (which * is typical for perf array usage model) to remember the previous * value and do the calculation inside the eBPF program. * Return * 0 on success, or a negative error in case of failure.
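* * A minimal sketch of this normalization, illustrative only and * not part of the original UAPI documentation; the *counters* map * of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**, assumed to have been * populated with perf event file descriptors from user space, is * an assumption of the example: * * :: * * struct bpf_perf_event_value val = {}; * __u64 normalized = 0; * int err; * * err = bpf_perf_event_read_value(&counters, * BPF_F_CURRENT_CPU, * &val, sizeof(val)); * // Scale the raw counter by the enabled/running * // ratio to compensate for PMU multiplexing. * if (!err && val.running) * normalized = val.counter * val.enabled / * val.running;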
* * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) * Description * For an eBPF program attached to a perf event, retrieve the * value of the event counter associated to *ctx* and store it in * the structure pointed to by *buf* and of size *buf_size*. Enabled * and running times are also stored in the structure (see * description of helper **bpf_perf_event_read_value**\ () for * more details). * Return * 0 on success, or a negative error in case of failure. * * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen) * Description * Emulate a call to **getsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at * which the option resides and the name *optname* of the option * must be specified, see **getsockopt(2)** for more information. * The retrieved value is stored in the structure pointed to by * *optval* and of length *optlen*. * * This helper actually implements a subset of **getsockopt()**. * It supports the following *level*\ s: * * * **IPPROTO_TCP**, which supports *optname* * **TCP_CONGESTION**. * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. * Return * 0 on success, or a negative error in case of failure. * * int bpf_override_return(struct pt_regs *regs, u64 rc) * Description * Used for error injection, this helper uses kprobes to override * the return value of the probed function, and to set it to *rc*. * The first argument is the context *regs* on which the kprobe * works. * * This helper works by setting the PC (program counter) * to an override function which is run in place of the original * probed function. This means the probed function is not run at * all. The replacement function just returns with the required * value. * * This helper has security implications, and thus is subject to * restrictions. It is only available if the kernel was compiled * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration * option, and in this case it only works on functions tagged with * **ALLOW_ERROR_INJECTION** in the kernel code. * * Also, the helper is only available for the architectures having * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing, * x86 architecture is the only one to support this feature. * Return * 0 * * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) * Description * Attempt to set the value of the **bpf_sock_ops_cb_flags** field * for the full TCP socket associated to *bpf_sock_ops* to * *argval*. * * The primary use of this field is to determine if there should * be calls to eBPF programs of type * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP * code. A program of the same type can change its value, per * connection and as necessary, when the connection is * established. This field is directly accessible for reading, but * this helper must be used for updates in order to return an * error if an eBPF program tries to set a callback that is not * supported in the current kernel. * * The supported callback values that *argval* can combine are: * * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) * * Here are some examples of where one could call such eBPF * program: * * * When RTO fires. * * When a packet is retransmitted. * * When the connection terminates. * * When a packet is sent.
* * When a packet is received. * Return * Code **-EINVAL** if the socket is not a full TCP socket; * otherwise, a positive number containing the bits that could not * be set is returned (which comes down to 0 if all bits were set * as required). * * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if * the verdict eBPF program returns **SK_PASS**), redirect it to * the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress path otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. * * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, apply the verdict of the eBPF program to * the next *bytes* (number of bytes) of message *msg*. * * For example, this helper can be used in the following cases: * * * A single **sendmsg**\ () or **sendfile**\ () system call * contains multiple logical messages that the eBPF program is * supposed to read and for which it should apply a verdict. * * An eBPF program only needs to read the first *bytes* of a * *msg*. If the message has a large payload, then setting up * and calling the eBPF program repeatedly for all bytes, even * though the verdict is already known, would create unnecessary * overhead. * * When called from within an eBPF program, the helper sets a * counter internal to the BPF infrastructure, that is used to * apply the last verdict to the next *bytes*. If *bytes* is * smaller than the current data being processed from a * **sendmsg**\ () or **sendfile**\ () system call, the first * *bytes* will be sent and the eBPF program will be re-run with * the pointer for start of data pointing to byte number *bytes* * **+ 1**. If *bytes* is larger than the current data being * processed, then the eBPF verdict will be applied to multiple * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are * consumed. * * Note that if a socket closes with the internal counter holding * a non-zero value, this is not a problem because data is not * being buffered for *bytes* and is sent as it is received. * Return * 0 * * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, prevent the execution of the verdict eBPF * program for message *msg* until *bytes* (byte number) have been * accumulated. * * This can be used when one needs a specific number of bytes * before a verdict can be assigned, even if the data spans * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme * case would be a user calling **sendmsg**\ () repeatedly with * 1-byte long message segments. Obviously, this is bad for * performance, but it is still valid. If the eBPF program needs * *bytes* bytes to validate a header, this helper can be used to * prevent the eBPF program from being called again until *bytes* * have been accumulated. * Return * 0 * * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) * Description * For socket policies, pull in non-linear data from user space * for *msg* and set pointers *msg*\ **->data** and *msg*\ * **->data_end** to *start* and *end* byte offsets into *msg*, * respectively.
* * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a * *msg* it can only parse data that the (**data**, **data_end**) * pointers have already consumed. For **sendmsg**\ () hooks this * is likely the first scatterlist element. But for calls relying * on the **sendpage** handler (e.g. **sendfile**\ ()) this will * be the range (**0**, **0**) because the data is shared with * user space and by default the objective is to avoid allowing * user space to modify data while (or after) eBPF verdict is * being decided. This helper can be used to pull in data and to * set the start and end pointer to given values. Data will be * copied if necessary (i.e. if data was not linear and if start * and end pointers do not point to the same chunk). * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * All values for *flags* are reserved for future usage, and must * be left at zero. * Return * 0 on success, or a negative error in case of failure. * * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) * Description * Bind the socket associated to *ctx* to the address pointed to by * *addr*, of length *addr_len*. This allows for making outgoing * connections from the desired IP address, which can be useful * for example when all processes inside a cgroup should use one * single IP address on a host that has multiple IPs configured. * * This helper works for IPv4 and IPv6, TCP and UDP sockets. The * domain (*addr*\ **->sa_family**) must be **AF_INET** (or * **AF_INET6**). Looking for a free port to bind to can be * expensive, therefore binding to port is not permitted by the * helper: *addr*\ **->sin_port** (or **sin6_port**, respectively) * must be set to zero. * Return * 0 on success, or a negative error in case of failure. * * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is * only possible to shrink the packet as of this writing, * therefore *delta* must be a negative integer. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) * Description * Retrieve the XFRM state (IP transform framework, see also * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. * * The retrieved value is stored in the **struct bpf_xfrm_state** * pointed to by *xfrm_state* and of length *size*. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * This helper is available only if the kernel was compiled with * **CONFIG_XFRM** configuration option. * Return * 0 on success, or a negative error in case of failure. * * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags) * Description * Return a user or a kernel stack in a bpf program provided * buffer. To achieve this, the helper needs *ctx*, which is a * pointer to the context on which the tracing program is * executed.
* To store the stacktrace, the bpf program provides *buf* with * a nonnegative *size*. * * The last argument, *flags*, holds the number of stack frames to * skip (from 0 to 255), masked with * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set * the following flags: * * **BPF_F_USER_STACK** * Collect a user space stack instead of a kernel stack. * **BPF_F_USER_BUILD_ID** * Collect buildid+offset instead of ips for user stack, * only valid if **BPF_F_USER_STACK** is also specified. * * **bpf_get_stack**\ () can collect up to * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject * to a sufficiently large buffer size. Note that * this limit can be controlled with the **sysctl** program, and * that it should be manually increased in order to profile long * user stacks (such as stacks for Java programs). To do so, use: * * :: * * # sysctl kernel.perf_event_max_stack= * Return * A non-negative value equal to or less than *size* on success, * or a negative error in case of failure. * * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) * Description * This helper is similar to **bpf_skb_load_bytes**\ () in that * it provides an easy way to load *len* bytes from *offset* * from the packet associated to *skb*, into the buffer pointed * to by *to*. The difference to **bpf_skb_load_bytes**\ () is that * a fifth argument *start_header* exists in order to select a * base offset to start from. *start_header* can be one of: * * **BPF_HDR_START_MAC** * Base offset to load data from is *skb*'s mac header. * **BPF_HDR_START_NET** * Base offset to load data from is *skb*'s network header. * * In general, "direct packet access" is the preferred method to * access packet data, however, this helper is particularly useful * in socket filters where *skb*\ **->data** does not always point * to the start of the mac header and where "direct packet access" * is not available. * Return * 0 on success, or a negative error in case of failure. * * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) * Description * Do FIB lookup in kernel tables using parameters in *params*. * If lookup is successful and result shows packet is to be * forwarded, the neighbor tables are searched for the nexthop. * If successful (i.e., FIB lookup shows forwarding and nexthop * is resolved), the nexthop address is returned in ipv4_dst * or ipv6_dst based on family, smac is set to mac address of * egress device, dmac is set to nexthop mac address, rt_metric * is set to metric from route (IPv4/IPv6 only), and ifindex * is set to the device index of the nexthop from the FIB lookup. * * The *plen* argument is the size of the passed in struct. * The *flags* argument can be a combination of one or more of the * following values: * * **BPF_FIB_LOOKUP_DIRECT** * Do a direct table lookup vs full lookup using FIB * rules. * **BPF_FIB_LOOKUP_OUTPUT** * Perform lookup from an egress perspective (default is * ingress). * * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** for tc cls_act programs. * Return * * < 0 if any input argument is invalid * * 0 on success (packet is forwarded, nexthop neighbor exists) * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the * packet is not forwarded or needs assist from full stack * * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a sockhash *map* referencing sockets.
* The *skops* is used as a new value for the entry associated to * *key*. *flags* is one of: * * **BPF_NOEXIST** * The entry for *key* must not exist in the map. * **BPF_EXIST** * The entry for *key* must already exist in the map. * **BPF_ANY** * No condition on the existence of the entry for *key*. * * If the *map* has eBPF programs (parser and verdict), those will * be inherited by the socket being added. If the socket is * already attached to eBPF programs, this results in an error. * Return * 0 on success, or a negative error in case of failure. * * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if * the verdict eBPF program returns **SK_PASS**), redirect it to * the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress path otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. * * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. * if the verdict eBPF program returns **SK_PASS**), redirect it * to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress path otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. * * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) * Description * Encapsulate the packet associated to *skb* within a Layer 3 * protocol header. This header is provided in the buffer at * address *hdr*, with *len* its size in bytes. *type* indicates * the protocol of the header and can be one of: * * **BPF_LWT_ENCAP_SEG6** * IPv6 encapsulation with Segment Routing Header * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH, * the IPv6 header is computed by the kernel. * **BPF_LWT_ENCAP_SEG6_INLINE** * Only works if *skb* contains an IPv6 packet. Insert a * Segment Routing Header (**struct ipv6_sr_hdr**) inside * the IPv6 header. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. Only the flags, tag and TLVs * inside the outermost IPv6 Segment Routing Header can be * modified through this helper. * * A call to this helper is susceptible to change the underlying * packet buffer.
Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) * Description * Adjust the size allocated to TLVs in the outermost IPv6 * Segment Routing Header contained in the packet associated to * *skb*, at position *offset* by *delta* bytes. Only offsets * after the segments are accepted. *delta* can be positive * (growing) as well as negative (shrinking). * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) * Description * Apply an IPv6 Segment Routing action of type *action* to the * packet associated to *skb*. Each action takes a parameter * contained at address *param*, and of length *param_len* bytes. * *action* can be one of: * * **SEG6_LOCAL_ACTION_END_X** * End.X action: Endpoint with Layer-3 cross-connect. * Type of *param*: **struct in6_addr**. * **SEG6_LOCAL_ACTION_END_T** * End.T action: Endpoint with specific IPv6 table lookup. * Type of *param*: **int**. * **SEG6_LOCAL_ACTION_END_B6** * End.B6 action: Endpoint bound to an SRv6 policy. * Type of param: **struct ipv6_sr_hdr**. * **SEG6_LOCAL_ACTION_END_B6_ENCAP** * End.B6.Encap action: Endpoint bound to an SRv6 * encapsulation policy. * Type of param: **struct ipv6_sr_hdr**. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded key press with *scancode* and * *toggle* value in the given *protocol*. The scancode will be * translated to a keycode using the rc keymap, and reported as * an input key down event. After a period a key up event is * generated. This period can be extended by calling either * **bpf_rc_keydown**\ () again with the same values, or calling * **bpf_rc_repeat**\ (). * * Some protocols include a toggle bit, in case the button was * released and pressed again between consecutive scancodes. * * The *ctx* should point to the lirc sample as passed into * the program. * * The *protocol* is the decoded protocol number (see * **enum rc_proto** for some predefined values). * * This helper is only available if the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". * Return * 0 * * int bpf_rc_repeat(void *ctx) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded repeat key message. This delays * the generation of a key up event for a previously generated * key down event. * * Some IR protocols like NEC have a special IR message for * repeating the last button, for when a button is held down.
* * The *ctx* should point to the lirc sample as passed into * the program. * * This helper is only available if the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". * Return * 0 * * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb) * Description * Return the cgroup v2 id of the socket associated with the *skb*. * This is roughly similar to the **bpf_get_cgroup_classid**\ () * helper for cgroup v1 by providing a tag (or identifier) that * can be matched on or used for map lookups e.g. to implement * policy. The cgroup v2 id of a given path in the hierarchy is * exposed in user space through the f_handle API in order to get * to the same 64-bit id. * * This helper can be used on TC egress path, but not on ingress, * and is available only if the kernel was compiled with the * **CONFIG_SOCK_CGROUP_DATA** configuration option. * Return * The id is returned or 0 in case the id could not be retrieved. * * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) * Description * Return the id of the cgroup v2 that is an ancestor of the cgroup * associated with the *skb* at the *ancestor_level*. The root * cgroup is at *ancestor_level* zero and each step down the * hierarchy increments the level. If *ancestor_level* == level of * cgroup associated with *skb*, then the return value will be the * same as that of **bpf_skb_cgroup_id**\ (). * * The helper is useful to implement policies based on cgroups * that are higher in the hierarchy than the immediate cgroup * associated with *skb*. * * The format of the returned id and the helper limitations are the * same as in **bpf_skb_cgroup_id**\ (). * Return * The id is returned or 0 in case the id could not be retrieved. * * u64 bpf_get_current_cgroup_id(void) * Return * A 64-bit integer containing the current cgroup id based * on the cgroup within which the current task is running. * * void *bpf_get_local_storage(void *map, u64 flags) * Description * Get the pointer to the local storage area. * The type and the size of the local storage is defined * by the *map* argument. * The *flags* meaning is specific for each map type, * and has to be 0 for cgroup local storage. * * Depending on the BPF program type, a local storage area * can be shared between multiple instances of the BPF program, * running simultaneously. * * Users have to take care of synchronization themselves, * for example by using the **BPF_STX_XADD** instruction to alter * the shared data. * Return * A pointer to the local storage area. * * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) * Description * Select a **SO_REUSEPORT** socket from a * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*. * It checks that the selected socket matches the incoming * request in the socket buffer. * Return * 0 on success, or a negative error in case of failure. * * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for a TCP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * * The *ctx* should point to the context of the program, such as * the skb or socket (depending on the hook in use). This is used * to determine the base network namespace for the lookup. * * *tuple_size* must be one of: * * **sizeof**\ (*tuple*\ **->ipv4**) * Look for an IPv4 socket. * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket.
* * If the *netns* is a negative signed 32-bit integer, then the * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or * equal to zero then it specifies the ID of the netns relative to * the netns associated with the *ctx*. *netns* values beyond the * range of 32-bit integers are reserved for future use. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * This helper is available only if the kernel was compiled with * **CONFIG_NET** configuration option. * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** * result is from **reuse->socks**\ [] using the hash of the tuple. * * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for a UDP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * * The *ctx* should point to the context of the program, such as * the skb or socket (depending on the hook in use). This is used * to determine the base network namespace for the lookup. * * *tuple_size* must be one of: * * **sizeof**\ (*tuple*\ **->ipv4**) * Look for an IPv4 socket. * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or * equal to zero then it specifies the ID of the netns relative to * the netns associated with the *ctx*. *netns* values beyond the * range of 32-bit integers are reserved for future use. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * This helper is available only if the kernel was compiled with * **CONFIG_NET** configuration option. * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** * result is from **reuse->socks**\ [] using the hash of the tuple. * * int bpf_sk_release(struct bpf_sock *sock) * Description * Release the reference held by *sock*. *sock* must be a * non-**NULL** pointer that was returned from * **bpf_sk_lookup_xxx**\ (). * Return * 0 on success, or a negative error in case of failure. * * int bpf_map_pop_elem(struct bpf_map *map, void *value) * Description * Pop an element from *map*. * Return * 0 on success, or a negative error in case of failure. * * int bpf_map_peek_elem(struct bpf_map *map, void *value) * Description * Get an element from *map* without removing it. * Return * 0 on success, or a negative error in case of failure. * * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * For socket policies, insert *len* bytes into *msg* at offset * *start*. * * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a * *msg* it may want to insert metadata or options into the *msg*. * This can later be read and used by any of the lower layer BPF * hooks.
* * This helper may fail under memory pressure (if an allocation * fails); in these cases, BPF programs will get an appropriate * error and will need to handle it. * Return * 0 on success, or a negative error in case of failure. * * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags) * Description * Will remove *pop* bytes from a *msg* starting at byte *start*. * This may result in **ENOMEM** errors under certain situations if * an allocation and copy are required due to a full ring buffer. * However, the helper will try to avoid doing the allocation * if possible. Other errors can occur if input parameters are * invalid either due to *start* byte not being a valid part of * *msg* payload and/or *pop* value being too large. * Return * 0 on success, or a negative error in case of failure. * * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded pointer movement. * * The *ctx* should point to the lirc sample as passed into * the program. * * This helper is only available if the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". * Return * 0 */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ FN(map_lookup_elem), \ FN(map_update_elem), \ FN(map_delete_elem), \ FN(probe_read), \ FN(ktime_get_ns), \ FN(trace_printk), \ FN(get_prandom_u32), \ FN(get_smp_processor_id), \ FN(skb_store_bytes), \ FN(l3_csum_replace), \ FN(l4_csum_replace), \ FN(tail_call), \ FN(clone_redirect), \ FN(get_current_pid_tgid), \ FN(get_current_uid_gid), \ FN(get_current_comm), \ FN(get_cgroup_classid), \ FN(skb_vlan_push), \ FN(skb_vlan_pop), \ FN(skb_get_tunnel_key), \ FN(skb_set_tunnel_key), \ FN(perf_event_read), \ FN(redirect), \ FN(get_route_realm), \ FN(perf_event_output), \ FN(skb_load_bytes), \ FN(get_stackid), \ FN(csum_diff), \ FN(skb_get_tunnel_opt), \ FN(skb_set_tunnel_opt), \ FN(skb_change_proto), \ FN(skb_change_type), \ FN(skb_under_cgroup), \ FN(get_hash_recalc), \ FN(get_current_task), \ FN(probe_write_user), \ FN(current_task_under_cgroup), \ FN(skb_change_tail), \ FN(skb_pull_data), \ FN(csum_update), \ FN(set_hash_invalid), \ FN(get_numa_node_id), \ FN(skb_change_head), \ FN(xdp_adjust_head), \ FN(probe_read_str), \ FN(get_socket_cookie), \ FN(get_socket_uid), \ FN(set_hash), \ FN(setsockopt), \ FN(skb_adjust_room), \ FN(redirect_map), \ FN(sk_redirect_map), \ FN(sock_map_update), \ FN(xdp_adjust_meta), \ FN(perf_event_read_value), \ FN(perf_prog_read_value), \ FN(getsockopt), \ FN(override_return), \ FN(sock_ops_cb_flags_set), \ FN(msg_redirect_map), \ FN(msg_apply_bytes), \ FN(msg_cork_bytes), \ FN(msg_pull_data), \ FN(bind), \ FN(xdp_adjust_tail), \ FN(skb_get_xfrm_state), \ FN(get_stack), \ FN(skb_load_bytes_relative), \ FN(fib_lookup), \ FN(sock_hash_update), \ FN(msg_redirect_hash), \ FN(sk_redirect_hash), \ FN(lwt_push_encap), \ FN(lwt_seg6_store_bytes), \ FN(lwt_seg6_adjust_srh), \ FN(lwt_seg6_action), \ FN(rc_repeat), \ FN(rc_keydown), \ FN(skb_cgroup_id), \ FN(get_current_cgroup_id), \ FN(get_local_storage), \ FN(sk_select_reuseport), \ FN(skb_ancestor_cgroup_id), \ FN(sk_lookup_tcp), \ FN(sk_lookup_udp), \ FN(sk_release), \ FN(map_push_elem), \ FN(map_pop_elem), \ FN(map_peek_elem), \ FN(msg_push_data), \ FN(msg_pop_data), \ FN(rc_pointer_rel), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call */ #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x enum
bpf_func_id { __BPF_FUNC_MAPPER(__BPF_ENUM_FN) __BPF_FUNC_MAX_ID, }; #undef __BPF_ENUM_FN /* All flags used by eBPF helper functions, placed here. */ /* BPF_FUNC_skb_store_bytes flags. */ #define BPF_F_RECOMPUTE_CSUM (1ULL << 0) #define BPF_F_INVALIDATE_HASH (1ULL << 1) /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. * First 4 bits are for passing the header field size. */ #define BPF_F_HDR_FIELD_MASK 0xfULL /* BPF_FUNC_l4_csum_replace flags. */ #define BPF_F_PSEUDO_HDR (1ULL << 4) #define BPF_F_MARK_MANGLED_0 (1ULL << 5) #define BPF_F_MARK_ENFORCE (1ULL << 6) /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ #define BPF_F_INGRESS (1ULL << 0) /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ #define BPF_F_TUNINFO_IPV6 (1ULL << 0) /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ #define BPF_F_SKIP_FIELD_MASK 0xffULL #define BPF_F_USER_STACK (1ULL << 8) /* flags used by BPF_FUNC_get_stackid only. */ #define BPF_F_FAST_STACK_CMP (1ULL << 9) #define BPF_F_REUSE_STACKID (1ULL << 10) /* flags used by BPF_FUNC_get_stack only. */ #define BPF_F_USER_BUILD_ID (1ULL << 11) /* BPF_FUNC_skb_set_tunnel_key flags. */ #define BPF_F_ZERO_CSUM_TX (1ULL << 1) #define BPF_F_DONT_FRAGMENT (1ULL << 2) #define BPF_F_SEQ_NUMBER (1ULL << 3) /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and * BPF_FUNC_perf_event_read_value flags. */ #define BPF_F_INDEX_MASK 0xffffffffULL #define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK /* BPF_FUNC_perf_event_output for sk_buff input context. */ #define BPF_F_CTXLEN_MASK (0xfffffULL << 32) /* Current network namespace */ #define BPF_F_CURRENT_NETNS (-1L) /* Mode for BPF_FUNC_skb_adjust_room helper. */ enum bpf_adj_room_mode { BPF_ADJ_ROOM_NET, }; /* Mode for BPF_FUNC_skb_load_bytes_relative helper. */ enum bpf_hdr_start_off { BPF_HDR_START_MAC, BPF_HDR_START_NET, }; /* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */ enum bpf_lwt_encap_mode { BPF_LWT_ENCAP_SEG6, BPF_LWT_ENCAP_SEG6_INLINE }; #define __bpf_md_ptr(type, name) \ union { \ type name; \ __u64 :64; \ } __attribute__((aligned(8))) /* user accessible mirror of in-kernel sk_buff. * new fields can only be added to the end of this structure */ struct __sk_buff { __u32 len; __u32 pkt_type; __u32 mark; __u32 queue_mapping; __u32 protocol; __u32 vlan_present; __u32 vlan_tci; __u32 vlan_proto; __u32 priority; __u32 ingress_ifindex; __u32 ifindex; __u32 tc_index; __u32 cb[5]; __u32 hash; __u32 tc_classid; __u32 data; __u32 data_end; __u32 napi_id; /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */ __u32 family; __u32 remote_ip4; /* Stored in network byte order */ __u32 local_ip4; /* Stored in network byte order */ __u32 remote_ip6[4]; /* Stored in network byte order */ __u32 local_ip6[4]; /* Stored in network byte order */ __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ /* ... here. */ __u32 data_meta; __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); __u64 tstamp; __u32 wire_len; }; struct bpf_tunnel_key { __u32 tunnel_id; union { __u32 remote_ipv4; __u32 remote_ipv6[4]; }; __u8 tunnel_tos; __u8 tunnel_ttl; __u16 tunnel_ext; /* Padding, future use. */ __u32 tunnel_label; }; /* user accessible mirror of in-kernel xfrm_state. * new fields can only be added to the end of this structure */ struct bpf_xfrm_state { __u32 reqid; __u32 spi; /* Stored in network byte order */ __u16 family; __u16 ext; /* Padding, future use. 
*/ union { __u32 remote_ipv4; /* Stored in network byte order */ __u32 remote_ipv6[4]; /* Stored in network byte order */ }; }; /* Generic BPF return codes which all BPF program types may support. * The values are binary compatible with their TC_ACT_* counter-part to * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT * programs. * * XDP is handled separately, see XDP_*. */ enum bpf_ret_code { BPF_OK = 0, /* 1 reserved */ BPF_DROP = 2, /* 3-6 reserved */ BPF_REDIRECT = 7, /* >127 are reserved for prog type specific return codes */ }; struct bpf_sock { __u32 bound_dev_if; __u32 family; __u32 type; __u32 protocol; __u32 mark; __u32 priority; __u32 src_ip4; /* Allows 1,2,4-byte read. * Stored in network byte order. */ __u32 src_ip6[4]; /* Allows 1,2,4-byte read. * Stored in network byte order. */ __u32 src_port; /* Allows 4-byte read. * Stored in host byte order */ }; struct bpf_sock_tuple { union { struct { __be32 saddr; __be32 daddr; __be16 sport; __be16 dport; } ipv4; struct { __be32 saddr[4]; __be32 daddr[4]; __be16 sport; __be16 dport; } ipv6; }; }; #define XDP_PACKET_HEADROOM 256 /* User return codes for XDP prog type. * A valid XDP program must return one of these defined values. All other * return codes are reserved for future use. Unknown return codes will * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). */ enum xdp_action { XDP_ABORTED = 0, XDP_DROP, XDP_PASS, XDP_TX, XDP_REDIRECT, }; /* user accessible metadata for XDP packet hook * new fields must be added to the end of this structure */ struct xdp_md { __u32 data; __u32 data_end; __u32 data_meta; /* Below accesses go through struct xdp_rxq_info */ __u32 ingress_ifindex; /* rxq->dev->ifindex */ __u32 rx_queue_index; /* rxq->queue_index */ }; enum sk_action { SK_DROP = 0, SK_PASS, }; /* user accessible metadata for SK_MSG packet hook, new fields must * be added to the end of this structure */ struct sk_msg_md { __bpf_md_ptr(void *, data); __bpf_md_ptr(void *, data_end); __u32 family; __u32 remote_ip4; /* Stored in network byte order */ __u32 local_ip4; /* Stored in network byte order */ __u32 remote_ip6[4]; /* Stored in network byte order */ __u32 local_ip6[4]; /* Stored in network byte order */ __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ __u32 size; /* Total size of sk_msg */ }; struct sk_reuseport_md { /* * Start of directly accessible data. It begins from * the tcp/udp header. */ __bpf_md_ptr(void *, data); /* End of directly accessible data */ __bpf_md_ptr(void *, data_end); /* * Total length of packet (starting from the tcp/udp header). * Note that the directly accessible bytes (data_end - data) * could be less than this "len". Those bytes could be * indirectly read by a helper "bpf_skb_load_bytes()". */ __u32 len; /* * Eth protocol in the mac header (network byte order). e.g. * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD) */ __u32 eth_protocol; __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ __u32 bind_inany; /* Is sock bound to an INANY address?
*/ __u32 hash; /* A hash of the packet 4 tuples */ }; #define BPF_TAG_SIZE 8 struct bpf_prog_info { __u32 type; __u32 id; __u8 tag[BPF_TAG_SIZE]; __u32 jited_prog_len; __u32 xlated_prog_len; __aligned_u64 jited_prog_insns; __aligned_u64 xlated_prog_insns; __u64 load_time; /* ns since boottime */ __u32 created_by_uid; __u32 nr_map_ids; __aligned_u64 map_ids; char name[BPF_OBJ_NAME_LEN]; __u32 ifindex; __u32 gpl_compatible:1; __u64 netns_dev; __u64 netns_ino; __u32 nr_jited_ksyms; __u32 nr_jited_func_lens; __aligned_u64 jited_ksyms; __aligned_u64 jited_func_lens; __u32 btf_id; __u32 func_info_rec_size; __aligned_u64 func_info; __u32 nr_func_info; __u32 nr_line_info; __aligned_u64 line_info; __aligned_u64 jited_line_info; __u32 nr_jited_line_info; __u32 line_info_rec_size; __u32 jited_line_info_rec_size; __u32 nr_prog_tags; __aligned_u64 prog_tags; } __attribute__((aligned(8))); struct bpf_map_info { __u32 type; __u32 id; __u32 key_size; __u32 value_size; __u32 max_entries; __u32 map_flags; char name[BPF_OBJ_NAME_LEN]; __u32 ifindex; __u32 :32; __u64 netns_dev; __u64 netns_ino; __u32 btf_id; __u32 btf_key_type_id; __u32 btf_value_type_id; } __attribute__((aligned(8))); struct bpf_btf_info { __aligned_u64 btf; __u32 btf_size; __u32 id; } __attribute__((aligned(8))); /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed * by user and intended to be used by socket (e.g. to bind to, depends on * attach type). */ struct bpf_sock_addr { __u32 user_family; /* Allows 4-byte read, but no write. */ __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ __u32 user_ip6[4]; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ __u32 user_port; /* Allows 4-byte read and write. * Stored in network byte order */ __u32 family; /* Allows 4-byte read, but no write */ __u32 type; /* Allows 4-byte read, but no write */ __u32 protocol; /* Allows 4-byte read, but no write */ __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ __u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ }; /* User bpf_sock_ops struct to access socket values and specify request ops * and their replies. * Some of these fields are in network (big-endian) byte order and may need * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h). * New fields can only be added at the end of this structure */ struct bpf_sock_ops { __u32 op; union { __u32 args[4]; /* Optionally passed to bpf program */ __u32 reply; /* Returned by bpf program */ __u32 replylong[4]; /* Optionally returned by bpf prog */ }; __u32 family; __u32 remote_ip4; /* Stored in network byte order */ __u32 local_ip4; /* Stored in network byte order */ __u32 remote_ip6[4]; /* Stored in network byte order */ __u32 local_ip6[4]; /* Stored in network byte order */ __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ __u32 is_fullsock; /* Some TCP fields are only valid if * there is a full socket. If not, the * fields read as zero.
*/ __u32 snd_cwnd; __u32 srtt_us; /* Averaged RTT << 3 in usecs */ __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */ __u32 state; __u32 rtt_min; __u32 snd_ssthresh; __u32 rcv_nxt; __u32 snd_nxt; __u32 snd_una; __u32 mss_cache; __u32 ecn_flags; __u32 rate_delivered; __u32 rate_interval_us; __u32 packets_out; __u32 retrans_out; __u32 total_retrans; __u32 segs_in; __u32 data_segs_in; __u32 segs_out; __u32 data_segs_out; __u32 lost_out; __u32 sacked_out; __u32 sk_txhash; __u64 bytes_received; __u64 bytes_acked; }; /* Definitions for bpf_sock_ops_cb_flags */ #define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0) #define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1) #define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2) #define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently * supported cb flags */ /* List of known BPF sock_ops operators. * New entries can only be added at the end */ enum { BPF_SOCK_OPS_VOID, BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or * -1 if default value should be used */ BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertised * window (in packets) or -1 if default * value should be used */ BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an * active connection is initialized */ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an * active connection is * established */ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a * passive connection is * established */ BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control * needs ECN */ BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is * based on the path and may be * dependent on the congestion control * algorithm. In general it indicates * a congestion threshold. RTTs above * this indicate congestion */ BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered. * Arg1: value of icsk_retransmits * Arg2: value of icsk_rto * Arg3: whether RTO has expired */ BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted. * Arg1: sequence number of 1st byte * Arg2: # segments * Arg3: return value of * tcp_transmit_skb (0 => success) */ BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state. * Arg1: old_state * Arg2: new_state */ BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after * socket transition to LISTEN state. */ }; /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect * changes between the TCP and BPF versions. Ideally this should never happen. * If it does, we need to add code to convert them before calling * the BPF sock_ops function. */ enum { BPF_TCP_ESTABLISHED = 1, BPF_TCP_SYN_SENT, BPF_TCP_SYN_RECV, BPF_TCP_FIN_WAIT1, BPF_TCP_FIN_WAIT2, BPF_TCP_TIME_WAIT, BPF_TCP_CLOSE, BPF_TCP_CLOSE_WAIT, BPF_TCP_LAST_ACK, BPF_TCP_LISTEN, BPF_TCP_CLOSING, /* Now a valid state */ BPF_TCP_NEW_SYN_RECV, BPF_TCP_MAX_STATES /* Leave at the end!
*/ }; #define TCP_BPF_IW 1001 /* Set TCP initial congestion window */ #define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */ struct bpf_perf_event_value { __u64 counter; __u64 enabled; __u64 running; }; #define BPF_DEVCG_ACC_MKNOD (1ULL << 0) #define BPF_DEVCG_ACC_READ (1ULL << 1) #define BPF_DEVCG_ACC_WRITE (1ULL << 2) #define BPF_DEVCG_DEV_BLOCK (1ULL << 0) #define BPF_DEVCG_DEV_CHAR (1ULL << 1) struct bpf_cgroup_dev_ctx { /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ __u32 access_type; __u32 major; __u32 minor; }; struct bpf_raw_tracepoint_args { __u64 args[0]; }; /* DIRECT: Skip the FIB rules and go to FIB table associated with device * OUTPUT: Do lookup from egress perspective; default is ingress */ #define BPF_FIB_LOOKUP_DIRECT BIT(0) #define BPF_FIB_LOOKUP_OUTPUT BIT(1) enum { BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ }; struct bpf_fib_lookup { /* input: network family for lookup (AF_INET, AF_INET6) * output: network family of egress nexthop */ __u8 family; /* set if lookup is to consider L4 data - e.g., FIB rules */ __u8 l4_protocol; __be16 sport; __be16 dport; /* total length of packet from network header - used for MTU check */ __u16 tot_len; /* input: L3 device index for lookup * output: device index from FIB lookup */ __u32 ifindex; union { /* inputs to lookup */ __u8 tos; /* AF_INET */ __be32 flowinfo; /* AF_INET6, flow_label + priority */ /* output: metric of fib result (IPv4/IPv6 only) */ __u32 rt_metric; }; union { __be32 ipv4_src; __u32 ipv6_src[4]; /* in6_addr; network order */ }; /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in * network header. 
output: bpf_fib_lookup sets to gateway address * if FIB lookup returns gateway route */ union { __be32 ipv4_dst; __u32 ipv6_dst[4]; /* in6_addr; network order */ }; /* output */ __be16 h_vlan_proto; __be16 h_vlan_TCI; __u8 smac[6]; /* ETH_ALEN */ __u8 dmac[6]; /* ETH_ALEN */ }; enum bpf_task_fd_type { BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ BPF_FD_TYPE_TRACEPOINT, /* tp name */ BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */ BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */ BPF_FD_TYPE_UPROBE, /* filename + offset */ BPF_FD_TYPE_URETPROBE, /* filename + offset */ }; struct bpf_flow_keys { __u16 nhoff; __u16 thoff; __u16 addr_proto; /* ETH_P_* of valid addrs */ __u8 is_frag; __u8 is_first_frag; __u8 is_encap; __u8 ip_proto; __be16 n_proto; __be16 sport; __be16 dport; union { struct { __be32 ipv4_src; __be32 ipv4_dst; }; struct { __u32 ipv6_src[4]; /* in6_addr; network order */ __u32 ipv6_dst[4]; /* in6_addr; network order */ }; }; }; struct bpf_func_info { __u32 insn_off; __u32 type_id; }; #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10) #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff) struct bpf_line_info { __u32 insn_off; __u32 file_name_off; __u32 line_off; __u32 line_col; }; #endif /* _UAPI__LINUX_BPF_H__ */ gobpf-0.2.0/elf/include/uapi/linux/doc.go000066400000000000000000000000161404447410300201660ustar00rootroot00000000000000package linux gobpf-0.2.0/elf/include/uapi/linux/if_link.h000066400000000000000000000557721404447410300206770ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_LINUX_IF_LINK_H #define _UAPI_LINUX_IF_LINK_H #include <linux/types.h> #include <linux/netlink.h> /* This struct should be in sync with struct rtnl_link_stats64 */ struct rtnl_link_stats { __u32 rx_packets; /* total packets received */ __u32 tx_packets; /* total packets transmitted */ __u32 rx_bytes; /* total bytes received */ __u32 tx_bytes; /* total bytes transmitted */ __u32 rx_errors; /* bad packets received */ __u32 tx_errors; /* packet transmit problems */ __u32 rx_dropped; /* no space in linux buffers */ __u32 tx_dropped; /* no space available in linux */ __u32 multicast; /* multicast packets received */ __u32 collisions; /* detailed rx_errors: */ __u32 rx_length_errors; __u32 rx_over_errors; /* receiver ring buff overflow */ __u32 rx_crc_errors; /* recved pkt with crc error */ __u32 rx_frame_errors; /* recv'd frame alignment error */ __u32 rx_fifo_errors; /* recv'r fifo overrun */ __u32 rx_missed_errors; /* receiver missed packet */ /* detailed tx_errors */ __u32 tx_aborted_errors; __u32 tx_carrier_errors; __u32 tx_fifo_errors; __u32 tx_heartbeat_errors; __u32 tx_window_errors; /* for cslip etc */ __u32 rx_compressed; __u32 tx_compressed; __u32 rx_nohandler; /* dropped, no handler found */ }; /* The main device statistics structure */ struct rtnl_link_stats64 { __u64 rx_packets; /* total packets received */ __u64 tx_packets; /* total packets transmitted */ __u64 rx_bytes; /* total bytes received */ __u64 tx_bytes; /* total bytes transmitted */ __u64 rx_errors; /* bad packets received */ __u64 tx_errors; /* packet transmit problems */ __u64 rx_dropped; /* no space in linux buffers */ __u64 tx_dropped; /* no space available in linux */ __u64 multicast; /* multicast packets received */ __u64 collisions; /* detailed rx_errors: */ __u64 rx_length_errors; __u64 rx_over_errors; /* receiver ring buff overflow */ __u64 rx_crc_errors; /* recved pkt with crc error */ __u64 rx_frame_errors; /* recv'd frame alignment error */ __u64 rx_fifo_errors; /* recv'r
fifo overrun */ __u64 rx_missed_errors; /* receiver missed packet */ /* detailed tx_errors */ __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; /* for cslip etc */ __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; /* dropped, no handler found */ }; /* The struct should be in sync with struct ifmap */ struct rtnl_link_ifmap { __u64 mem_start; __u64 mem_end; __u64 base_addr; __u16 irq; __u8 dma; __u8 port; }; /* * IFLA_AF_SPEC * Contains nested attributes for address family specific attributes. * Each address family may create an attribute with the address family * number as type and create its own attribute structure in it. * * Example: * [IFLA_AF_SPEC] = { * [AF_INET] = { * [IFLA_INET_CONF] = ..., * }, * [AF_INET6] = { * [IFLA_INET6_FLAGS] = ..., * [IFLA_INET6_CONF] = ..., * } * } */ enum { IFLA_UNSPEC, IFLA_ADDRESS, IFLA_BROADCAST, IFLA_IFNAME, IFLA_MTU, IFLA_LINK, IFLA_QDISC, IFLA_STATS, IFLA_COST, #define IFLA_COST IFLA_COST IFLA_PRIORITY, #define IFLA_PRIORITY IFLA_PRIORITY IFLA_MASTER, #define IFLA_MASTER IFLA_MASTER IFLA_WIRELESS, /* Wireless Extension event - see wireless.h */ #define IFLA_WIRELESS IFLA_WIRELESS IFLA_PROTINFO, /* Protocol specific information for a link */ #define IFLA_PROTINFO IFLA_PROTINFO IFLA_TXQLEN, #define IFLA_TXQLEN IFLA_TXQLEN IFLA_MAP, #define IFLA_MAP IFLA_MAP IFLA_WEIGHT, #define IFLA_WEIGHT IFLA_WEIGHT IFLA_OPERSTATE, IFLA_LINKMODE, IFLA_LINKINFO, #define IFLA_LINKINFO IFLA_LINKINFO IFLA_NET_NS_PID, IFLA_IFALIAS, IFLA_NUM_VF, /* Number of VFs if device is SR-IOV PF */ IFLA_VFINFO_LIST, IFLA_STATS64, IFLA_VF_PORTS, IFLA_PORT_SELF, IFLA_AF_SPEC, IFLA_GROUP, /* Group the device belongs to */ IFLA_NET_NS_FD, IFLA_EXT_MASK, /* Extended info mask, VFs, etc */ IFLA_PROMISCUITY, /* Promiscuity count: > 0 means acts PROMISC */ #define IFLA_PROMISCUITY IFLA_PROMISCUITY IFLA_NUM_TX_QUEUES, IFLA_NUM_RX_QUEUES, IFLA_CARRIER, IFLA_PHYS_PORT_ID, IFLA_CARRIER_CHANGES, IFLA_PHYS_SWITCH_ID, IFLA_LINK_NETNSID, IFLA_PHYS_PORT_NAME, IFLA_PROTO_DOWN, IFLA_GSO_MAX_SEGS, IFLA_GSO_MAX_SIZE, IFLA_PAD, IFLA_XDP, IFLA_EVENT, IFLA_NEW_NETNSID, IFLA_IF_NETNSID, IFLA_TARGET_NETNSID = IFLA_IF_NETNSID, /* new alias */ IFLA_CARRIER_UP_COUNT, IFLA_CARRIER_DOWN_COUNT, IFLA_NEW_IFINDEX, IFLA_MIN_MTU, IFLA_MAX_MTU, __IFLA_MAX }; #define IFLA_MAX (__IFLA_MAX - 1) /* backwards compatibility for userspace */ #ifndef __KERNEL__ #define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg)))) #define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg)) #endif enum { IFLA_INET_UNSPEC, IFLA_INET_CONF, __IFLA_INET_MAX, }; #define IFLA_INET_MAX (__IFLA_INET_MAX - 1) /* ifi_flags. IFF_* flags. The only change is: IFF_LOOPBACK, IFF_BROADCAST and IFF_POINTOPOINT are no longer changeable by the user. They describe link media characteristics and are set by the device driver. Comments: - Combination IFF_BROADCAST|IFF_POINTOPOINT is invalid - If neither of these three flags are set, the interface is NBMA. - IFF_MULTICAST does not mean anything special: multicasts can be used on all not-NBMA links. IFF_MULTICAST means that this media uses special encapsulation for multicast frames. Apparently, all IFF_POINTOPOINT and IFF_BROADCAST devices are able to use multicasts too. */ /* IFLA_LINK. For usual devices it is equal to ifi_index. If it is a "virtual interface" (f.e. tunnel), ifi_link can point to real physical interface (f.e.
for bandwidth calculations), or maybe 0, what means, that real media is unknown (usual for IPIP tunnels, when route to endpoint is allowed to change) */ /* Subtype attributes for IFLA_PROTINFO */ enum { IFLA_INET6_UNSPEC, IFLA_INET6_FLAGS, /* link flags */ IFLA_INET6_CONF, /* sysctl parameters */ IFLA_INET6_STATS, /* statistics */ IFLA_INET6_MCAST, /* MC things. What of them? */ IFLA_INET6_CACHEINFO, /* time values and max reasm size */ IFLA_INET6_ICMP6STATS, /* statistics (icmpv6) */ IFLA_INET6_TOKEN, /* device token */ IFLA_INET6_ADDR_GEN_MODE, /* implicit address generator mode */ __IFLA_INET6_MAX }; #define IFLA_INET6_MAX (__IFLA_INET6_MAX - 1) enum in6_addr_gen_mode { IN6_ADDR_GEN_MODE_EUI64, IN6_ADDR_GEN_MODE_NONE, IN6_ADDR_GEN_MODE_STABLE_PRIVACY, IN6_ADDR_GEN_MODE_RANDOM, }; /* Bridge section */ enum { IFLA_BR_UNSPEC, IFLA_BR_FORWARD_DELAY, IFLA_BR_HELLO_TIME, IFLA_BR_MAX_AGE, IFLA_BR_AGEING_TIME, IFLA_BR_STP_STATE, IFLA_BR_PRIORITY, IFLA_BR_VLAN_FILTERING, IFLA_BR_VLAN_PROTOCOL, IFLA_BR_GROUP_FWD_MASK, IFLA_BR_ROOT_ID, IFLA_BR_BRIDGE_ID, IFLA_BR_ROOT_PORT, IFLA_BR_ROOT_PATH_COST, IFLA_BR_TOPOLOGY_CHANGE, IFLA_BR_TOPOLOGY_CHANGE_DETECTED, IFLA_BR_HELLO_TIMER, IFLA_BR_TCN_TIMER, IFLA_BR_TOPOLOGY_CHANGE_TIMER, IFLA_BR_GC_TIMER, IFLA_BR_GROUP_ADDR, IFLA_BR_FDB_FLUSH, IFLA_BR_MCAST_ROUTER, IFLA_BR_MCAST_SNOOPING, IFLA_BR_MCAST_QUERY_USE_IFADDR, IFLA_BR_MCAST_QUERIER, IFLA_BR_MCAST_HASH_ELASTICITY, IFLA_BR_MCAST_HASH_MAX, IFLA_BR_MCAST_LAST_MEMBER_CNT, IFLA_BR_MCAST_STARTUP_QUERY_CNT, IFLA_BR_MCAST_LAST_MEMBER_INTVL, IFLA_BR_MCAST_MEMBERSHIP_INTVL, IFLA_BR_MCAST_QUERIER_INTVL, IFLA_BR_MCAST_QUERY_INTVL, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, IFLA_BR_NF_CALL_IPTABLES, IFLA_BR_NF_CALL_IP6TABLES, IFLA_BR_NF_CALL_ARPTABLES, IFLA_BR_VLAN_DEFAULT_PVID, IFLA_BR_PAD, IFLA_BR_VLAN_STATS_ENABLED, IFLA_BR_MCAST_STATS_ENABLED, IFLA_BR_MCAST_IGMP_VERSION, IFLA_BR_MCAST_MLD_VERSION, IFLA_BR_VLAN_STATS_PER_PORT, IFLA_BR_MULTI_BOOLOPT, __IFLA_BR_MAX, }; #define IFLA_BR_MAX (__IFLA_BR_MAX - 1) struct ifla_bridge_id { __u8 prio[2]; __u8 addr[6]; /* ETH_ALEN */ }; enum { BRIDGE_MODE_UNSPEC, BRIDGE_MODE_HAIRPIN, }; enum { IFLA_BRPORT_UNSPEC, IFLA_BRPORT_STATE, /* Spanning tree state */ IFLA_BRPORT_PRIORITY, /* " priority */ IFLA_BRPORT_COST, /* " cost */ IFLA_BRPORT_MODE, /* mode (hairpin) */ IFLA_BRPORT_GUARD, /* bpdu guard */ IFLA_BRPORT_PROTECT, /* root port protection */ IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave */ IFLA_BRPORT_LEARNING, /* mac learning */ IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */ IFLA_BRPORT_PROXYARP, /* proxy ARP */ IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */ IFLA_BRPORT_PROXYARP_WIFI, /* proxy ARP for Wi-Fi */ IFLA_BRPORT_ROOT_ID, /* designated root */ IFLA_BRPORT_BRIDGE_ID, /* designated bridge */ IFLA_BRPORT_DESIGNATED_PORT, IFLA_BRPORT_DESIGNATED_COST, IFLA_BRPORT_ID, IFLA_BRPORT_NO, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK, IFLA_BRPORT_CONFIG_PENDING, IFLA_BRPORT_MESSAGE_AGE_TIMER, IFLA_BRPORT_FORWARD_DELAY_TIMER, IFLA_BRPORT_HOLD_TIMER, IFLA_BRPORT_FLUSH, IFLA_BRPORT_MULTICAST_ROUTER, IFLA_BRPORT_PAD, IFLA_BRPORT_MCAST_FLOOD, IFLA_BRPORT_MCAST_TO_UCAST, IFLA_BRPORT_VLAN_TUNNEL, IFLA_BRPORT_BCAST_FLOOD, IFLA_BRPORT_GROUP_FWD_MASK, IFLA_BRPORT_NEIGH_SUPPRESS, IFLA_BRPORT_ISOLATED, IFLA_BRPORT_BACKUP_PORT, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) struct ifla_cacheinfo { __u32 max_reasm_len; __u32 tstamp; /* ipv6InterfaceTable updated timestamp */ __u32 reachable_time; __u32 retrans_time; }; 
enum { IFLA_INFO_UNSPEC, IFLA_INFO_KIND, IFLA_INFO_DATA, IFLA_INFO_XSTATS, IFLA_INFO_SLAVE_KIND, IFLA_INFO_SLAVE_DATA, __IFLA_INFO_MAX, }; #define IFLA_INFO_MAX (__IFLA_INFO_MAX - 1) /* VLAN section */ enum { IFLA_VLAN_UNSPEC, IFLA_VLAN_ID, IFLA_VLAN_FLAGS, IFLA_VLAN_EGRESS_QOS, IFLA_VLAN_INGRESS_QOS, IFLA_VLAN_PROTOCOL, __IFLA_VLAN_MAX, }; #define IFLA_VLAN_MAX (__IFLA_VLAN_MAX - 1) struct ifla_vlan_flags { __u32 flags; __u32 mask; }; enum { IFLA_VLAN_QOS_UNSPEC, IFLA_VLAN_QOS_MAPPING, __IFLA_VLAN_QOS_MAX }; #define IFLA_VLAN_QOS_MAX (__IFLA_VLAN_QOS_MAX - 1) struct ifla_vlan_qos_mapping { __u32 from; __u32 to; }; /* MACVLAN section */ enum { IFLA_MACVLAN_UNSPEC, IFLA_MACVLAN_MODE, IFLA_MACVLAN_FLAGS, IFLA_MACVLAN_MACADDR_MODE, IFLA_MACVLAN_MACADDR, IFLA_MACVLAN_MACADDR_DATA, IFLA_MACVLAN_MACADDR_COUNT, __IFLA_MACVLAN_MAX, }; #define IFLA_MACVLAN_MAX (__IFLA_MACVLAN_MAX - 1) enum macvlan_mode { MACVLAN_MODE_PRIVATE = 1, /* don't talk to other macvlans */ MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */ MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */ MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */ MACVLAN_MODE_SOURCE = 16,/* use source MAC address list to assign */ }; enum macvlan_macaddr_mode { MACVLAN_MACADDR_ADD, MACVLAN_MACADDR_DEL, MACVLAN_MACADDR_FLUSH, MACVLAN_MACADDR_SET, }; #define MACVLAN_FLAG_NOPROMISC 1 /* VRF section */ enum { IFLA_VRF_UNSPEC, IFLA_VRF_TABLE, __IFLA_VRF_MAX }; #define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1) enum { IFLA_VRF_PORT_UNSPEC, IFLA_VRF_PORT_TABLE, __IFLA_VRF_PORT_MAX }; #define IFLA_VRF_PORT_MAX (__IFLA_VRF_PORT_MAX - 1) /* MACSEC section */ enum { IFLA_MACSEC_UNSPEC, IFLA_MACSEC_SCI, IFLA_MACSEC_PORT, IFLA_MACSEC_ICV_LEN, IFLA_MACSEC_CIPHER_SUITE, IFLA_MACSEC_WINDOW, IFLA_MACSEC_ENCODING_SA, IFLA_MACSEC_ENCRYPT, IFLA_MACSEC_PROTECT, IFLA_MACSEC_INC_SCI, IFLA_MACSEC_ES, IFLA_MACSEC_SCB, IFLA_MACSEC_REPLAY_PROTECT, IFLA_MACSEC_VALIDATION, IFLA_MACSEC_PAD, __IFLA_MACSEC_MAX, }; #define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1) /* XFRM section */ enum { IFLA_XFRM_UNSPEC, IFLA_XFRM_LINK, IFLA_XFRM_IF_ID, __IFLA_XFRM_MAX }; #define IFLA_XFRM_MAX (__IFLA_XFRM_MAX - 1) enum macsec_validation_type { MACSEC_VALIDATE_DISABLED = 0, MACSEC_VALIDATE_CHECK = 1, MACSEC_VALIDATE_STRICT = 2, __MACSEC_VALIDATE_END, MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1, }; /* IPVLAN section */ enum { IFLA_IPVLAN_UNSPEC, IFLA_IPVLAN_MODE, IFLA_IPVLAN_FLAGS, __IFLA_IPVLAN_MAX }; #define IFLA_IPVLAN_MAX (__IFLA_IPVLAN_MAX - 1) enum ipvlan_mode { IPVLAN_MODE_L2 = 0, IPVLAN_MODE_L3, IPVLAN_MODE_L3S, IPVLAN_MODE_MAX }; #define IPVLAN_F_PRIVATE 0x01 #define IPVLAN_F_VEPA 0x02 /* VXLAN section */ enum { IFLA_VXLAN_UNSPEC, IFLA_VXLAN_ID, IFLA_VXLAN_GROUP, /* group or remote address */ IFLA_VXLAN_LINK, IFLA_VXLAN_LOCAL, IFLA_VXLAN_TTL, IFLA_VXLAN_TOS, IFLA_VXLAN_LEARNING, IFLA_VXLAN_AGEING, IFLA_VXLAN_LIMIT, IFLA_VXLAN_PORT_RANGE, /* source port */ IFLA_VXLAN_PROXY, IFLA_VXLAN_RSC, IFLA_VXLAN_L2MISS, IFLA_VXLAN_L3MISS, IFLA_VXLAN_PORT, /* destination port */ IFLA_VXLAN_GROUP6, IFLA_VXLAN_LOCAL6, IFLA_VXLAN_UDP_CSUM, IFLA_VXLAN_UDP_ZERO_CSUM6_TX, IFLA_VXLAN_UDP_ZERO_CSUM6_RX, IFLA_VXLAN_REMCSUM_TX, IFLA_VXLAN_REMCSUM_RX, IFLA_VXLAN_GBP, IFLA_VXLAN_REMCSUM_NOPARTIAL, IFLA_VXLAN_COLLECT_METADATA, IFLA_VXLAN_LABEL, IFLA_VXLAN_GPE, IFLA_VXLAN_TTL_INHERIT, IFLA_VXLAN_DF, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) struct ifla_vxlan_port_range { __be16 low; __be16 high; }; enum ifla_vxlan_df { VXLAN_DF_UNSET = 0, VXLAN_DF_SET, 
VXLAN_DF_INHERIT, __VXLAN_DF_END, VXLAN_DF_MAX = __VXLAN_DF_END - 1, }; /* GENEVE section */ enum { IFLA_GENEVE_UNSPEC, IFLA_GENEVE_ID, IFLA_GENEVE_REMOTE, IFLA_GENEVE_TTL, IFLA_GENEVE_TOS, IFLA_GENEVE_PORT, /* destination port */ IFLA_GENEVE_COLLECT_METADATA, IFLA_GENEVE_REMOTE6, IFLA_GENEVE_UDP_CSUM, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, IFLA_GENEVE_LABEL, IFLA_GENEVE_TTL_INHERIT, IFLA_GENEVE_DF, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) enum ifla_geneve_df { GENEVE_DF_UNSET = 0, GENEVE_DF_SET, GENEVE_DF_INHERIT, __GENEVE_DF_END, GENEVE_DF_MAX = __GENEVE_DF_END - 1, }; /* PPP section */ enum { IFLA_PPP_UNSPEC, IFLA_PPP_DEV_FD, __IFLA_PPP_MAX }; #define IFLA_PPP_MAX (__IFLA_PPP_MAX - 1) /* GTP section */ enum ifla_gtp_role { GTP_ROLE_GGSN = 0, GTP_ROLE_SGSN, }; enum { IFLA_GTP_UNSPEC, IFLA_GTP_FD0, IFLA_GTP_FD1, IFLA_GTP_PDP_HASHSIZE, IFLA_GTP_ROLE, __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) /* Bonding section */ enum { IFLA_BOND_UNSPEC, IFLA_BOND_MODE, IFLA_BOND_ACTIVE_SLAVE, IFLA_BOND_MIIMON, IFLA_BOND_UPDELAY, IFLA_BOND_DOWNDELAY, IFLA_BOND_USE_CARRIER, IFLA_BOND_ARP_INTERVAL, IFLA_BOND_ARP_IP_TARGET, IFLA_BOND_ARP_VALIDATE, IFLA_BOND_ARP_ALL_TARGETS, IFLA_BOND_PRIMARY, IFLA_BOND_PRIMARY_RESELECT, IFLA_BOND_FAIL_OVER_MAC, IFLA_BOND_XMIT_HASH_POLICY, IFLA_BOND_RESEND_IGMP, IFLA_BOND_NUM_PEER_NOTIF, IFLA_BOND_ALL_SLAVES_ACTIVE, IFLA_BOND_MIN_LINKS, IFLA_BOND_LP_INTERVAL, IFLA_BOND_PACKETS_PER_SLAVE, IFLA_BOND_AD_LACP_RATE, IFLA_BOND_AD_SELECT, IFLA_BOND_AD_INFO, IFLA_BOND_AD_ACTOR_SYS_PRIO, IFLA_BOND_AD_USER_PORT_KEY, IFLA_BOND_AD_ACTOR_SYSTEM, IFLA_BOND_TLB_DYNAMIC_LB, __IFLA_BOND_MAX, }; #define IFLA_BOND_MAX (__IFLA_BOND_MAX - 1) enum { IFLA_BOND_AD_INFO_UNSPEC, IFLA_BOND_AD_INFO_AGGREGATOR, IFLA_BOND_AD_INFO_NUM_PORTS, IFLA_BOND_AD_INFO_ACTOR_KEY, IFLA_BOND_AD_INFO_PARTNER_KEY, IFLA_BOND_AD_INFO_PARTNER_MAC, __IFLA_BOND_AD_INFO_MAX, }; #define IFLA_BOND_AD_INFO_MAX (__IFLA_BOND_AD_INFO_MAX - 1) enum { IFLA_BOND_SLAVE_UNSPEC, IFLA_BOND_SLAVE_STATE, IFLA_BOND_SLAVE_MII_STATUS, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT, IFLA_BOND_SLAVE_PERM_HWADDR, IFLA_BOND_SLAVE_QUEUE_ID, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID, IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE, IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE, __IFLA_BOND_SLAVE_MAX, }; #define IFLA_BOND_SLAVE_MAX (__IFLA_BOND_SLAVE_MAX - 1) /* SR-IOV virtual function management section */ enum { IFLA_VF_INFO_UNSPEC, IFLA_VF_INFO, __IFLA_VF_INFO_MAX, }; #define IFLA_VF_INFO_MAX (__IFLA_VF_INFO_MAX - 1) enum { IFLA_VF_UNSPEC, IFLA_VF_MAC, /* Hardware queue specific attributes */ IFLA_VF_VLAN, /* VLAN ID and QoS */ IFLA_VF_TX_RATE, /* Max TX Bandwidth Allocation */ IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */ IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */ IFLA_VF_RATE, /* Min and Max TX Bandwidth Allocation */ IFLA_VF_RSS_QUERY_EN, /* RSS Redirection Table and Hash Key query * on/off switch */ IFLA_VF_STATS, /* network device statistics */ IFLA_VF_TRUST, /* Trust VF */ IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */ IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */ IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */ __IFLA_VF_MAX, }; #define IFLA_VF_MAX (__IFLA_VF_MAX - 1) struct ifla_vf_mac { __u32 vf; __u8 mac[32]; /* MAX_ADDR_LEN */ }; struct ifla_vf_vlan { __u32 vf; __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ __u32 qos; }; enum { IFLA_VF_VLAN_INFO_UNSPEC, IFLA_VF_VLAN_INFO, /* VLAN ID, QoS and VLAN protocol */ __IFLA_VF_VLAN_INFO_MAX, }; #define 
IFLA_VF_VLAN_INFO_MAX (__IFLA_VF_VLAN_INFO_MAX - 1) #define MAX_VLAN_LIST_LEN 1 struct ifla_vf_vlan_info { __u32 vf; __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ __u32 qos; __be16 vlan_proto; /* VLAN protocol either 802.1Q or 802.1ad */ }; struct ifla_vf_tx_rate { __u32 vf; __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */ }; struct ifla_vf_rate { __u32 vf; __u32 min_tx_rate; /* Min Bandwidth in Mbps */ __u32 max_tx_rate; /* Max Bandwidth in Mbps */ }; struct ifla_vf_spoofchk { __u32 vf; __u32 setting; }; struct ifla_vf_guid { __u32 vf; __u64 guid; }; enum { IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */ IFLA_VF_LINK_STATE_ENABLE, /* link always up */ IFLA_VF_LINK_STATE_DISABLE, /* link always down */ __IFLA_VF_LINK_STATE_MAX, }; struct ifla_vf_link_state { __u32 vf; __u32 link_state; }; struct ifla_vf_rss_query_en { __u32 vf; __u32 setting; }; enum { IFLA_VF_STATS_RX_PACKETS, IFLA_VF_STATS_TX_PACKETS, IFLA_VF_STATS_RX_BYTES, IFLA_VF_STATS_TX_BYTES, IFLA_VF_STATS_BROADCAST, IFLA_VF_STATS_MULTICAST, IFLA_VF_STATS_PAD, IFLA_VF_STATS_RX_DROPPED, IFLA_VF_STATS_TX_DROPPED, __IFLA_VF_STATS_MAX, }; #define IFLA_VF_STATS_MAX (__IFLA_VF_STATS_MAX - 1) struct ifla_vf_trust { __u32 vf; __u32 setting; }; /* VF ports management section * * Nested layout of set/get msg is: * * [IFLA_NUM_VF] * [IFLA_VF_PORTS] * [IFLA_VF_PORT] * [IFLA_PORT_*], ... * [IFLA_VF_PORT] * [IFLA_PORT_*], ... * ... * [IFLA_PORT_SELF] * [IFLA_PORT_*], ... */ enum { IFLA_VF_PORT_UNSPEC, IFLA_VF_PORT, /* nest */ __IFLA_VF_PORT_MAX, }; #define IFLA_VF_PORT_MAX (__IFLA_VF_PORT_MAX - 1) enum { IFLA_PORT_UNSPEC, IFLA_PORT_VF, /* __u32 */ IFLA_PORT_PROFILE, /* string */ IFLA_PORT_VSI_TYPE, /* 802.1Qbg (pre-)standard VDP */ IFLA_PORT_INSTANCE_UUID, /* binary UUID */ IFLA_PORT_HOST_UUID, /* binary UUID */ IFLA_PORT_REQUEST, /* __u8 */ IFLA_PORT_RESPONSE, /* __u16, output only */ __IFLA_PORT_MAX, }; #define IFLA_PORT_MAX (__IFLA_PORT_MAX - 1) #define PORT_PROFILE_MAX 40 #define PORT_UUID_MAX 16 #define PORT_SELF_VF -1 enum { PORT_REQUEST_PREASSOCIATE = 0, PORT_REQUEST_PREASSOCIATE_RR, PORT_REQUEST_ASSOCIATE, PORT_REQUEST_DISASSOCIATE, }; enum { PORT_VDP_RESPONSE_SUCCESS = 0, PORT_VDP_RESPONSE_INVALID_FORMAT, PORT_VDP_RESPONSE_INSUFFICIENT_RESOURCES, PORT_VDP_RESPONSE_UNUSED_VTID, PORT_VDP_RESPONSE_VTID_VIOLATION, PORT_VDP_RESPONSE_VTID_VERSION_VIOALTION, PORT_VDP_RESPONSE_OUT_OF_SYNC, /* 0x08-0xFF reserved for future VDP use */ PORT_PROFILE_RESPONSE_SUCCESS = 0x100, PORT_PROFILE_RESPONSE_INPROGRESS, PORT_PROFILE_RESPONSE_INVALID, PORT_PROFILE_RESPONSE_BADSTATE, PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES, PORT_PROFILE_RESPONSE_ERROR, }; struct ifla_port_vsi { __u8 vsi_mgr_id; __u8 vsi_type_id[3]; __u8 vsi_type_version; __u8 pad[3]; }; /* IPoIB section */ enum { IFLA_IPOIB_UNSPEC, IFLA_IPOIB_PKEY, IFLA_IPOIB_MODE, IFLA_IPOIB_UMCAST, __IFLA_IPOIB_MAX }; enum { IPOIB_MODE_DATAGRAM = 0, /* using unreliable datagram QPs */ IPOIB_MODE_CONNECTED = 1, /* using connected QPs */ }; #define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1) /* HSR section */ enum { IFLA_HSR_UNSPEC, IFLA_HSR_SLAVE1, IFLA_HSR_SLAVE2, IFLA_HSR_MULTICAST_SPEC, /* Last byte of supervision addr */ IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */ IFLA_HSR_SEQ_NR, IFLA_HSR_VERSION, /* HSR version */ __IFLA_HSR_MAX, }; #define IFLA_HSR_MAX (__IFLA_HSR_MAX - 1) /* STATS section */ struct if_stats_msg { __u8 family; __u8 pad1; __u16 pad2; __u32 ifindex; __u32 filter_mask; }; /* A stats attribute can be netdev specific or a global stat. 
* For netdev stats, lets use the prefix IFLA_STATS_LINK_* */ enum { IFLA_STATS_UNSPEC, /* also used as 64bit pad attribute */ IFLA_STATS_LINK_64, IFLA_STATS_LINK_XSTATS, IFLA_STATS_LINK_XSTATS_SLAVE, IFLA_STATS_LINK_OFFLOAD_XSTATS, IFLA_STATS_AF_SPEC, __IFLA_STATS_MAX, }; #define IFLA_STATS_MAX (__IFLA_STATS_MAX - 1) #define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1)) /* These are embedded into IFLA_STATS_LINK_XSTATS: * [IFLA_STATS_LINK_XSTATS] * -> [LINK_XSTATS_TYPE_xxx] * -> [rtnl link type specific attributes] */ enum { LINK_XSTATS_TYPE_UNSPEC, LINK_XSTATS_TYPE_BRIDGE, __LINK_XSTATS_TYPE_MAX }; #define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1) /* These are stats embedded into IFLA_STATS_LINK_OFFLOAD_XSTATS */ enum { IFLA_OFFLOAD_XSTATS_UNSPEC, IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */ __IFLA_OFFLOAD_XSTATS_MAX }; #define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1) /* XDP section */ #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) #define XDP_FLAGS_SKB_MODE (1U << 1) #define XDP_FLAGS_DRV_MODE (1U << 2) #define XDP_FLAGS_HW_MODE (1U << 3) #define XDP_FLAGS_MODES (XDP_FLAGS_SKB_MODE | \ XDP_FLAGS_DRV_MODE | \ XDP_FLAGS_HW_MODE) #define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \ XDP_FLAGS_MODES) /* These are stored into IFLA_XDP_ATTACHED on dump. */ enum { XDP_ATTACHED_NONE = 0, XDP_ATTACHED_DRV, XDP_ATTACHED_SKB, XDP_ATTACHED_HW, XDP_ATTACHED_MULTI, }; enum { IFLA_XDP_UNSPEC, IFLA_XDP_FD, IFLA_XDP_ATTACHED, IFLA_XDP_FLAGS, IFLA_XDP_PROG_ID, IFLA_XDP_DRV_PROG_ID, IFLA_XDP_SKB_PROG_ID, IFLA_XDP_HW_PROG_ID, __IFLA_XDP_MAX, }; #define IFLA_XDP_MAX (__IFLA_XDP_MAX - 1) enum { IFLA_EVENT_NONE, IFLA_EVENT_REBOOT, /* internal reset / reboot */ IFLA_EVENT_FEATURES, /* change in offload features */ IFLA_EVENT_BONDING_FAILOVER, /* change in active slave */ IFLA_EVENT_NOTIFY_PEERS, /* re-sent grat. 
arp/ndisc */ IFLA_EVENT_IGMP_RESEND, /* re-sent IGMP JOIN */ IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */ }; /* tun section */ enum { IFLA_TUN_UNSPEC, IFLA_TUN_OWNER, IFLA_TUN_GROUP, IFLA_TUN_TYPE, IFLA_TUN_PI, IFLA_TUN_VNET_HDR, IFLA_TUN_PERSIST, IFLA_TUN_MULTI_QUEUE, IFLA_TUN_NUM_QUEUES, IFLA_TUN_NUM_DISABLED_QUEUES, __IFLA_TUN_MAX, }; #define IFLA_TUN_MAX (__IFLA_TUN_MAX - 1) /* rmnet section */ #define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0) #define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) #define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) #define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) enum { IFLA_RMNET_UNSPEC, IFLA_RMNET_MUX_ID, IFLA_RMNET_FLAGS, __IFLA_RMNET_MAX, }; #define IFLA_RMNET_MAX (__IFLA_RMNET_MAX - 1) struct ifla_rmnet_flags { __u32 flags; __u32 mask; }; #endif /* _UAPI_LINUX_IF_LINK_H */ gobpf-0.2.0/elf/include/uapi/linux/netlink.h000066400000000000000000000173241404447410300207150ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI__LINUX_NETLINK_H #define _UAPI__LINUX_NETLINK_H #include <linux/const.h> #include <linux/socket.h> /* for __kernel_sa_family_t */ #include <linux/types.h> #define NETLINK_ROUTE 0 /* Routing/device hook */ #define NETLINK_UNUSED 1 /* Unused number */ #define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */ #define NETLINK_FIREWALL 3 /* Unused number, formerly ip_queue */ #define NETLINK_SOCK_DIAG 4 /* socket monitoring */ #define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */ #define NETLINK_XFRM 6 /* ipsec */ #define NETLINK_SELINUX 7 /* SELinux event notifications */ #define NETLINK_ISCSI 8 /* Open-iSCSI */ #define NETLINK_AUDIT 9 /* auditing */ #define NETLINK_FIB_LOOKUP 10 #define NETLINK_CONNECTOR 11 #define NETLINK_NETFILTER 12 /* netfilter subsystem */ #define NETLINK_IP6_FW 13 #define NETLINK_DNRTMSG 14 /* DECnet routing messages */ #define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */ #define NETLINK_GENERIC 16 /* leave room for NETLINK_DM (DM Events) */ #define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */ #define NETLINK_ECRYPTFS 19 #define NETLINK_RDMA 20 #define NETLINK_CRYPTO 21 /* Crypto layer */ #define NETLINK_SMC 22 /* SMC monitoring */ #define NETLINK_INET_DIAG NETLINK_SOCK_DIAG #define MAX_LINKS 32 struct sockaddr_nl { __kernel_sa_family_t nl_family; /* AF_NETLINK */ unsigned short nl_pad; /* zero */ __u32 nl_pid; /* port ID */ __u32 nl_groups; /* multicast groups mask */ }; struct nlmsghdr { __u32 nlmsg_len; /* Length of message including header */ __u16 nlmsg_type; /* Message content */ __u16 nlmsg_flags; /* Additional flags */ __u32 nlmsg_seq; /* Sequence number */ __u32 nlmsg_pid; /* Sending process port ID */ }; /* Flags values */ #define NLM_F_REQUEST 0x01 /* It is a request message.
*/ #define NLM_F_MULTI 0x02 /* Multipart message, terminated by NLMSG_DONE */ #define NLM_F_ACK 0x04 /* Reply with ack, with zero or error code */ #define NLM_F_ECHO 0x08 /* Echo this request */ #define NLM_F_DUMP_INTR 0x10 /* Dump was inconsistent due to sequence change */ #define NLM_F_DUMP_FILTERED 0x20 /* Dump was filtered as requested */ /* Modifiers to GET request */ #define NLM_F_ROOT 0x100 /* specify tree root */ #define NLM_F_MATCH 0x200 /* return all matching */ #define NLM_F_ATOMIC 0x400 /* atomic GET */ #define NLM_F_DUMP (NLM_F_ROOT|NLM_F_MATCH) /* Modifiers to NEW request */ #define NLM_F_REPLACE 0x100 /* Override existing */ #define NLM_F_EXCL 0x200 /* Do not touch, if it exists */ #define NLM_F_CREATE 0x400 /* Create, if it does not exist */ #define NLM_F_APPEND 0x800 /* Add to end of list */ /* Modifiers to DELETE request */ #define NLM_F_NONREC 0x100 /* Do not delete recursively */ /* Flags for ACK message */ #define NLM_F_CAPPED 0x100 /* request was capped */ #define NLM_F_ACK_TLVS 0x200 /* extended ACK TLVs were included */ /* 4.4BSD ADD NLM_F_CREATE|NLM_F_EXCL 4.4BSD CHANGE NLM_F_REPLACE True CHANGE NLM_F_CREATE|NLM_F_REPLACE Append NLM_F_CREATE Check NLM_F_EXCL */ #define NLMSG_ALIGNTO 4U #define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) ) #define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr))) #define NLMSG_LENGTH(len) ((len) + NLMSG_HDRLEN) #define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len)) #define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0))) #define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \ (struct nlmsghdr*)(((char*)(nlh)) + NLMSG_ALIGN((nlh)->nlmsg_len))) #define NLMSG_OK(nlh,len) ((len) >= (int)sizeof(struct nlmsghdr) && \ (nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \ (nlh)->nlmsg_len <= (len)) #define NLMSG_PAYLOAD(nlh,len) ((nlh)->nlmsg_len - NLMSG_SPACE((len))) #define NLMSG_NOOP 0x1 /* Nothing.
*/ #define NLMSG_ERROR 0x2 /* Error */ #define NLMSG_DONE 0x3 /* End of a dump */ #define NLMSG_OVERRUN 0x4 /* Data lost */ #define NLMSG_MIN_TYPE 0x10 /* < 0x10: reserved control messages */ struct nlmsgerr { int error; struct nlmsghdr msg; /* * followed by the message contents unless NETLINK_CAP_ACK was set * or the ACK indicates success (error == 0) * message length is aligned with NLMSG_ALIGN() */ /* * followed by TLVs defined in enum nlmsgerr_attrs * if NETLINK_EXT_ACK was set */ }; /** * enum nlmsgerr_attrs - nlmsgerr attributes * @NLMSGERR_ATTR_UNUSED: unused * @NLMSGERR_ATTR_MSG: error message string (string) * @NLMSGERR_ATTR_OFFS: offset of the invalid attribute in the original * message, counting from the beginning of the header (u32) * @NLMSGERR_ATTR_COOKIE: arbitrary subsystem specific cookie to * be used - in the success case - to identify a created * object or operation or similar (binary) * @__NLMSGERR_ATTR_MAX: number of attributes * @NLMSGERR_ATTR_MAX: highest attribute number */ enum nlmsgerr_attrs { NLMSGERR_ATTR_UNUSED, NLMSGERR_ATTR_MSG, NLMSGERR_ATTR_OFFS, NLMSGERR_ATTR_COOKIE, __NLMSGERR_ATTR_MAX, NLMSGERR_ATTR_MAX = __NLMSGERR_ATTR_MAX - 1 }; #define NETLINK_ADD_MEMBERSHIP 1 #define NETLINK_DROP_MEMBERSHIP 2 #define NETLINK_PKTINFO 3 #define NETLINK_BROADCAST_ERROR 4 #define NETLINK_NO_ENOBUFS 5 #ifndef __KERNEL__ #define NETLINK_RX_RING 6 #define NETLINK_TX_RING 7 #endif #define NETLINK_LISTEN_ALL_NSID 8 #define NETLINK_LIST_MEMBERSHIPS 9 #define NETLINK_CAP_ACK 10 #define NETLINK_EXT_ACK 11 #define NETLINK_GET_STRICT_CHK 12 struct nl_pktinfo { __u32 group; }; struct nl_mmap_req { unsigned int nm_block_size; unsigned int nm_block_nr; unsigned int nm_frame_size; unsigned int nm_frame_nr; }; struct nl_mmap_hdr { unsigned int nm_status; unsigned int nm_len; __u32 nm_group; /* credentials */ __u32 nm_pid; __u32 nm_uid; __u32 nm_gid; }; #ifndef __KERNEL__ enum nl_mmap_status { NL_MMAP_STATUS_UNUSED, NL_MMAP_STATUS_RESERVED, NL_MMAP_STATUS_VALID, NL_MMAP_STATUS_COPY, NL_MMAP_STATUS_SKIP, }; #define NL_MMAP_MSG_ALIGNMENT NLMSG_ALIGNTO #define NL_MMAP_MSG_ALIGN(sz) __ALIGN_KERNEL(sz, NL_MMAP_MSG_ALIGNMENT) #define NL_MMAP_HDRLEN NL_MMAP_MSG_ALIGN(sizeof(struct nl_mmap_hdr)) #endif #define NET_MAJOR 36 /* Major 36 is reserved for networking */ enum { NETLINK_UNCONNECTED = 0, NETLINK_CONNECTED, }; /* * <------- NLA_HDRLEN ------> <-- NLA_ALIGN(payload)--> * +---------------------+- - -+- - - - - - - - - -+- - -+ * | Header | Pad | Payload | Pad | * | (struct nlattr) | ing | | ing | * +---------------------+- - -+- - - - - - - - - -+- - -+ * <-------------- nlattr->nla_len --------------> */ struct nlattr { __u16 nla_len; __u16 nla_type; }; /* * nla_type (16 bits) * +---+---+-------------------------------+ * | N | O | Attribute Type | * +---+---+-------------------------------+ * N := Carries nested attributes * O := Payload stored in network byte order * * Note: The N and O flag are mutually exclusive. */ #define NLA_F_NESTED (1 << 15) #define NLA_F_NET_BYTEORDER (1 << 14) #define NLA_TYPE_MASK ~(NLA_F_NESTED | NLA_F_NET_BYTEORDER) #define NLA_ALIGNTO 4 #define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1)) #define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr))) /* Generic 32 bitflags attribute content sent to the kernel. 
* * The value is a bitmap that defines the values being set * The selector is a bitmask that defines which value is legit * * Examples: * value = 0x0, and selector = 0x1 * implies we are selecting bit 1 and we want to set its value to 0. * * value = 0x2, and selector = 0x2 * implies we are selecting bit 2 and we want to set its value to 1. * */ struct nla_bitfield32 { __u32 value; __u32 selector; }; #endif /* _UAPI__LINUX_NETLINK_H */ gobpf-0.2.0/elf/include/uapi/linux/perf_event.h000066400000000000000000001041231404447410300214040ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Performance events: * * Copyright (C) 2008-2009, Thomas Gleixner * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra * * Data type definitions, declarations, prototypes. * * Started by: Thomas Gleixner and Ingo Molnar * * For licensing details see kernel-base/COPYING */ #ifndef _UAPI_LINUX_PERF_EVENT_H #define _UAPI_LINUX_PERF_EVENT_H #include <linux/types.h> #include <linux/ioctl.h> #include <asm/byteorder.h> /* * User-space ABI bits: */ /* * attr.type */ enum perf_type_id { PERF_TYPE_HARDWARE = 0, PERF_TYPE_SOFTWARE = 1, PERF_TYPE_TRACEPOINT = 2, PERF_TYPE_HW_CACHE = 3, PERF_TYPE_RAW = 4, PERF_TYPE_BREAKPOINT = 5, PERF_TYPE_MAX, /* non-ABI */ }; /* * Generalized performance event event_id types, used by the * attr.event_id parameter of the sys_perf_event_open() * syscall: */ enum perf_hw_id { /* * Common hardware events, generalized by the kernel: */ PERF_COUNT_HW_CPU_CYCLES = 0, PERF_COUNT_HW_INSTRUCTIONS = 1, PERF_COUNT_HW_CACHE_REFERENCES = 2, PERF_COUNT_HW_CACHE_MISSES = 3, PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, PERF_COUNT_HW_BRANCH_MISSES = 5, PERF_COUNT_HW_BUS_CYCLES = 6, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, PERF_COUNT_HW_REF_CPU_CYCLES = 9, PERF_COUNT_HW_MAX, /* non-ABI */ }; /* * Generalized hardware cache events: * * { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x * { read, write, prefetch } x * { accesses, misses } */ enum perf_hw_cache_id { PERF_COUNT_HW_CACHE_L1D = 0, PERF_COUNT_HW_CACHE_L1I = 1, PERF_COUNT_HW_CACHE_LL = 2, PERF_COUNT_HW_CACHE_DTLB = 3, PERF_COUNT_HW_CACHE_ITLB = 4, PERF_COUNT_HW_CACHE_BPU = 5, PERF_COUNT_HW_CACHE_NODE = 6, PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ }; enum perf_hw_cache_op_id { PERF_COUNT_HW_CACHE_OP_READ = 0, PERF_COUNT_HW_CACHE_OP_WRITE = 1, PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ }; enum perf_hw_cache_op_result_id { PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, PERF_COUNT_HW_CACHE_RESULT_MISS = 1, PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ }; /* * Special "software" events provided by the kernel, even if the hardware * does not support performance events. These events measure various * physical and sw events of the kernel (and allow the profiling of them as * well): */ enum perf_sw_ids { PERF_COUNT_SW_CPU_CLOCK = 0, PERF_COUNT_SW_TASK_CLOCK = 1, PERF_COUNT_SW_PAGE_FAULTS = 2, PERF_COUNT_SW_CONTEXT_SWITCHES = 3, PERF_COUNT_SW_CPU_MIGRATIONS = 4, PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, PERF_COUNT_SW_EMULATION_FAULTS = 8, PERF_COUNT_SW_DUMMY = 9, PERF_COUNT_SW_BPF_OUTPUT = 10, PERF_COUNT_SW_MAX, /* non-ABI */ }; /* * Bits that can be set in attr.sample_type to request information * in the overflow packets.
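 * For example, setting attr.sample_type to * PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME * requests the instruction pointer, the pid/tid and a timestamp with * every sample (an illustrative combination; any of the bits below may * be OR-ed together).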
*/ enum perf_event_sample_format { PERF_SAMPLE_IP = 1U << 0, PERF_SAMPLE_TID = 1U << 1, PERF_SAMPLE_TIME = 1U << 2, PERF_SAMPLE_ADDR = 1U << 3, PERF_SAMPLE_READ = 1U << 4, PERF_SAMPLE_CALLCHAIN = 1U << 5, PERF_SAMPLE_ID = 1U << 6, PERF_SAMPLE_CPU = 1U << 7, PERF_SAMPLE_PERIOD = 1U << 8, PERF_SAMPLE_STREAM_ID = 1U << 9, PERF_SAMPLE_RAW = 1U << 10, PERF_SAMPLE_BRANCH_STACK = 1U << 11, PERF_SAMPLE_REGS_USER = 1U << 12, PERF_SAMPLE_STACK_USER = 1U << 13, PERF_SAMPLE_WEIGHT = 1U << 14, PERF_SAMPLE_DATA_SRC = 1U << 15, PERF_SAMPLE_IDENTIFIER = 1U << 16, PERF_SAMPLE_TRANSACTION = 1U << 17, PERF_SAMPLE_REGS_INTR = 1U << 18, PERF_SAMPLE_PHYS_ADDR = 1U << 19, PERF_SAMPLE_AUX = 1U << 20, PERF_SAMPLE_CGROUP = 1U << 21, PERF_SAMPLE_MAX = 1U << 22, /* non-ABI */ __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */ }; /* * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set * * If the user does not pass priv level information via branch_sample_type, * the kernel uses the event's priv level. Branch and event priv levels do * not have to match. Branch priv level is checked for permissions. * * The branch types can be combined, however BRANCH_ANY covers all types * of branches and therefore it supersedes all the other types. */ enum perf_branch_sample_type_shift { PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */ PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */ PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */ PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */ PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */ PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */ PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */ PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */ PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */ PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */ PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */ PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */ PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */ PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /* no flags */ PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /* no cycles */ PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /* save branch type */ PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, /* save low level index of raw branch records */ PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ }; enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT, PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT, PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT, PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT, PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT, PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT, PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT, PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT, PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT, PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT, PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT, PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT, PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT, PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << 
PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT, PERF_SAMPLE_BRANCH_TYPE_SAVE = 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT, PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT, PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; /* * Common flow change classification */ enum { PERF_BR_UNKNOWN = 0, /* unknown */ PERF_BR_COND = 1, /* conditional */ PERF_BR_UNCOND = 2, /* unconditional */ PERF_BR_IND = 3, /* indirect */ PERF_BR_CALL = 4, /* function call */ PERF_BR_IND_CALL = 5, /* indirect function call */ PERF_BR_RET = 6, /* function return */ PERF_BR_SYSCALL = 7, /* syscall */ PERF_BR_SYSRET = 8, /* syscall return */ PERF_BR_COND_CALL = 9, /* conditional function call */ PERF_BR_COND_RET = 10, /* conditional function return */ PERF_BR_MAX, }; #define PERF_SAMPLE_BRANCH_PLM_ALL \ (PERF_SAMPLE_BRANCH_USER|\ PERF_SAMPLE_BRANCH_KERNEL|\ PERF_SAMPLE_BRANCH_HV) /* * Values to determine ABI of the registers dump. */ enum perf_sample_regs_abi { PERF_SAMPLE_REGS_ABI_NONE = 0, PERF_SAMPLE_REGS_ABI_32 = 1, PERF_SAMPLE_REGS_ABI_64 = 2, }; /* * Values for the memory transaction event qualifier, mostly for * abort events. Multiple bits can be set. */ enum { PERF_TXN_ELISION = (1 << 0), /* From elision */ PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */ PERF_TXN_SYNC = (1 << 2), /* Instruction is related */ PERF_TXN_ASYNC = (1 << 3), /* Instruction not related */ PERF_TXN_RETRY = (1 << 4), /* Retry possible */ PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */ PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */ PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */ PERF_TXN_MAX = (1 << 8), /* non-ABI */ /* bits 32..63 are reserved for the abort code */ PERF_TXN_ABORT_MASK = (0xffffffffULL << 32), PERF_TXN_ABORT_SHIFT = 32, }; /* * The format of the data returned by read() on a perf event fd, * as specified by attr.read_format: * * struct read_format { * { u64 value; * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING * { u64 id; } && PERF_FORMAT_ID * } && !PERF_FORMAT_GROUP * * { u64 nr; * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING * { u64 value; * { u64 id; } && PERF_FORMAT_ID * } cntr[nr]; * } && PERF_FORMAT_GROUP * }; */ enum perf_event_read_format { PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, PERF_FORMAT_ID = 1U << 2, PERF_FORMAT_GROUP = 1U << 3, PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ }; #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ #define PERF_ATTR_SIZE_VER1 72 /* add: config2 */ #define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */ #define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */ /* add: sample_stack_user */ #define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */ #define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */ #define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */ /* * Hardware event_id to monitor via a performance monitoring event: * * @sample_max_stack: Max number of frame pointers in a callchain, * should be < /proc/sys/kernel/perf_event_max_stack */ struct perf_event_attr { /* * Major type: hardware/software/tracepoint/etc. */ __u32 type; /* * Size of the attr structure, for fwd/bwd compat. */ __u32 size; /* * Type specific configuration information. 
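 * For example (illustrative): with type == PERF_TYPE_HARDWARE, config * selects one of enum perf_hw_id above, e.g. * attr.config = PERF_COUNT_HW_CPU_CYCLES; with * type == PERF_TYPE_TRACEPOINT it holds a tracepoint id.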
*/ __u64 config; union { __u64 sample_period; __u64 sample_freq; }; __u64 sample_type; __u64 read_format; __u64 disabled : 1, /* off by default */ inherit : 1, /* children inherit it */ pinned : 1, /* must always be on PMU */ exclusive : 1, /* only group on PMU */ exclude_user : 1, /* don't count user */ exclude_kernel : 1, /* ditto kernel */ exclude_hv : 1, /* ditto hypervisor */ exclude_idle : 1, /* don't count when idle */ mmap : 1, /* include mmap data */ comm : 1, /* include comm data */ freq : 1, /* use freq, not period */ inherit_stat : 1, /* per task counts */ enable_on_exec : 1, /* next exec enables */ task : 1, /* trace fork/exit */ watermark : 1, /* wakeup_watermark */ /* * precise_ip: * * 0 - SAMPLE_IP can have arbitrary skid * 1 - SAMPLE_IP must have constant skid * 2 - SAMPLE_IP requested to have 0 skid * 3 - SAMPLE_IP must have 0 skid * * See also PERF_RECORD_MISC_EXACT_IP */ precise_ip : 2, /* skid constraint */ mmap_data : 1, /* non-exec mmap data */ sample_id_all : 1, /* sample_type all events */ exclude_host : 1, /* don't count in host */ exclude_guest : 1, /* don't count in guest */ exclude_callchain_kernel : 1, /* exclude kernel callchains */ exclude_callchain_user : 1, /* exclude user callchains */ mmap2 : 1, /* include mmap with inode data */ comm_exec : 1, /* flag comm events that are due to an exec */ use_clockid : 1, /* use @clockid for time fields */ context_switch : 1, /* context switch data */ write_backward : 1, /* Write ring buffer from end to beginning */ namespaces : 1, /* include namespaces data */ ksymbol : 1, /* include ksymbol events */ bpf_event : 1, /* include bpf events */ aux_output : 1, /* generate AUX records instead of events */ cgroup : 1, /* include cgroup events */ __reserved_1 : 31; union { __u32 wakeup_events; /* wakeup every n events */ __u32 wakeup_watermark; /* bytes before wakeup */ }; __u32 bp_type; union { __u64 bp_addr; __u64 kprobe_func; /* for perf_kprobe */ __u64 uprobe_path; /* for perf_uprobe */ __u64 config1; /* extension of config */ }; union { __u64 bp_len; __u64 kprobe_addr; /* when kprobe_func == NULL */ __u64 probe_offset; /* for perf_[k,u]probe */ __u64 config2; /* extension of config1 */ }; __u64 branch_sample_type; /* enum perf_branch_sample_type */ /* * Defines set of user regs to dump on samples. * See asm/perf_regs.h for details. */ __u64 sample_regs_user; /* * Defines size of the user stack to dump on samples. */ __u32 sample_stack_user; __s32 clockid; /* * Defines set of regs to dump for each sample * state captured on: * - precise = 0: PMU interrupt * - precise > 0: sampled instruction * * See asm/perf_regs.h for details. */ __u64 sample_regs_intr; /* * Wakeup watermark for AUX area */ __u32 aux_watermark; __u16 sample_max_stack; __u16 __reserved_2; __u32 aux_sample_size; __u32 __reserved_3; }; /* * Structure used by below PERF_EVENT_IOC_QUERY_BPF command * to query bpf programs attached to the same perf tracepoint * as the given perf event. 
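 * * A minimal usage sketch (hypothetical perf_event_fd, illustrative * capacity of 16 ids, error handling omitted), using the * PERF_EVENT_IOC_QUERY_BPF ioctl defined below: * * struct perf_event_query_bpf *q; * * q = calloc(1, sizeof(*q) + 16 * sizeof(__u32)); * q->ids_len = 16; * err = ioctl(perf_event_fd, PERF_EVENT_IOC_QUERY_BPF, q); * * On success, q->prog_cnt program ids have been stored in q->ids[].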
*/ struct perf_event_query_bpf { /* * The below ids array length */ __u32 ids_len; /* * Set by the kernel to indicate the number of * available programs */ __u32 prog_cnt; /* * User provided buffer to store program ids */ __u32 ids[0]; }; /* * Ioctls that can be done on a perf event fd: */ #define PERF_EVENT_IOC_ENABLE _IO ('$', 0) #define PERF_EVENT_IOC_DISABLE _IO ('$', 1) #define PERF_EVENT_IOC_REFRESH _IO ('$', 2) #define PERF_EVENT_IOC_RESET _IO ('$', 3) #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) #define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) #define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *) #define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32) #define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32) #define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *) #define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW('$', 11, struct perf_event_attr *) enum perf_event_ioc_flags { PERF_IOC_FLAG_GROUP = 1U << 0, }; /* * Structure of the page that can be mapped via mmap */ struct perf_event_mmap_page { __u32 version; /* version number of this structure */ __u32 compat_version; /* lowest version this is compat with */ /* * Bits needed to read the hw events in user-space. * * u32 seq, time_mult, time_shift, index, width; * u64 count, enabled, running; * u64 cyc, time_offset; * s64 pmc = 0; * * do { * seq = pc->lock; * barrier() * * enabled = pc->time_enabled; * running = pc->time_running; * * if (pc->cap_usr_time && enabled != running) { * cyc = rdtsc(); * time_offset = pc->time_offset; * time_mult = pc->time_mult; * time_shift = pc->time_shift; * } * * index = pc->index; * count = pc->offset; * if (pc->cap_user_rdpmc && index) { * width = pc->pmc_width; * pmc = rdpmc(index - 1); * } * * barrier(); * } while (pc->lock != seq); * * NOTE: for obvious reason this only works on self-monitoring * processes. */ __u32 lock; /* seqlock for synchronization */ __u32 index; /* hardware event identifier */ __s64 offset; /* add to hardware event value */ __u64 time_enabled; /* time event active */ __u64 time_running; /* time event on cpu */ union { __u64 capabilities; struct { __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */ cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */ cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */ cap_user_time : 1, /* The time_* fields are used */ cap_user_time_zero : 1, /* The time_zero field is used */ cap_____res : 59; }; }; /* * If cap_user_rdpmc this field provides the bit-width of the value * read using the rdpmc() or equivalent instruction. This can be used * to sign extend the result like: * * pmc <<= 64 - width; * pmc >>= 64 - width; // signed shift right * count += pmc; */ __u16 pmc_width; /* * If cap_usr_time the below fields can be used to compute the time * delta since time_enabled (in ns) using rdtsc or similar. * * u64 quot, rem; * u64 delta; * * quot = (cyc >> time_shift); * rem = cyc & (((u64)1 << time_shift) - 1); * delta = time_offset + quot * time_mult + * ((rem * time_mult) >> time_shift); * * Where time_offset,time_mult,time_shift and cyc are read in the * seqcount loop described above. 
This delta can then be added to * enabled and possibly running (if index), improving the scaling: * * enabled += delta; * if (index) * running += delta; * * quot = count / running; * rem = count % running; * count = quot * enabled + (rem * enabled) / running; */ __u16 time_shift; __u32 time_mult; __u64 time_offset; /* * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated * from sample timestamps. * * time = timestamp - time_zero; * quot = time / time_mult; * rem = time % time_mult; * cyc = (quot << time_shift) + (rem << time_shift) / time_mult; * * And vice versa: * * quot = cyc >> time_shift; * rem = cyc & (((u64)1 << time_shift) - 1); * timestamp = time_zero + quot * time_mult + * ((rem * time_mult) >> time_shift); */ __u64 time_zero; __u32 size; /* Header size up to __reserved[] fields. */ /* * Hole for extension of the self monitor capabilities */ __u8 __reserved[118*8+4]; /* align to 1k. */ /* * Control data for the mmap() data buffer. * * User-space reading the @data_head value should issue an smp_rmb(), * after reading this value. * * When the mapping is PROT_WRITE the @data_tail value should be * written by userspace to reflect the last read data, after issuing * an smp_mb() to separate the data read from the ->data_tail store. * In this case the kernel will not over-write unread data. * * See perf_output_put_handle() for the data ordering. * * data_{offset,size} indicate the location and size of the perf record * buffer within the mmapped area. */ __u64 data_head; /* head in the data section */ __u64 data_tail; /* user-space written tail */ __u64 data_offset; /* where the buffer starts */ __u64 data_size; /* data buffer size */ /* * AUX area is defined by aux_{offset,size} fields that should be set * by the userspace, so that * * aux_offset >= data_offset + data_size * * prior to mmap()ing it. Size of the mmap()ed area should be aux_size. * * Ring buffer pointers aux_{head,tail} have the same semantics as * data_{head,tail} and same ordering rules apply. */ __u64 aux_head; __u64 aux_tail; __u64 aux_offset; __u64 aux_size; }; #define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0) #define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0) #define PERF_RECORD_MISC_KERNEL (1 << 0) #define PERF_RECORD_MISC_USER (2 << 0) #define PERF_RECORD_MISC_HYPERVISOR (3 << 0) #define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0) #define PERF_RECORD_MISC_GUEST_USER (5 << 0) /* * Indicates that /proc/PID/maps parsing was truncated by a timeout. */ #define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12) /* * Following PERF_RECORD_MISC_* are used on different * events, so can reuse the same bit position: * * PERF_RECORD_MISC_MMAP_DATA - PERF_RECORD_MMAP* events * PERF_RECORD_MISC_COMM_EXEC - PERF_RECORD_COMM event * PERF_RECORD_MISC_FORK_EXEC - PERF_RECORD_FORK event (perf internal) * PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events */ #define PERF_RECORD_MISC_MMAP_DATA (1 << 13) #define PERF_RECORD_MISC_COMM_EXEC (1 << 13) #define PERF_RECORD_MISC_FORK_EXEC (1 << 13) #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) /* * These PERF_RECORD_MISC_* flags below are safely reused * for the following events: * * PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events * * * PERF_RECORD_MISC_EXACT_IP: * Indicates that the content of PERF_SAMPLE_IP points to * the actual instruction that triggered the event. See also * perf_event_attr::precise_ip.
* * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT: * Indicates that thread was preempted in TASK_RUNNING state. */ #define PERF_RECORD_MISC_EXACT_IP (1 << 14) #define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14) /* * Reserve the last bit to indicate some extended misc field */ #define PERF_RECORD_MISC_EXT_RESERVED (1 << 15) struct perf_event_header { __u32 type; __u16 misc; __u16 size; }; struct perf_ns_link_info { __u64 dev; __u64 ino; }; enum { NET_NS_INDEX = 0, UTS_NS_INDEX = 1, IPC_NS_INDEX = 2, PID_NS_INDEX = 3, USER_NS_INDEX = 4, MNT_NS_INDEX = 5, CGROUP_NS_INDEX = 6, NR_NAMESPACES, /* number of available namespaces */ }; enum perf_event_type { /* * If perf_event_attr.sample_id_all is set then all event types will * have the sample_type selected fields related to where/when * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU, * IDENTIFIER) described in PERF_RECORD_SAMPLE below, it will be stashed * just after the perf_event_header and the fields already present for * the existing fields, i.e. at the end of the payload. That way a newer * perf.data file will be supported by older perf tools, with these new * optional fields being ignored. * * struct sample_id { * { u32 pid, tid; } && PERF_SAMPLE_TID * { u64 time; } && PERF_SAMPLE_TIME * { u64 id; } && PERF_SAMPLE_ID * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID * { u32 cpu, res; } && PERF_SAMPLE_CPU * { u64 id; } && PERF_SAMPLE_IDENTIFIER * } && perf_event_attr::sample_id_all * * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed * relative to header.size. */ /* * The MMAP events record the PROT_EXEC mappings so that we can * correlate userspace IPs to code. They have the following structure: * * struct { * struct perf_event_header header; * * u32 pid, tid; * u64 addr; * u64 len; * u64 pgoff; * char filename[]; * struct sample_id sample_id; * }; */ PERF_RECORD_MMAP = 1, /* * struct { * struct perf_event_header header; * u64 id; * u64 lost; * struct sample_id sample_id; * }; */ PERF_RECORD_LOST = 2, /* * struct { * struct perf_event_header header; * * u32 pid, tid; * char comm[]; * struct sample_id sample_id; * }; */ PERF_RECORD_COMM = 3, /* * struct { * struct perf_event_header header; * u32 pid, ppid; * u32 tid, ptid; * u64 time; * struct sample_id sample_id; * }; */ PERF_RECORD_EXIT = 4, /* * struct { * struct perf_event_header header; * u64 time; * u64 id; * u64 stream_id; * struct sample_id sample_id; * }; */ PERF_RECORD_THROTTLE = 5, PERF_RECORD_UNTHROTTLE = 6, /* * struct { * struct perf_event_header header; * u32 pid, ppid; * u32 tid, ptid; * u64 time; * struct sample_id sample_id; * }; */ PERF_RECORD_FORK = 7, /* * struct { * struct perf_event_header header; * u32 pid, tid; * * struct read_format values; * struct sample_id sample_id; * }; */ PERF_RECORD_READ = 8, /* * struct { * struct perf_event_header header; * * # * # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. * # The advantage of PERF_SAMPLE_IDENTIFIER is that its position * # is fixed relative to header. 
* # * * { u64 id; } && PERF_SAMPLE_IDENTIFIER * { u64 ip; } && PERF_SAMPLE_IP * { u32 pid, tid; } && PERF_SAMPLE_TID * { u64 time; } && PERF_SAMPLE_TIME * { u64 addr; } && PERF_SAMPLE_ADDR * { u64 id; } && PERF_SAMPLE_ID * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID * { u32 cpu, res; } && PERF_SAMPLE_CPU * { u64 period; } && PERF_SAMPLE_PERIOD * * { struct read_format values; } && PERF_SAMPLE_READ * * { u64 nr, * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN * * # * # The RAW record below is opaque data wrt the ABI * # * # That is, the ABI doesn't make any promises wrt to * # the stability of its content, it may vary depending * # on event, hardware, kernel version and phase of * # the moon. * # * # In other words, PERF_SAMPLE_RAW contents are not an ABI. * # * * { u32 size; * char data[size];}&& PERF_SAMPLE_RAW * * { u64 nr; * { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX * { u64 from, to, flags } lbr[nr]; * } && PERF_SAMPLE_BRANCH_STACK * * { u64 abi; # enum perf_sample_regs_abi * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER * * { u64 size; * char data[size]; * u64 dyn_size; } && PERF_SAMPLE_STACK_USER * * { u64 weight; } && PERF_SAMPLE_WEIGHT * { u64 data_src; } && PERF_SAMPLE_DATA_SRC * { u64 transaction; } && PERF_SAMPLE_TRANSACTION * { u64 abi; # enum perf_sample_regs_abi * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR * { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR * { u64 size; * char data[size]; } && PERF_SAMPLE_AUX * }; */ PERF_RECORD_SAMPLE = 9, /* * The MMAP2 records are an augmented version of MMAP, they add * maj, min, ino numbers to be used to uniquely identify each mapping * * struct { * struct perf_event_header header; * * u32 pid, tid; * u64 addr; * u64 len; * u64 pgoff; * u32 maj; * u32 min; * u64 ino; * u64 ino_generation; * u32 prot, flags; * char filename[]; * struct sample_id sample_id; * }; */ PERF_RECORD_MMAP2 = 10, /* * Records that new data landed in the AUX buffer part. * * struct { * struct perf_event_header header; * * u64 aux_offset; * u64 aux_size; * u64 flags; * struct sample_id sample_id; * }; */ PERF_RECORD_AUX = 11, /* * Indicates that instruction trace has started * * struct { * struct perf_event_header header; * u32 pid; * u32 tid; * struct sample_id sample_id; * }; */ PERF_RECORD_ITRACE_START = 12, /* * Records the dropped/lost sample number. * * struct { * struct perf_event_header header; * * u64 lost; * struct sample_id sample_id; * }; */ PERF_RECORD_LOST_SAMPLES = 13, /* * Records a context switch in or out (flagged by * PERF_RECORD_MISC_SWITCH_OUT). See also * PERF_RECORD_SWITCH_CPU_WIDE. * * struct { * struct perf_event_header header; * struct sample_id sample_id; * }; */ PERF_RECORD_SWITCH = 14, /* * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and * next_prev_tid that are the next (switching out) or previous * (switching in) pid/tid. 
* * struct { * struct perf_event_header header; * u32 next_prev_pid; * u32 next_prev_tid; * struct sample_id sample_id; * }; */ PERF_RECORD_SWITCH_CPU_WIDE = 15, /* * struct { * struct perf_event_header header; * u32 pid; * u32 tid; * u64 nr_namespaces; * { u64 dev, inode; } [nr_namespaces]; * struct sample_id sample_id; * }; */ PERF_RECORD_NAMESPACES = 16, /* * Record ksymbol register/unregister events: * * struct { * struct perf_event_header header; * u64 addr; * u32 len; * u16 ksym_type; * u16 flags; * char name[]; * struct sample_id sample_id; * }; */ PERF_RECORD_KSYMBOL = 17, /* * Record bpf events: * enum perf_bpf_event_type { * PERF_BPF_EVENT_UNKNOWN = 0, * PERF_BPF_EVENT_PROG_LOAD = 1, * PERF_BPF_EVENT_PROG_UNLOAD = 2, * }; * * struct { * struct perf_event_header header; * u16 type; * u16 flags; * u32 id; * u8 tag[BPF_TAG_SIZE]; * struct sample_id sample_id; * }; */ PERF_RECORD_BPF_EVENT = 18, /* * struct { * struct perf_event_header header; * u64 id; * char path[]; * struct sample_id sample_id; * }; */ PERF_RECORD_CGROUP = 19, PERF_RECORD_MAX, /* non-ABI */ }; enum perf_record_ksymbol_type { PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0, PERF_RECORD_KSYMBOL_TYPE_BPF = 1, PERF_RECORD_KSYMBOL_TYPE_MAX /* non-ABI */ }; #define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER (1 << 0) enum perf_bpf_event_type { PERF_BPF_EVENT_UNKNOWN = 0, PERF_BPF_EVENT_PROG_LOAD = 1, PERF_BPF_EVENT_PROG_UNLOAD = 2, PERF_BPF_EVENT_MAX, /* non-ABI */ }; #define PERF_MAX_STACK_DEPTH 127 #define PERF_MAX_CONTEXTS_PER_STACK 8 enum perf_callchain_context { PERF_CONTEXT_HV = (__u64)-32, PERF_CONTEXT_KERNEL = (__u64)-128, PERF_CONTEXT_USER = (__u64)-512, PERF_CONTEXT_GUEST = (__u64)-2048, PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, PERF_CONTEXT_GUEST_USER = (__u64)-2560, PERF_CONTEXT_MAX = (__u64)-4095, }; /** * PERF_RECORD_AUX::flags bits */ #define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */ #define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */ #define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */ #define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */ #define PERF_FLAG_FD_NO_GROUP (1UL << 0) #define PERF_FLAG_FD_OUTPUT (1UL << 1) #define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */ #define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */ #if defined(__LITTLE_ENDIAN_BITFIELD) union perf_mem_data_src { __u64 val; struct { __u64 mem_op:5, /* type of opcode */ mem_lvl:14, /* memory hierarchy level */ mem_snoop:5, /* snoop mode */ mem_lock:2, /* lock instr */ mem_dtlb:7, /* tlb access */ mem_lvl_num:4, /* memory hierarchy level number */ mem_remote:1, /* remote */ mem_snoopx:2, /* snoop mode, ext */ mem_rsvd:24; }; }; #elif defined(__BIG_ENDIAN_BITFIELD) union perf_mem_data_src { __u64 val; struct { __u64 mem_rsvd:24, mem_snoopx:2, /* snoop mode, ext */ mem_remote:1, /* remote */ mem_lvl_num:4, /* memory hierarchy level number */ mem_dtlb:7, /* tlb access */ mem_lock:2, /* lock instr */ mem_snoop:5, /* snoop mode */ mem_lvl:14, /* memory hierarchy level */ mem_op:5; /* type of opcode */ }; }; #else #error "Unknown endianness" #endif /* type of opcode (load/store/prefetch,code) */ #define PERF_MEM_OP_NA 0x01 /* not available */ #define PERF_MEM_OP_LOAD 0x02 /* load instruction */ #define PERF_MEM_OP_STORE 0x04 /* store instruction */ #define PERF_MEM_OP_PFETCH 0x08 /* prefetch */ #define PERF_MEM_OP_EXEC 0x10 /* code (execution) */ #define PERF_MEM_OP_SHIFT 0 /* memory hierarchy (memory level, hit or miss) */ #define PERF_MEM_LVL_NA 0x01 /* not 
available */ #define PERF_MEM_LVL_HIT 0x02 /* hit level */ #define PERF_MEM_LVL_MISS 0x04 /* miss level */ #define PERF_MEM_LVL_L1 0x08 /* L1 */ #define PERF_MEM_LVL_LFB 0x10 /* Line Fill Buffer */ #define PERF_MEM_LVL_L2 0x20 /* L2 */ #define PERF_MEM_LVL_L3 0x40 /* L3 */ #define PERF_MEM_LVL_LOC_RAM 0x80 /* Local DRAM */ #define PERF_MEM_LVL_REM_RAM1 0x100 /* Remote DRAM (1 hop) */ #define PERF_MEM_LVL_REM_RAM2 0x200 /* Remote DRAM (2 hops) */ #define PERF_MEM_LVL_REM_CCE1 0x400 /* Remote Cache (1 hop) */ #define PERF_MEM_LVL_REM_CCE2 0x800 /* Remote Cache (2 hops) */ #define PERF_MEM_LVL_IO 0x1000 /* I/O memory */ #define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */ #define PERF_MEM_LVL_SHIFT 5 #define PERF_MEM_REMOTE_REMOTE 0x01 /* Remote */ #define PERF_MEM_REMOTE_SHIFT 37 #define PERF_MEM_LVLNUM_L1 0x01 /* L1 */ #define PERF_MEM_LVLNUM_L2 0x02 /* L2 */ #define PERF_MEM_LVLNUM_L3 0x03 /* L3 */ #define PERF_MEM_LVLNUM_L4 0x04 /* L4 */ /* 5-0xa available */ #define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */ #define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */ #define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */ #define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */ #define PERF_MEM_LVLNUM_NA 0x0f /* N/A */ #define PERF_MEM_LVLNUM_SHIFT 33 /* snoop mode */ #define PERF_MEM_SNOOP_NA 0x01 /* not available */ #define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */ #define PERF_MEM_SNOOP_HIT 0x04 /* snoop hit */ #define PERF_MEM_SNOOP_MISS 0x08 /* snoop miss */ #define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */ #define PERF_MEM_SNOOP_SHIFT 19 #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */ /* 1 free */ #define PERF_MEM_SNOOPX_SHIFT 37 /* locked instruction */ #define PERF_MEM_LOCK_NA 0x01 /* not available */ #define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */ #define PERF_MEM_LOCK_SHIFT 24 /* TLB access */ #define PERF_MEM_TLB_NA 0x01 /* not available */ #define PERF_MEM_TLB_HIT 0x02 /* hit level */ #define PERF_MEM_TLB_MISS 0x04 /* miss level */ #define PERF_MEM_TLB_L1 0x08 /* L1 */ #define PERF_MEM_TLB_L2 0x10 /* L2 */ #define PERF_MEM_TLB_WK 0x20 /* Hardware Walker*/ #define PERF_MEM_TLB_OS 0x40 /* OS fault handler */ #define PERF_MEM_TLB_SHIFT 26 #define PERF_MEM_S(a, s) \ (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT) /* * single taken branch record layout: * * from: source instruction (may not always be a branch insn) * to: branch target * mispred: branch target was mispredicted * predicted: branch target was predicted * * support for mispred, predicted is optional. In case it * is not supported mispred = predicted = 0. * * in_tx: running in a hardware transaction * abort: aborting a hardware transaction * cycles: cycles from last branch (or 0 if not supported) * type: branch type */ struct perf_branch_entry { __u64 from; __u64 to; __u64 mispred:1, /* target mispredicted */ predicted:1,/* target predicted */ in_tx:1, /* in transaction */ abort:1, /* transaction abort */ cycles:16, /* cycle count to last branch */ type:4, /* branch type */ reserved:40; }; #endif /* _UAPI_LINUX_PERF_EVENT_H */ gobpf-0.2.0/elf/kernel_version.go000066400000000000000000000067001404447410300167340ustar00rootroot00000000000000// +build linux // Copyright 2016-2017 Kinvolk // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package elf import ( "fmt" "io/ioutil" "regexp" "strconv" "strings" "syscall" ) var versionRegex = regexp.MustCompile(`^(\d+)\.(\d+)\.(\d+).*$`) // KernelVersionFromReleaseString converts a release string with format // 4.4.2[-1] to a kernel version number in LINUX_VERSION_CODE format. // That is, for kernel "a.b.c", the version number will be (a<<16 + b<<8 + c) func KernelVersionFromReleaseString(releaseString string) (uint32, error) { versionParts := versionRegex.FindStringSubmatch(releaseString) if len(versionParts) != 4 { return 0, fmt.Errorf("got invalid release version %q (expected format '4.3.2-1')", releaseString) } major, err := strconv.Atoi(versionParts[1]) if err != nil { return 0, err } minor, err := strconv.Atoi(versionParts[2]) if err != nil { return 0, err } patch, err := strconv.Atoi(versionParts[3]) if err != nil { return 0, err } out := major*256*256 + minor*256 + patch return uint32(out), nil } func currentVersionUname() (uint32, error) { var buf syscall.Utsname if err := syscall.Uname(&buf); err != nil { return 0, err } releaseString := strings.Trim(utsnameStr(buf.Release[:]), "\x00") return KernelVersionFromReleaseString(releaseString) } func currentVersionUbuntu() (uint32, error) { procVersion, err := ioutil.ReadFile("/proc/version_signature") if err != nil { return 0, err } var u1, u2, releaseString string _, err = fmt.Sscanf(string(procVersion), "%s %s %s", &u1, &u2, &releaseString) if err != nil { return 0, err } return KernelVersionFromReleaseString(releaseString) } var debianVersionRegex = regexp.MustCompile(`.* SMP Debian (\d+\.\d+\.\d+-\d+)(?:\+[[:alnum:]]*)?.*`) func parseDebianVersion(str string) (uint32, error) { match := debianVersionRegex.FindStringSubmatch(str) if len(match) != 2 { return 0, fmt.Errorf("failed to parse kernel version from /proc/version: %s", str) } return KernelVersionFromReleaseString(match[1]) } func currentVersionDebian() (uint32, error) { procVersion, err := ioutil.ReadFile("/proc/version") if err != nil { return 0, fmt.Errorf("error reading /proc/version: %s", err) } return parseDebianVersion(string(procVersion)) } // CurrentKernelVersion returns the current kernel version in // LINUX_VERSION_CODE format (see KernelVersionFromReleaseString()) func CurrentKernelVersion() (uint32, error) { // We need extra checks for Debian and Ubuntu as they modify // the kernel version patch number for compatibility with // out-of-tree modules. Linux perf tools do the same for Ubuntu // systems: https://github.com/torvalds/linux/commit/d18acd15c // // See also: // https://kernel-handbook.alioth.debian.org/ch-versions.html // https://wiki.ubuntu.com/Kernel/FAQ version, err := currentVersionUbuntu() if err == nil { return version, nil } version, err = currentVersionDebian() if err == nil { return version, nil } return currentVersionUname() } gobpf-0.2.0/elf/kernel_version_test.go000066400000000000000000000052571404447410300177750ustar00rootroot00000000000000// +build linux // Copyright 2017 Kinvolk // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package elf import ( "testing" ) var testData = []struct { succeed bool releaseString string kernelVersion uint32 }{ {true, "4.1.2-3", 262402}, {true, "4.8.14-200.fc24.x86_64", 264206}, {true, "4.1.2-3foo", 262402}, {true, "4.1.2foo-1", 262402}, {true, "4.1.2-rkt-v1", 262402}, {true, "4.1.2rkt-v1", 262402}, {true, "4.1.2-3 foo", 262402}, {false, "foo 4.1.2-3", 0}, {true, "4.1.2", 262402}, {false, ".4.1.2", 0}, {false, "4.1.", 0}, {false, "4.1", 0}, } func TestKernelVersionFromReleaseString(t *testing.T) { for _, test := range testData { version, err := KernelVersionFromReleaseString(test.releaseString) if err != nil && test.succeed { t.Errorf("expected %q to succeed: %s", test.releaseString, err) } else if err == nil && !test.succeed { t.Errorf("expected %q to fail", test.releaseString) } if version != test.kernelVersion { t.Errorf("expected kernel version %d, got %d", test.kernelVersion, version) } } } func TestParseDebianVersion(t *testing.T) { for _, tc := range []struct { succeed bool releaseString string kernelVersion uint32 }{ // 4.9.168 {true, "Linux version 4.9.0-9-amd64 (debian-kernel@lists.debian.org) (gcc version 6.3.0 20170516 (Debian 6.3.0-18+deb9u1) ) #1 SMP Debian 4.9.168-1+deb9u3 (2019-06-16)", 264616}, // 4.9.88 {true, "Linux ip-10-0-75-49 4.9.0-6-amd64 #1 SMP Debian 4.9.88-1+deb9u1 (2018-05-07) x86_64 GNU/Linux", 264536}, // 3.16.68 {true, "Linux version 3.16.0-9-amd64 (debian-kernel@lists.debian.org) (gcc version 4.9.2 (Debian 4.9.2-10+deb8u2) ) #1 SMP Debian 3.16.68-1 (2019-05-22)", 200772}, // Invalid {false, "Linux version 4.9.125-linuxkit (root@659b6d51c354) (gcc version 6.4.0 (Alpine 6.4.0) ) #1 SMP Fri Sep 7 08:20:28 UTC 2018", 0}, } { version, err := parseDebianVersion(tc.releaseString) if err != nil && tc.succeed { t.Errorf("expected %q to succeed: %s", tc.releaseString, err) } else if err == nil && !tc.succeed { t.Errorf("expected %q to fail", tc.releaseString) } if version != tc.kernelVersion { t.Errorf("expected kernel version %d, got %d", tc.kernelVersion, version) } } } gobpf-0.2.0/elf/module.go000066400000000000000000000572721404447410300152040ustar00rootroot00000000000000// +build linux // Copyright 2016 Cilium Project // Copyright 2016 Sylvain Afchain // Copyright 2016 Kinvolk // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
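//
// A minimal usage sketch of the loader API in this file (a hedged example:
// the object file "dummy.o" and the section name "kprobe/SyS_close" are
// hypothetical, and error handling is elided):
//
//	mod := elf.NewModule("dummy.o")
//	if err := mod.Load(nil); err != nil {
//		// inspect mod.Log() for verifier output
//	}
//	defer mod.Close()
//	if err := mod.EnableKprobe("kprobe/SyS_close", 0); err != nil {
//		// handle error
//	}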
package elf import ( "debug/elf" "errors" "fmt" "io" "io/ioutil" "os" "regexp" "strconv" "strings" "syscall" "unsafe" ) /* #cgo CFLAGS: -I${SRCDIR}/include/uapi -I${SRCDIR}/include #include #include #include #include #include #include #include "libbpf.h" #include #include #include #include #include #include #include #include #include #include static int perf_event_open_tracepoint(int tracepoint_id, int pid, int cpu, int group_fd, unsigned long flags) { struct perf_event_attr attr = {0,}; attr.type = PERF_TYPE_TRACEPOINT; attr.sample_type = PERF_SAMPLE_RAW; attr.sample_period = 1; attr.wakeup_events = 1; attr.config = tracepoint_id; return syscall(__NR_perf_event_open, &attr, pid, cpu, group_fd, flags); } int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.target_fd = target_fd; attr.attach_bpf_fd = prog_fd; attr.attach_type = type; return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr)); } int bpf_prog_detach(int prog_fd, int target_fd, enum bpf_attach_type type) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.target_fd = target_fd; attr.attach_bpf_fd = prog_fd; attr.attach_type = type; return syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr)); } int bpf_attach_socket(int sock, int fd) { return setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &fd, sizeof(fd)); } int bpf_detach_socket(int sock, int fd) { return setsockopt(sock, SOL_SOCKET, SO_DETACH_BPF, &fd, sizeof(fd)); } int bpf_attach_xdp(const char *dev_name, int progfd, uint32_t flags) { int ifindex = if_nametoindex(dev_name); char err_buf[256]; int ret = -1; if (ifindex == 0) { fprintf(stderr, "bpf: Resolving device name to index: %s\n", strerror(errno)); return -1; } ret = bpf_set_link_xdp_fd(ifindex, progfd, flags); if (ret) { fprintf(stderr, "bpf: Attaching prog to %s: %s", dev_name, err_buf); return -1; } return 0; } */ import "C" type Module struct { fileName string fileReader io.ReaderAt file *elf.File log []byte maps map[string]*Map probes map[string]*Kprobe uprobes map[string]*Uprobe cgroupPrograms map[string]*CgroupProgram socketFilters map[string]*SocketFilter tracepointPrograms map[string]*TracepointProgram schedPrograms map[string]*SchedProgram xdpPrograms map[string]*XDPProgram compatProbe bool // try to be automatically convert function names depending on kernel versions (SyS_ and __x64_sys_) } // Kprobe represents a kprobe or kretprobe and has to be declared // in the C file, type Kprobe struct { Name string insns *C.struct_bpf_insn fd int efd int } type Uprobe struct { Name string insns *C.struct_bpf_insn fd int efds map[string]int } type AttachType int const ( IngressType AttachType = iota EgressType SockCreateType ) const defaultLogSize uint32 = 524288 // CgroupProgram represents a cgroup skb/sock program type CgroupProgram struct { Name string insns *C.struct_bpf_insn fd int } // SocketFilter represents a socket filter type SocketFilter struct { Name string insns *C.struct_bpf_insn fd int } // TracepointProgram represents a tracepoint program type TracepointProgram struct { Name string insns *C.struct_bpf_insn fd int efd int } // SchedProgram represents a traffic classifier program type SchedProgram struct { Name string insns *C.struct_bpf_insn fd int } // XDPProgram represents a XDP hook program type XDPProgram struct { Name string insns *C.struct_bpf_insn fd int } func newModule(logSize uint32) *Module { return &Module{ probes: make(map[string]*Kprobe), uprobes: make(map[string]*Uprobe), cgroupPrograms: 
make(map[string]*CgroupProgram), socketFilters: make(map[string]*SocketFilter), tracepointPrograms: make(map[string]*TracepointProgram), schedPrograms: make(map[string]*SchedProgram), xdpPrograms: make(map[string]*XDPProgram), log: make([]byte, logSize), } } func NewModuleWithLog(fileName string, logSize uint32) *Module { module := newModule(logSize) module.fileName = fileName return module } func NewModuleFromReaderWithLog(fileReader io.ReaderAt, logSize uint32) *Module { module := newModule(logSize) module.fileReader = fileReader return module } func NewModule(fileName string) *Module { module := newModule(defaultLogSize) module.fileName = fileName return module } func NewModuleFromReader(fileReader io.ReaderAt) *Module { module := newModule(defaultLogSize) module.fileReader = fileReader return module } var kprobeIDNotExist error = errors.New("kprobe id file doesn't exist") func writeKprobeEvent(probeType, eventName, funcName, maxactiveStr string) (int, error) { kprobeEventsFileName := "/sys/kernel/debug/tracing/kprobe_events" f, err := os.OpenFile(kprobeEventsFileName, os.O_APPEND|os.O_WRONLY, 0666) if err != nil { return -1, fmt.Errorf("cannot open kprobe_events: %v", err) } defer f.Close() cmd := fmt.Sprintf("%s%s:%s %s\n", probeType, maxactiveStr, eventName, funcName) if _, err = f.WriteString(cmd); err != nil { return -1, fmt.Errorf("cannot write %q to kprobe_events: %v", cmd, err) } kprobeIdFile := fmt.Sprintf("/sys/kernel/debug/tracing/events/kprobes/%s/id", eventName) kprobeIdBytes, err := ioutil.ReadFile(kprobeIdFile) if err != nil { if os.IsNotExist(err) { return -1, kprobeIDNotExist } return -1, fmt.Errorf("cannot read kprobe id: %v", err) } kprobeId, err := strconv.Atoi(strings.TrimSpace(string(kprobeIdBytes))) if err != nil { return -1, fmt.Errorf("invalid kprobe id: %v", err) } return kprobeId, nil } func writeUprobeEvent(probeType, eventName, path string, offset uint64) (int, error) { uprobeEventsFileName := "/sys/kernel/debug/tracing/uprobe_events" f, err := os.OpenFile(uprobeEventsFileName, os.O_APPEND|os.O_WRONLY, 0666) if err != nil { return -1, fmt.Errorf("cannot open uprobe_events: %v", err) } defer f.Close() cmd := fmt.Sprintf("%s:%s %s:%#x\n", probeType, eventName, path, offset) if _, err = f.WriteString(cmd); err != nil { return -1, fmt.Errorf("cannot write %q to uprobe_events: %v", cmd, err) } uprobeIdFile := fmt.Sprintf("/sys/kernel/debug/tracing/events/uprobes/%s/id", eventName) uprobeIdBytes, err := ioutil.ReadFile(uprobeIdFile) if err != nil { return -1, fmt.Errorf("cannot read uprobe id: %v", err) } uprobeId, err := strconv.Atoi(strings.TrimSpace(string(uprobeIdBytes))) if err != nil { return -1, fmt.Errorf("invalid uprobe id: %v", err) } return uprobeId, nil } func perfEventOpenTracepoint(id int, progFd int) (int, error) { efd, err := C.perf_event_open_tracepoint(C.int(id), -1 /* pid */, 0 /* cpu */, -1 /* group_fd */, C.PERF_FLAG_FD_CLOEXEC) if efd < 0 { return -1, fmt.Errorf("perf_event_open error: %v", err) } if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(efd), C.PERF_EVENT_IOC_ENABLE, 0); err != 0 { return -1, fmt.Errorf("error enabling perf event: %v", err) } if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(efd), C.PERF_EVENT_IOC_SET_BPF, uintptr(progFd)); err != 0 { return -1, fmt.Errorf("error attaching bpf program to perf event: %v", err) } return int(efd), nil } // Log gives users access to the log buffer with verifier messages func (b *Module) Log() []byte { return b.log } // EnableOptionCompatProbe will attempt to 
automatically convert function // names in kprobe and kretprobe to maintain compatibility between kernel // versions. // See: https://github.com/iovisor/gobpf/issues/146 func (b *Module) EnableOptionCompatProbe() { b.compatProbe = true } // EnableKprobe enables a kprobe/kretprobe identified by secName. // For kretprobes, you can configure the maximum number of instances // of the function that can be probed simultaneously with maxactive. // If maxactive is 0 it will be set to the default value: if CONFIG_PREEMPT is // enabled, this is max(10, 2*NR_CPUS); otherwise, it is NR_CPUS. // For kprobes, maxactive is ignored. func (b *Module) EnableKprobe(secName string, maxactive int) error { var probeType, funcName string isKretprobe := strings.HasPrefix(secName, "kretprobe/") probe, ok := b.probes[secName] if !ok { return fmt.Errorf("no such kprobe %q", secName) } progFd := probe.fd var maxactiveStr string if isKretprobe { probeType = "r" funcName = strings.TrimPrefix(secName, "kretprobe/") if maxactive > 0 { maxactiveStr = fmt.Sprintf("%d", maxactive) } } else { probeType = "p" funcName = strings.TrimPrefix(secName, "kprobe/") } eventName := probeType + funcName kprobeId, err := writeKprobeEvent(probeType, eventName, funcName, maxactiveStr) // fallback without maxactive if err == kprobeIDNotExist { kprobeId, err = writeKprobeEvent(probeType, eventName, funcName, "") } if err != nil { return err } probe.efd, err = perfEventOpenTracepoint(kprobeId, progFd) return err } func writeTracepointEvent(category, name string) (int, error) { tracepointIdFile := fmt.Sprintf("/sys/kernel/debug/tracing/events/%s/%s/id", category, name) tracepointIdBytes, err := ioutil.ReadFile(tracepointIdFile) if err != nil { return -1, fmt.Errorf("cannot read tracepoint id %q: %v", tracepointIdFile, err) } tracepointId, err := strconv.Atoi(strings.TrimSpace(string(tracepointIdBytes))) if err != nil { return -1, fmt.Errorf("invalid tracepoint id: %v", err) } return tracepointId, nil } func (b *Module) EnableTracepoint(secName string) error { prog, ok := b.tracepointPrograms[secName] if !ok { return fmt.Errorf("no such tracepoint program %q", secName) } progFd := prog.fd tracepointGroup := strings.SplitN(secName, "/", 3) if len(tracepointGroup) != 3 { return fmt.Errorf("invalid section name %q, expected tracepoint/category/name", secName) } category := tracepointGroup[1] name := tracepointGroup[2] tracepointId, err := writeTracepointEvent(category, name) if err != nil { return err } prog.efd, err = perfEventOpenTracepoint(tracepointId, progFd) return err } // IterKprobes returns a channel that emits the kprobes that are included in the // module. func (b *Module) IterKprobes() <-chan *Kprobe { ch := make(chan *Kprobe) go func() { for name := range b.probes { ch <- b.probes[name] } close(ch) }() return ch } // EnableKprobes enables all kprobes/kretprobes included in the module. The // value in maxactive will be applied to all the kretprobes. func (b *Module) EnableKprobes(maxactive int) error { var err error for _, kprobe := range b.probes { err = b.EnableKprobe(kprobe.Name, maxactive) if err != nil { return err } } return nil } // IterUprobes returns a channel that emits the uprobes included in the module.
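// A short consumption sketch (hedged: "mod", the binary path and the
// symbol offset below are made up for illustration):
//
//	for up := range mod.IterUprobes() {
//		if err := elf.AttachUprobe(up, "/usr/local/bin/myapp", 0x1234); err != nil {
//			// handle error
//		}
//	}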
func (b *Module) IterUprobes() <-chan *Uprobe { ch := make(chan *Uprobe) go func() { for name := range b.uprobes { ch <- b.uprobes[name] } close(ch) }() return ch } func (b *Module) IterCgroupProgram() <-chan *CgroupProgram { ch := make(chan *CgroupProgram) go func() { for name := range b.cgroupPrograms { ch <- b.cgroupPrograms[name] } close(ch) }() return ch } func (b *Module) IterTracepointProgram() <-chan *TracepointProgram { ch := make(chan *TracepointProgram) go func() { for name := range b.tracepointPrograms { ch <- b.tracepointPrograms[name] } close(ch) }() return ch } func (b *Module) IterXDPProgram() <-chan *XDPProgram { ch := make(chan *XDPProgram) go func() { for name := range b.xdpPrograms { ch <- b.xdpPrograms[name] } close(ch) }() return ch } func (b *Module) CgroupProgram(name string) *CgroupProgram { return b.cgroupPrograms[name] } func (p *CgroupProgram) Fd() int { return p.fd } func (tp *TracepointProgram) Fd() int { return tp.fd } var safeEventRegexp = regexp.MustCompile("[^a-zA-Z0-9]") func safeEventName(event string) string { return safeEventRegexp.ReplaceAllString(event, "_") } // AttachUprobe attaches the uprobe's BPF script to the program or library // at the given path and offset. func AttachUprobe(uprobe *Uprobe, path string, offset uint64) error { var probeType string if strings.HasPrefix(uprobe.Name, "uretprobe/") { probeType = "r" } else { probeType = "p" } eventName := fmt.Sprintf("%s__%s_%x_gobpf_%d", probeType, safeEventName(path), offset, os.Getpid()) if _, ok := uprobe.efds[eventName]; ok { return errors.New("uprobe already attached") } uprobeID, err := writeUprobeEvent(probeType, eventName, path, offset) if err != nil { return err } efd, err := perfEventOpenTracepoint(uprobeID, uprobe.fd) if err != nil { return err } uprobe.efds[eventName] = efd return nil } func (b *Module) AttachXDP(devName string, secName string) error { xdp, ok := b.xdpPrograms[secName] if !ok { return fmt.Errorf("no such XDP hook %q", secName) } if err := attachXDP(devName, xdp.fd, 0, true); err != nil { return err } return nil } // AttachXDPWithFlags attaches an xdp section to a device with flags. 
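// The flags value is passed to the kernel as the IFLA_XDP_FLAGS netlink
// attribute (see netlink.c), so the XDP_FLAGS_* constants from
// <linux/if_link.h> apply. A hedged sketch, assuming a section named
// "xdp/filter" exists in the loaded module:
//
//	// XDP_FLAGS_UPDATE_IF_NOEXIST is (1 << 0) in <linux/if_link.h>
//	if err := mod.AttachXDPWithFlags("eth0", "xdp/filter", 1); err != nil {
//		// handle error
//	}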
func (b *Module) AttachXDPWithFlags(devName string, secName string, flags uint32) error { xdp, ok := b.xdpPrograms[secName] if !ok { return fmt.Errorf("no such XDP hook %q", secName) } return attachXDP(devName, xdp.fd, flags, true) } func (b *Module) RemoveXDP(devName string) error { if err := attachXDP(devName, -1, 0, false); err != nil { return err } return nil } func attachXDP(devName string, fd int, flags uint32, attach bool) error { devNameCS := C.CString(devName) res, err := C.bpf_attach_xdp(devNameCS, C.int(fd), C.uint32_t(flags)) defer C.free(unsafe.Pointer(devNameCS)) if res != 0 || err != nil { return fmt.Errorf(xdpFormat(attach), devName, err) } return nil } func xdpFormat(attach bool) string { if attach { return "failed to attach BPF xdp to device %s: %v" } return "failed to remove BPF xdp from device %s: %v" } func AttachCgroupProgram(cgroupProg *CgroupProgram, cgroupPath string, attachType AttachType) error { return AttachCgroupProgramFromFd(cgroupProg.fd, cgroupPath, attachType) } func AttachCgroupProgramFromFd(progFd int, cgroupPath string, attachType AttachType) error { f, err := os.Open(cgroupPath) if err != nil { return fmt.Errorf("error opening cgroup %q: %v", cgroupPath, err) } defer f.Close() ret, err := C.bpf_prog_attach(C.int(progFd), C.int(f.Fd()), uint32(attachType)) if ret < 0 { return fmt.Errorf("failed to attach prog to cgroup %q: %v", cgroupPath, err) } return nil } func DetachCgroupProgram(cgroupProg *CgroupProgram, cgroupPath string, attachType AttachType) error { f, err := os.Open(cgroupPath) if err != nil { return fmt.Errorf("error opening cgroup %q: %v", cgroupPath, err) } defer f.Close() progFd := C.int(cgroupProg.fd) cgroupFd := C.int(f.Fd()) ret, err := C.bpf_prog_detach(progFd, cgroupFd, uint32(attachType)) if ret < 0 { return fmt.Errorf("failed to detach prog from cgroup %q: %v", cgroupPath, err) } return nil } func (b *Module) IterSocketFilter() <-chan *SocketFilter { ch := make(chan *SocketFilter) go func() { for name := range b.socketFilters { ch <- b.socketFilters[name] } close(ch) }() return ch } func (b *Module) SocketFilter(name string) *SocketFilter { return b.socketFilters[name] } func AttachSocketFilter(socketFilter *SocketFilter, sockFd int) error { ret, err := C.bpf_attach_socket(C.int(sockFd), C.int(socketFilter.fd)) if ret != 0 { return fmt.Errorf("error attaching BPF socket filter: %v", err) } return nil } func (sf *SocketFilter) Fd() int { return sf.fd } func DetachSocketFilter(socketFilter *SocketFilter, sockFd int) error { ret, err := C.bpf_detach_socket(C.int(sockFd), C.int(socketFilter.fd)) if ret != 0 { return fmt.Errorf("error detaching BPF socket filter: %v", err) } return nil } func (b *Module) Kprobe(name string) *Kprobe { return b.probes[name] } func (kp *Kprobe) Fd() int { return kp.fd } func disableKprobe(eventName string) error { kprobeEventsFileName := "/sys/kernel/debug/tracing/kprobe_events" f, err := os.OpenFile(kprobeEventsFileName, os.O_APPEND|os.O_WRONLY, 0) if err != nil { return fmt.Errorf("cannot open kprobe_events: %v", err) } defer f.Close() cmd := fmt.Sprintf("-:%s\n", eventName) if _, err = f.WriteString(cmd); err != nil { pathErr, ok := err.(*os.PathError) if ok && pathErr.Err == syscall.ENOENT { // This can happen when for example two modules // use the same elf object and both call `Close()`. // The second will encounter the error as the // probe already has been cleared by the first. 
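// Note that only ENOENT is tolerated here; any other write
// error is still reported to the caller below.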
return nil } else { return fmt.Errorf("cannot write %q to kprobe_events: %v", cmd, err) } } return nil } func disableUprobe(eventName string) error { uprobeEventsFileName := "/sys/kernel/debug/tracing/uprobe_events" f, err := os.OpenFile(uprobeEventsFileName, os.O_APPEND|os.O_WRONLY, 0) if err != nil { return fmt.Errorf("cannot open uprobe_events: %v", err) } defer f.Close() cmd := fmt.Sprintf("-:%s\n", eventName) if _, err = f.WriteString(cmd); err != nil { return fmt.Errorf("cannot write %q to uprobe_events: %v", cmd, err) } return nil } func (b *Module) Uprobe(name string) *Uprobe { return b.uprobes[name] } func (up *Uprobe) Fd() int { return up.fd } // IterSchedProgram returns a channel that emits the sched programs included in the // module. func (b *Module) IterSchedProgram() <-chan *SchedProgram { ch := make(chan *SchedProgram) go func() { for name := range b.schedPrograms { ch <- b.schedPrograms[name] } close(ch) }() return ch } func (b *Module) SchedProgram(name string) *SchedProgram { return b.schedPrograms[name] } func (sp *SchedProgram) Fd() int { return sp.fd } func (b *Module) XDPProgram(name string) *XDPProgram { return b.xdpPrograms[name] } func (xdpp *XDPProgram) Fd() int { return xdpp.fd } func (b *Module) closeProbes() error { var funcName string for _, probe := range b.probes { if probe.efd != -1 { if err := syscall.Close(probe.efd); err != nil { return fmt.Errorf("error closing perf event fd: %v", err) } probe.efd = -1 } if err := syscall.Close(probe.fd); err != nil { return fmt.Errorf("error closing probe fd: %v", err) } name := probe.Name isKretprobe := strings.HasPrefix(name, "kretprobe/") var err error if isKretprobe { funcName = strings.TrimPrefix(name, "kretprobe/") err = disableKprobe("r" + funcName) } else { funcName = strings.TrimPrefix(name, "kprobe/") err = disableKprobe("p" + funcName) } if err != nil { return fmt.Errorf("error clearing probe: %v", err) } } return nil } func (b *Module) closeUprobes() error { for _, probe := range b.uprobes { for eventName, efd := range probe.efds { if err := syscall.Close(efd); err != nil { return fmt.Errorf("error closing uprobe's event fd: %v", err) } if err := disableUprobe(eventName); err != nil { return fmt.Errorf("error clearing probe: %v", err) } } if err := syscall.Close(probe.fd); err != nil { return fmt.Errorf("error closing uprobe fd: %v", err) } } return nil } func (b *Module) closeTracepointPrograms() error { for _, program := range b.tracepointPrograms { if program.efd != -1 { if err := syscall.Close(program.efd); err != nil { return fmt.Errorf("error closing perf event fd: %v", err) } program.efd = -1 } if err := syscall.Close(program.fd); err != nil { return fmt.Errorf("error closing tracepoint program fd: %v", err) } } return nil } func (b *Module) closeCgroupPrograms() error { for _, program := range b.cgroupPrograms { if err := syscall.Close(program.fd); err != nil { return fmt.Errorf("error closing cgroup program fd: %v", err) } } return nil } func (b *Module) closeSocketFilters() error { for _, filter := range b.socketFilters { if err := syscall.Close(filter.fd); err != nil { return fmt.Errorf("error closing socket filter fd: %v", err) } } return nil } func (b *Module) closeXDPPrograms() error { for _, xdp := range b.xdpPrograms { if err := syscall.Close(xdp.fd); err != nil { return fmt.Errorf("error closing XDP program fd: %v", err) } } return nil } func unpinMap(m *Map, pinPath string) error { mapPath, err := getMapPath(&m.m.def, m.Name, pinPath) if err != nil { return err } return 
syscall.Unlink(mapPath) } func (b *Module) closeMaps(options map[string]CloseOptions) error { for _, m := range b.maps { doUnpin := options[fmt.Sprintf("maps/%s", m.Name)].Unpin if doUnpin { mapDef := m.m.def var pinPath string if mapDef.pinning == PIN_CUSTOM_NS { closeOption, ok := options[fmt.Sprintf("maps/%s", m.Name)] if !ok { return fmt.Errorf("close option for maps/%s must have PinPath set", m.Name) } pinPath = closeOption.PinPath } else if mapDef.pinning == PIN_GLOBAL_NS { // mapDef.namespace is used for PIN_GLOBAL_NS maps pinPath = "" } else if mapDef.pinning == PIN_OBJECT_NS { return fmt.Errorf("unpinning with PIN_OBJECT_NS is to be implemented") } if err := unpinMap(m, pinPath); err != nil { return fmt.Errorf("error unpinning map %q: %v", m.Name, err) } } // unmap for _, base := range m.bases { err := syscall.Munmap(base) if err != nil { return fmt.Errorf("unmap error: %v", err) } } for _, fd := range m.pmuFDs { // disable _, _, err2 := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), C.PERF_EVENT_IOC_DISABLE, 0) if err2 != 0 { return fmt.Errorf("error disabling perf event: %v", err2) } // close if err := syscall.Close(int(fd)); err != nil { return fmt.Errorf("error closing perf event fd: %v", err) } } if err := syscall.Close(int(m.m.fd)); err != nil { return fmt.Errorf("error closing map fd: %v", err) } C.free(unsafe.Pointer(m.m)) } return nil } // CloseOptions can be used for custom `Close` parameters type CloseOptions struct { // Set Unpin to true to close pinned maps as well Unpin bool PinPath string } // Close takes care of terminating all underlying BPF programs and structures. // That is: // // * Closing map file descriptors and unpinning them where applicable // * Detaching BPF programs from kprobes and closing their file descriptors // * Closing cgroup-bpf file descriptors // * Closing socket filter file descriptors // * Closing XDP file descriptors // // It doesn't detach BPF programs from cgroups or sockets because they're // considered resources the user controls. // It also doesn't unpin pinned maps. Use CloseExt and set Unpin to do this. 
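// A typical cleanup pattern (hedged sketch; "mod" is a loaded module as
// in the examples above):
//
//	defer func() {
//		if err := mod.Close(); err != nil {
//			// handle cleanup error
//		}
//	}()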
func (b *Module) Close() error { return b.CloseExt(nil) } // CloseExt takes a map "elf section -> CloseOptions" func (b *Module) CloseExt(options map[string]CloseOptions) error { if err := b.closeMaps(options); err != nil { return err } if err := b.closeProbes(); err != nil { return err } if err := b.closeUprobes(); err != nil { return err } if err := b.closeCgroupPrograms(); err != nil { return err } if err := b.closeTracepointPrograms(); err != nil { return err } if err := b.closeSocketFilters(); err != nil { return err } if err := b.closeXDPPrograms(); err != nil { return err } return nil } gobpf-0.2.0/elf/module_unsupported.go000066400000000000000000000041271404447410300176450ustar00rootroot00000000000000// +build !linux package elf import ( "io" "unsafe" ) type Module struct{} type Kprobe struct{} type CgroupProgram struct{} type AttachType struct{} type CloseOptions struct{} type SocketFilter struct{} type TracepointProgram struct{} type SchedProgram struct{} func NewModule(fileName string) *Module { return nil } func NewModuleFromReader(fileReader io.ReaderAt) *Module { return nil } func (b *Module) EnableKprobe(secName string, maxactive int) error { return errNotSupported } func (b *Module) IterKprobes() <-chan *Kprobe { return nil } func (b *Module) EnableKprobes(maxactive int) error { return errNotSupported } func (b *Module) IterCgroupProgram() <-chan *CgroupProgram { return nil } func (b *Module) CgroupProgram(name string) *CgroupProgram { return nil } func (b *Module) Kprobe(name string) *Kprobe { return nil } func (b *Module) AttachProgram(cgroupProg *CgroupProgram, cgroupPath string, attachType AttachType) error { return errNotSupported } func (b *Module) Close() error { return errNotSupported } func (b *Module) CloseExt(options map[string]CloseOptions) error { return errNotSupported } func (b *Module) DeleteElement(mp *Map, key unsafe.Pointer) error { return errNotSupported } func (b *Module) EnableTracepoint(secName string) error { return errNotSupported } func (b *Module) IterMaps() <-chan *Map { return nil } func (b *Module) IterSocketFilter() <-chan *SocketFilter { return nil } func (b *Module) IterTracepointProgram() <-chan *TracepointProgram { return nil } func (b *Module) Log() []byte { return nil } func (b *Module) LookupElement(mp *Map, key, value unsafe.Pointer) error { return errNotSupported } func (b *Module) LookupNextElement(mp *Map, key, nextKey, value unsafe.Pointer) (bool, error) { return false, errNotSupported } func (b *Module) Map(name string) *Map { return nil } func (b *Module) SchedProgram(name string) *SchedProgram { return nil } func (b *Module) SocketFilter(name string) *SocketFilter { return nil } func (b *Module) UpdateElement(mp *Map, key, value unsafe.Pointer, flags uint64) error { return errNotSupported } gobpf-0.2.0/elf/netlink.c000066400000000000000000000074501404447410300151730ustar00rootroot00000000000000// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2018 Facebook */ #include #include #include #include #include #include #include #include #include #include #include "libbpf.h" #include "nlattr.h" #ifndef SOL_NETLINK #define SOL_NETLINK 270 #endif typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t, void *cookie); static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq, __dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn, void *cookie) { bool multipart = true; struct nlmsgerr *err; struct nlmsghdr *nh; char buf[4096]; int len, ret; while (multipart) { multipart = false; len = recv(sock, buf, 
sizeof(buf), 0); if (len < 0) { ret = -errno; goto done; } if (len == 0) break; for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len); nh = NLMSG_NEXT(nh, len)) { if (nh->nlmsg_pid != nl_pid) { ret = -LIBBPF_ERRNO__WRNGPID; goto done; } if (nh->nlmsg_seq != seq) { ret = -LIBBPF_ERRNO__INVSEQ; goto done; } if (nh->nlmsg_flags & NLM_F_MULTI) multipart = true; switch (nh->nlmsg_type) { case NLMSG_ERROR: err = (struct nlmsgerr *)NLMSG_DATA(nh); if (!err->error) continue; ret = err->error; libbpf_nla_dump_errormsg(nh); goto done; case NLMSG_DONE: return 0; default: break; } if (_fn) { ret = _fn(nh, fn, cookie); if (ret) return ret; } } } ret = 0; done: return ret; } int libbpf_netlink_open(__u32 *nl_pid) { struct sockaddr_nl sa; socklen_t addrlen; int one = 1, ret; int sock; memset(&sa, 0, sizeof(sa)); sa.nl_family = AF_NETLINK; sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); if (sock < 0) return -errno; if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK, &one, sizeof(one)) < 0) { fprintf(stderr, "Netlink error reporting not supported\n"); } if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) { ret = -errno; goto cleanup; } addrlen = sizeof(sa); if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) { ret = -errno; goto cleanup; } if (addrlen != sizeof(sa)) { ret = -LIBBPF_ERRNO__INTERNAL; goto cleanup; } *nl_pid = sa.nl_pid; return sock; cleanup: close(sock); return ret; } int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags) { int sock, seq = 0, ret; struct nlattr *nla, *nla_xdp; struct { struct nlmsghdr nh; struct ifinfomsg ifinfo; char attrbuf[64]; } req; __u32 nl_pid; sock = libbpf_netlink_open(&nl_pid); if (sock < 0) return sock; memset(&req, 0, sizeof(req)); req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)); req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; req.nh.nlmsg_type = RTM_SETLINK; req.nh.nlmsg_pid = 0; req.nh.nlmsg_seq = ++seq; req.ifinfo.ifi_family = AF_UNSPEC; req.ifinfo.ifi_index = ifindex; nla = (struct nlattr *)(((char *)&req) + NLMSG_ALIGN(req.nh.nlmsg_len)); nla->nla_type = NLA_F_NESTED | IFLA_XDP; nla->nla_len = NLA_HDRLEN; nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len); nla_xdp->nla_type = IFLA_XDP_FD; nla_xdp->nla_len = NLA_HDRLEN + sizeof(int); memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd)); nla->nla_len += nla_xdp->nla_len; if (flags) { nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len); nla_xdp->nla_type = IFLA_XDP_FLAGS; nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags); memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags)); nla->nla_len += nla_xdp->nla_len; } req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len); if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) { ret = -errno; goto cleanup; } ret = bpf_netlink_recv(sock, nl_pid, seq, NULL, NULL, NULL); cleanup: close(sock); return ret; } gobpf-0.2.0/elf/nlattr.c000066400000000000000000000117701404447410300150330ustar00rootroot00000000000000// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * NETLINK Netlink attributes * * Copyright (c) 2003-2013 Thomas Graf */ #include #include "libbpf.h" #include "nlattr.h" #include #include #include typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t, void *cookie); static uint16_t nla_attr_minlen[LIBBPF_NLA_TYPE_MAX+1] = { [LIBBPF_NLA_U8] = sizeof(uint8_t), [LIBBPF_NLA_U16] = sizeof(uint16_t), [LIBBPF_NLA_U32] = sizeof(uint32_t), [LIBBPF_NLA_U64] = sizeof(uint64_t), [LIBBPF_NLA_STRING] = 1, [LIBBPF_NLA_FLAG] = 0, }; static struct nlattr *nla_next(const struct nlattr *nla, int *remaining) { int totlen = 
NLA_ALIGN(nla->nla_len); *remaining -= totlen; return (struct nlattr *) ((char *) nla + totlen); } static int nla_ok(const struct nlattr *nla, int remaining) { return remaining >= sizeof(*nla) && nla->nla_len >= sizeof(*nla) && nla->nla_len <= remaining; } static int nla_type(const struct nlattr *nla) { return nla->nla_type & NLA_TYPE_MASK; } static int validate_nla(struct nlattr *nla, int maxtype, struct libbpf_nla_policy *policy) { struct libbpf_nla_policy *pt; unsigned int minlen = 0; int type = nla_type(nla); if (type < 0 || type > maxtype) return 0; pt = &policy[type]; if (pt->type > LIBBPF_NLA_TYPE_MAX) return 0; if (pt->minlen) minlen = pt->minlen; else if (pt->type != LIBBPF_NLA_UNSPEC) minlen = nla_attr_minlen[pt->type]; if (libbpf_nla_len(nla) < minlen) return -1; if (pt->maxlen && libbpf_nla_len(nla) > pt->maxlen) return -1; if (pt->type == LIBBPF_NLA_STRING) { char *data = libbpf_nla_data(nla); if (data[libbpf_nla_len(nla) - 1] != '\0') return -1; } return 0; } static inline int nlmsg_len(const struct nlmsghdr *nlh) { return nlh->nlmsg_len - NLMSG_HDRLEN; } /** * Create attribute index based on a stream of attributes. * @arg tb Index array to be filled (maxtype+1 elements). * @arg maxtype Maximum attribute type expected and accepted. * @arg head Head of attribute stream. * @arg len Length of attribute stream. * @arg policy Attribute validation policy. * * Iterates over the stream of attributes and stores a pointer to each * attribute in the index array using the attribute type as index to * the array. Attribute with a type greater than the maximum type * specified will be silently ignored in order to maintain backwards * compatibility. If \a policy is not NULL, the attribute will be * validated using the specified policy. * * @see nla_validate * @return 0 on success or a negative error code. */ int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len, struct libbpf_nla_policy *policy) { struct nlattr *nla; int rem, err; memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); libbpf_nla_for_each_attr(nla, head, len, rem) { int type = nla_type(nla); if (type > maxtype) continue; if (policy) { err = validate_nla(nla, maxtype, policy); if (err < 0) goto errout; } if (tb[type]) fprintf(stderr, "Attribute of type %#x found multiple times in message, " "previous attribute is being ignored.\n", type); tb[type] = nla; } err = 0; errout: return err; } /** * Create attribute index based on nested attribute * @arg tb Index array to be filled (maxtype+1 elements). * @arg maxtype Maximum attribute type expected and accepted. * @arg nla Nested Attribute. * @arg policy Attribute validation policy. * * Feeds the stream of attributes nested into the specified attribute * to libbpf_nla_parse(). * * @see libbpf_nla_parse * @return 0 on success or a negative error code. 
*/ int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla, struct libbpf_nla_policy *policy) { return libbpf_nla_parse(tb, maxtype, libbpf_nla_data(nla), libbpf_nla_len(nla), policy); } /* dump netlink extended ack error message */ int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh) { struct libbpf_nla_policy extack_policy[NLMSGERR_ATTR_MAX + 1] = { [NLMSGERR_ATTR_MSG] = { .type = LIBBPF_NLA_STRING }, [NLMSGERR_ATTR_OFFS] = { .type = LIBBPF_NLA_U32 }, }; struct nlattr *tb[NLMSGERR_ATTR_MAX + 1], *attr; struct nlmsgerr *err; char *errmsg = NULL; int hlen, alen; /* no TLVs, nothing to do here */ if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS)) return 0; err = (struct nlmsgerr *)NLMSG_DATA(nlh); hlen = sizeof(*err); /* if NLM_F_CAPPED is set then the inner err msg was capped */ if (!(nlh->nlmsg_flags & NLM_F_CAPPED)) hlen += nlmsg_len(&err->msg); attr = (struct nlattr *) ((void *) err + hlen); alen = nlh->nlmsg_len - hlen; if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen, extack_policy) != 0) { fprintf(stderr, "Failed to parse extended error attributes\n"); return 0; } if (tb[NLMSGERR_ATTR_MSG]) errmsg = (char *) libbpf_nla_data(tb[NLMSGERR_ATTR_MSG]); fprintf(stderr, "Kernel error message: %s\n", errmsg); return 0; } gobpf-0.2.0/elf/perf.go000066400000000000000000000272631404447410300146520ustar00rootroot00000000000000// +build linux // Copyright 2016 Cilium Project // Copyright 2016 Sylvain Afchain // Copyright 2016 Kinvolk // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package elf import ( "fmt" "os" "sort" "syscall" "unsafe" "github.com/iovisor/gobpf/pkg/cpuonline" ) /* #include #include #include #include #include #include #include // from https://github.com/cilium/cilium/blob/master/pkg/bpf/perf.go struct event_sample { struct perf_event_header header; uint32_t size; uint8_t data[]; }; struct read_state { void *buf; int buf_len; // These two fields are for backward reading: as opposed to normal ring buffers, // backward read buffers don't update the read pointer when reading. // So we keep the state externally here. 
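// data_head_initialized records whether data_head has been seeded from the
// kernel's head pointer yet; data_head then tracks our own read cursor, and
// wrapped counts how often the walk wrapped around the ring (see
// perf_event_dump_backward below).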
uint64_t data_head_initialized; uint64_t data_head; uint64_t wrapped; }; static int perf_event_read(int page_count, int page_size, void *_state, void *_header, void *_sample_ptr, void *_lost_ptr) { volatile struct perf_event_mmap_page *header = _header; uint64_t data_head = *((volatile uint64_t *) &header->data_head); uint64_t data_tail = header->data_tail; uint64_t raw_size = (uint64_t)page_count * page_size; void *base = ((uint8_t *)header) + page_size; struct read_state *state = _state; struct event_sample *e; void *begin, *end; void **sample_ptr = (void **) _sample_ptr; void **lost_ptr = (void **) _lost_ptr; // No data to read on this ring __sync_synchronize(); if (data_head == data_tail) return 0; begin = base + data_tail % raw_size; e = begin; end = base + (data_tail + e->header.size) % raw_size; if (state->buf_len < e->header.size || !state->buf) { state->buf = realloc(state->buf, e->header.size); state->buf_len = e->header.size; } if (end < begin) { uint64_t len = base + raw_size - begin; memcpy(state->buf, begin, len); memcpy((char *) state->buf + len, base, e->header.size - len); e = state->buf; } else { memcpy(state->buf, begin, e->header.size); } switch (e->header.type) { case PERF_RECORD_SAMPLE: *sample_ptr = state->buf; break; case PERF_RECORD_LOST: *lost_ptr = state->buf; break; } __sync_synchronize(); header->data_tail += e->header.size; return e->header.type; } static int perf_event_dump_backward(int page_count, int page_size, void *_state, void *_header, void *_sample_ptr) { volatile struct perf_event_mmap_page *header = _header; uint64_t data_head = header->data_head; uint64_t raw_size = (uint64_t)page_count * page_size; void *base = ((uint8_t *)header) + page_size; struct read_state *state = _state; struct perf_event_header *p, *head; void **sample_ptr = (void **) _sample_ptr; void *begin, *end; uint64_t new_head; if (state->data_head_initialized == 0) { state->data_head_initialized = 1; state->data_head = data_head & (raw_size - 1); } if ((state->wrapped && state->data_head >= data_head) || state->wrapped > 1) { return 0; } begin = p = base + state->data_head; if (p->type != PERF_RECORD_SAMPLE) return 0; new_head = (state->data_head + p->size) & (raw_size - 1); end = base + new_head; if (state->buf_len < p->size || !state->buf) { state->buf = realloc(state->buf, p->size); state->buf_len = p->size; } if (end < begin) { uint64_t len = base + raw_size - begin; memcpy(state->buf, begin, len); memcpy((char *) state->buf + len, base, p->size - len); } else { memcpy(state->buf, begin, p->size); } *sample_ptr = state->buf; if (new_head <= state->data_head) { state->wrapped++; } state->data_head = new_head; return p->type; } */ import "C" type PerfMap struct { name string program *Module pageCount int receiverChan chan []byte lostChan chan uint64 pollStop chan struct{} timestamp func(*[]byte) uint64 } // Matching 'struct perf_event_sample in kernel sources type PerfEventSample struct { PerfEventHeader Size uint32 data byte // Size bytes of data } func InitPerfMap(b *Module, mapName string, receiverChan chan []byte, lostChan chan uint64) (*PerfMap, error) { m, ok := b.maps[mapName] if !ok { return nil, fmt.Errorf("no map with name %s", mapName) } if receiverChan == nil { return nil, fmt.Errorf("receiverChan is nil") } // Maps are initialized in b.Load(), nothing to do here return &PerfMap{ name: mapName, program: b, pageCount: m.pageCount, receiverChan: receiverChan, lostChan: lostChan, pollStop: make(chan struct{}), }, nil } func (pm *PerfMap) SwapAndDumpBackward() (out 
[][]byte) { m, ok := pm.program.maps[pm.name] if !ok { // should not happen or only when pm.program is // suddenly changed panic(fmt.Sprintf("cannot find map %q", pm.name)) } // step 1: create a new perf ring buffer pmuFds, headers, bases, err := createPerfRingBuffer(true, true, pm.pageCount) if err != nil { return } cpus, err := cpuonline.Get() if err != nil { return } // step 2: swap file descriptors // after it the ebpf programs will write to the new map for index, cpu := range cpus { // assign perf fd to map err := pm.program.UpdateElement(m, unsafe.Pointer(&cpu), unsafe.Pointer(&pmuFds[index]), 0) if err != nil { return } } // step 3: dump old buffer out = pm.DumpBackward() // step 4: close old buffer // unmap for _, base := range m.bases { err := syscall.Munmap(base) if err != nil { return } } for _, fd := range m.pmuFDs { // disable _, _, err2 := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), C.PERF_EVENT_IOC_DISABLE, 0) if err2 != 0 { return } // close if err := syscall.Close(int(fd)); err != nil { return } } // update file descriptors to new perf ring buffer m.pmuFDs = pmuFds m.headers = headers m.bases = bases return } func (pm *PerfMap) DumpBackward() (out [][]byte) { incoming := OrderedBytesArray{timestamp: pm.timestamp} m, ok := pm.program.maps[pm.name] if !ok { // should not happen or only when pm.program is // suddenly changed panic(fmt.Sprintf("cannot find map %q", pm.name)) } cpuCount := len(m.pmuFDs) pageSize := os.Getpagesize() for cpu := 0; cpu < cpuCount; cpu++ { state := C.struct_read_state{} ringBufferLoop: for { var sample *PerfEventSample ok := C.perf_event_dump_backward(C.int(pm.pageCount), C.int(pageSize), unsafe.Pointer(&state), unsafe.Pointer(m.headers[cpu]), unsafe.Pointer(&sample)) switch ok { case 0: break ringBufferLoop // nothing to read case C.PERF_RECORD_SAMPLE: size := sample.Size - 4 b := C.GoBytes(unsafe.Pointer(&sample.data), C.int(size)) incoming.bytesArray = append(incoming.bytesArray, b) } } } if incoming.timestamp != nil { sort.Sort(incoming) } return incoming.bytesArray } // SetTimestampFunc registers a timestamp callback that will be used to // reorder the perf events chronologically. // // If not set, the order of events sent through receiverChan is not guaranteed. // // Typically, the ebpf program will use bpf_ktime_get_ns() to get a timestamp // and store it in the perf event. The perf event struct is opaque to this // package, hence the need for a callback.
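// A hedged example callback, assuming the BPF program writes a
// little-endian u64 obtained from bpf_ktime_get_ns() as the first field
// of every event:
//
//	pm.SetTimestampFunc(func(data *[]byte) uint64 {
//		return binary.LittleEndian.Uint64((*data)[:8])
//	})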
func (pm *PerfMap) SetTimestampFunc(timestamp func(*[]byte) uint64) { pm.timestamp = timestamp } func (pm *PerfMap) PollStart() { incoming := OrderedBytesArray{timestamp: pm.timestamp} m, ok := pm.program.maps[pm.name] if !ok { // should not happen or only when pm.program is // suddenly changed panic(fmt.Sprintf("cannot find map %q", pm.name)) } go func() { cpuCount := len(m.pmuFDs) pageSize := os.Getpagesize() state := C.struct_read_state{} defer func() { close(pm.receiverChan) if pm.lostChan != nil { close(pm.lostChan) } }() for { select { case <-pm.pollStop: break default: perfEventPoll(m.pmuFDs) } harvestLoop: for { select { case <-pm.pollStop: return default: } var harvestCount C.int beforeHarvest := NowNanoseconds() for cpu := 0; cpu < cpuCount; cpu++ { ringBufferLoop: for { var sample *PerfEventSample var lost *PerfEventLost ok := C.perf_event_read(C.int(pm.pageCount), C.int(pageSize), unsafe.Pointer(&state), unsafe.Pointer(m.headers[cpu]), unsafe.Pointer(&sample), unsafe.Pointer(&lost)) switch ok { case 0: break ringBufferLoop // nothing to read case C.PERF_RECORD_SAMPLE: size := sample.Size - 4 b := C.GoBytes(unsafe.Pointer(&sample.data), C.int(size)) incoming.bytesArray = append(incoming.bytesArray, b) harvestCount++ if pm.timestamp == nil { continue ringBufferLoop } if incoming.timestamp(&b) > beforeHarvest { // see comment below break ringBufferLoop } case C.PERF_RECORD_LOST: if pm.lostChan != nil { select { case pm.lostChan <- lost.Lost: case <-pm.pollStop: return } } default: // ignore unknown events } } } if incoming.timestamp != nil { sort.Sort(incoming) } for incoming.Len() > 0 { if incoming.timestamp != nil && incoming.timestamp(&incoming.bytesArray[0]) > beforeHarvest { // This record has been sent after the beginning of the harvest. Stop // processing here to keep the order. "incoming" is sorted, so the next // elements also must not be processed now. break harvestLoop } select { case pm.receiverChan <- incoming.bytesArray[0]: case <-pm.pollStop: return } // remove first element incoming.bytesArray = incoming.bytesArray[1:] } if harvestCount == 0 && len(incoming.bytesArray) == 0 { break harvestLoop } } } }() } // PollStop stops the goroutine that polls the perf event map. // Callers must not close receiverChan or lostChan: they will be automatically // closed on the sender side. 
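// PollStart and PollStop usually bracket a consumer goroutine. A hedged
// sketch ("events" is a hypothetical perf map section; the channel sizes
// are arbitrary):
//
//	eventCh := make(chan []byte, 64)
//	lostCh := make(chan uint64, 64)
//	pm, err := elf.InitPerfMap(mod, "events", eventCh, lostCh)
//	if err != nil {
//		// handle error
//	}
//	pm.PollStart()
//	go func() {
//		for data := range eventCh {
//			_ = data // decode the raw sample here
//		}
//	}()
//	// ...when done:
//	pm.PollStop()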
func (pm *PerfMap) PollStop() { close(pm.pollStop) } func perfEventPoll(fds []C.int) error { var pfds []C.struct_pollfd for i := range fds { var pfd C.struct_pollfd pfd.fd = fds[i] pfd.events = C.POLLIN pfds = append(pfds, pfd) } _, err := C.poll(&pfds[0], C.nfds_t(len(fds)), 500) if err != nil { return fmt.Errorf("error polling: %v", err.(syscall.Errno)) } return nil } // Assume the timestamp is at the beginning of the user struct type OrderedBytesArray struct { bytesArray [][]byte timestamp func(*[]byte) uint64 } func (a OrderedBytesArray) Len() int { return len(a.bytesArray) } func (a OrderedBytesArray) Swap(i, j int) { a.bytesArray[i], a.bytesArray[j] = a.bytesArray[j], a.bytesArray[i] } func (a OrderedBytesArray) Less(i, j int) bool { return a.timestamp(&a.bytesArray[i]) < a.timestamp(&a.bytesArray[j]) } // Matching 'struct perf_event_header in <linux/perf_event.h> type PerfEventHeader struct { Type uint32 Misc uint16 TotalSize uint16 } // Matching 'struct perf_event_lost in kernel sources type PerfEventLost struct { PerfEventHeader Id uint64 Lost uint64 } // NowNanoseconds returns a time that can be compared to bpf_ktime_get_ns() func NowNanoseconds() uint64 { var ts syscall.Timespec syscall.Syscall(syscall.SYS_CLOCK_GETTIME, 1 /* CLOCK_MONOTONIC */, uintptr(unsafe.Pointer(&ts)), 0) sec, nsec := ts.Unix() return 1000*1000*1000*uint64(sec) + uint64(nsec) } gobpf-0.2.0/elf/perf_unsupported.go000066400000000000000000000005761404447410300173160ustar00rootroot00000000000000// +build !linux package elf type PerfMap struct{} func InitPerfMap(b *Module, mapName string, receiverChan chan []byte, lostChan chan uint64) (*PerfMap, error) { return nil, errNotSupported } func (pm *PerfMap) SetTimestampFunc(timestamp func(*[]byte) uint64) {} func (pm *PerfMap) PollStart() {} func (pm *PerfMap) PollStop() {} func NowNanoseconds() uint64 { return 0 } gobpf-0.2.0/elf/pinning.go000066400000000000000000000040741404447410300153530ustar00rootroot00000000000000// +build linux package elf import ( "fmt" "os" "path/filepath" "strings" "unsafe" "github.com/iovisor/gobpf/pkg/bpffs" ) /* #include #include #include #include #include extern __u64 ptr_to_u64(void *); int bpf_pin_object(int fd, const char *pathname) { union bpf_attr attr; memset(&attr, 0, sizeof(attr)); attr.pathname = ptr_to_u64((void *)pathname); attr.bpf_fd = fd; return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr)); } */ import "C" const ( BPFDirGlobals = "globals" // as in iproute2's BPF_DIR_GLOBALS BPFFSPath = "/sys/fs/bpf/" ) func validPinPath(PinPath string) bool { if !strings.HasPrefix(PinPath, BPFFSPath) { return false } return filepath.Clean(PinPath) == PinPath } func pinObject(fd int, pinPath string) error { mounted, err := bpffs.IsMounted() if err != nil { return fmt.Errorf("error checking if %q is mounted: %v", BPFFSPath, err) } if !mounted { return fmt.Errorf("bpf fs not mounted at %q", BPFFSPath) } err = os.MkdirAll(filepath.Dir(pinPath), 0755) if err != nil { return fmt.Errorf("error creating directory %q: %v", filepath.Dir(pinPath), err) } _, err = os.Stat(pinPath) if err == nil { return fmt.Errorf("aborting, found file at %q", pinPath) } if err != nil && !os.IsNotExist(err) { return fmt.Errorf("failed to stat %q: %v", pinPath, err) } pinPathC := C.CString(pinPath) defer C.free(unsafe.Pointer(pinPathC)) ret, err := C.bpf_pin_object(C.int(fd), pinPathC) if ret != 0 { return fmt.Errorf("error pinning object to %q: %v", pinPath, err) } return nil } // PinObjectGlobal pins an object to a name in a namespace, // e.g.
// PinObjectGlobal pins an object to a name in a global namespace,
// e.g. `/sys/fs/bpf/my-namespace/globals/my-name`
func PinObjectGlobal(fd int, namespace, name string) error {
	pinPath := filepath.Join(BPFFSPath, namespace, BPFDirGlobals, name)
	return pinObject(fd, pinPath)
}

// PinObject pins an object to a path
func PinObject(fd int, pinPath string) error {
	if !validPinPath(pinPath) {
		return fmt.Errorf("not a valid pin path: %s", pinPath)
	}
	return pinObject(fd, pinPath)
}
gobpf-0.2.0/elf/table.go000066400000000000000000000116631404447410300150020ustar00rootroot00000000000000// +build linux

// Copyright 2016 Cilium Project
// Copyright 2016 Sylvain Afchain
// Copyright 2016 Kinvolk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package elf

import (
	"fmt"
	"syscall"
	"unsafe"
)

/*
#include <linux/bpf.h>
#include <linux/unistd.h>

extern __u64 ptr_to_u64(void *);

// from https://github.com/cilium/cilium/blob/master/pkg/bpf/bpf.go
// Apache License, Version 2.0

static void create_bpf_update_elem(int fd, void *key, void *value,
			unsigned long long flags, void *attr)
{
	union bpf_attr* ptr_bpf_attr;
	ptr_bpf_attr = (union bpf_attr*)attr;
	ptr_bpf_attr->map_fd = fd;
	ptr_bpf_attr->key = ptr_to_u64(key);
	ptr_bpf_attr->value = ptr_to_u64(value);
	ptr_bpf_attr->flags = flags;
}

static void create_bpf_lookup_elem(int fd, void *key, void *value, void *attr)
{
	union bpf_attr* ptr_bpf_attr;
	ptr_bpf_attr = (union bpf_attr*)attr;
	ptr_bpf_attr->map_fd = fd;
	ptr_bpf_attr->key = ptr_to_u64(key);
	ptr_bpf_attr->value = ptr_to_u64(value);
}

static void next_bpf_elem(int fd, void *key, void *next_key, void *attr)
{
	union bpf_attr* ptr_bpf_attr;
	ptr_bpf_attr = (union bpf_attr*)attr;
	ptr_bpf_attr->map_fd = fd;
	ptr_bpf_attr->key = ptr_to_u64(key);
	ptr_bpf_attr->next_key = ptr_to_u64(next_key);
}
*/
import "C"

// UpdateElement stores value in key in the map stored in mp.
// The flags can have the following values (if you include "uapi/linux/bpf.h"):
// C.BPF_ANY to create new element or update existing;
// C.BPF_NOEXIST to create new element if it didn't exist;
// C.BPF_EXIST to update existing element.
func (b *Module) UpdateElement(mp *Map, key, value unsafe.Pointer, flags uint64) error {
	uba := C.union_bpf_attr{}
	C.create_bpf_update_elem(
		C.int(mp.m.fd),
		key,
		value,
		C.ulonglong(flags),
		unsafe.Pointer(&uba),
	)
	ret, _, err := syscall.Syscall(
		C.__NR_bpf,
		C.BPF_MAP_UPDATE_ELEM,
		uintptr(unsafe.Pointer(&uba)),
		unsafe.Sizeof(uba),
	)
	if ret != 0 || err != 0 {
		return fmt.Errorf("unable to update element: %s", err)
	}

	return nil
}

// LookupElement looks up the given key in the map stored in mp.
// The value is stored in the value unsafe.Pointer.
func (b *Module) LookupElement(mp *Map, key, value unsafe.Pointer) error {
	uba := C.union_bpf_attr{}
	C.create_bpf_lookup_elem(
		C.int(mp.m.fd),
		key,
		value,
		unsafe.Pointer(&uba),
	)
	ret, _, err := syscall.Syscall(
		C.__NR_bpf,
		C.BPF_MAP_LOOKUP_ELEM,
		uintptr(unsafe.Pointer(&uba)),
		unsafe.Sizeof(uba),
	)
	if ret != 0 || err != 0 {
		return fmt.Errorf("unable to lookup element: %s", err)
	}

	return nil
}
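// Example (sketch) of the unsafe.Pointer calling convention, for a map
// declared with key_size = sizeof(int) and value_size = sizeof(unsigned int).
// "module" and "mp" are placeholders for a loaded *Module and one of its
// maps; the 0 flag value stands for BPF_ANY:
//
//	key := int32(1)
//	val := uint32(42)
//	if err := module.UpdateElement(mp, unsafe.Pointer(&key), unsafe.Pointer(&val), 0); err != nil {
//		return err
//	}
//	var out uint32
//	if err := module.LookupElement(mp, unsafe.Pointer(&key), unsafe.Pointer(&out)); err != nil {
//		return err
//	}
//	// out == 42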
// LookupAndDeleteElement looks up and deletes the element in the map stored in mp.
// The value is stored in the value unsafe.Pointer.
func (b *Module) LookupAndDeleteElement(mp *Map, value unsafe.Pointer) error {
	uba := C.union_bpf_attr{}
	C.create_bpf_lookup_elem(
		C.int(mp.m.fd),
		unsafe.Pointer(nil),
		value,
		unsafe.Pointer(&uba),
	)
	ret, _, err := syscall.Syscall(
		C.__NR_bpf,
		C.BPF_MAP_LOOKUP_AND_DELETE_ELEM,
		uintptr(unsafe.Pointer(&uba)),
		unsafe.Sizeof(uba),
	)
	if ret != 0 || err != 0 {
		return fmt.Errorf("unable to lookup and delete element: %s", err)
	}

	return nil
}

// DeleteElement deletes the given key in the map stored in mp.
// The key is stored in the key unsafe.Pointer.
func (b *Module) DeleteElement(mp *Map, key unsafe.Pointer) error {
	uba := C.union_bpf_attr{}
	value := unsafe.Pointer(nil)
	C.create_bpf_lookup_elem(
		C.int(mp.m.fd),
		key,
		value,
		unsafe.Pointer(&uba),
	)
	ret, _, err := syscall.Syscall(
		C.__NR_bpf,
		C.BPF_MAP_DELETE_ELEM,
		uintptr(unsafe.Pointer(&uba)),
		unsafe.Sizeof(uba),
	)
	if ret != 0 || err != 0 {
		return fmt.Errorf("unable to delete element: %s", err)
	}

	return nil
}

// LookupNextElement looks up the next element in mp using the given key.
// The next key and the value are stored in the nextKey and value parameters.
// Returns false once the end of mp is reached.
func (b *Module) LookupNextElement(mp *Map, key, nextKey, value unsafe.Pointer) (bool, error) {
	uba := C.union_bpf_attr{}
	C.next_bpf_elem(
		C.int(mp.m.fd),
		key,
		nextKey,
		unsafe.Pointer(&uba),
	)
	ret, _, err := syscall.Syscall(
		C.__NR_bpf,
		C.BPF_MAP_GET_NEXT_KEY,
		uintptr(unsafe.Pointer(&uba)),
		unsafe.Sizeof(uba),
	)

	if err == syscall.ENOENT {
		return false, nil
	}
	if err != 0 {
		return false, fmt.Errorf("unable to find next element: %s", err)
	}
	if ret != 0 {
		return false, nil
	}

	if err := b.LookupElement(mp, nextKey, value); err != nil {
		return false, err
	}
	return true, nil
}
gobpf-0.2.0/elf/utsname_int8.go000066400000000000000000000003651404447410300163260ustar00rootroot00000000000000// +build linux,386 linux,amd64 linux,arm64

package elf

func utsnameStr(in []int8) string {
	out := make([]byte, 0, len(in))
	for i := 0; i < len(in); i++ {
		if in[i] == 0 {
			break
		}
		out = append(out, byte(in[i]))
	}
	return string(out)
}
gobpf-0.2.0/elf/utsname_uint8.go000066400000000000000000000003761404447410300165150ustar00rootroot00000000000000// +build linux,arm linux,ppc64 linux,ppc64le linux,s390x

package elf

func utsnameStr(in []uint8) string {
	out := make([]byte, 0, len(in))
	for i := 0; i < len(in); i++ {
		if in[i] == 0 {
			break
		}
		out = append(out, byte(in[i]))
	}
	return string(out)
}
gobpf-0.2.0/examples/000077500000000000000000000000001404447410300144255ustar00rootroot00000000000000gobpf-0.2.0/examples/bcc/000077500000000000000000000000001404447410300151545ustar00rootroot00000000000000gobpf-0.2.0/examples/bcc/bash_readline/000077500000000000000000000000001404447410300177345ustar00rootroot00000000000000gobpf-0.2.0/examples/bcc/bash_readline/bash_readline.go000066400000000000000000000050121404447410300230410ustar00rootroot00000000000000// Copyright 2017 Louis McCormack
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
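// bash_readline attaches a uretprobe to the readline() function in
// /bin/bash and prints every command line entered in any running bash
// shell, together with the PID of the shell. It needs to run as root.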
package main import ( "bytes" "encoding/binary" "fmt" "os" "os/signal" bpf "github.com/iovisor/gobpf/bcc" ) const source string = ` #include struct readline_event_t { u32 pid; char str[80]; } __attribute__((packed)); BPF_PERF_OUTPUT(readline_events); int get_return_value(struct pt_regs *ctx) { struct readline_event_t event = {}; u32 pid; if (!PT_REGS_RC(ctx)) return 0; pid = bpf_get_current_pid_tgid(); event.pid = pid; bpf_probe_read(&event.str, sizeof(event.str), (void *)PT_REGS_RC(ctx)); readline_events.perf_submit(ctx, &event, sizeof(event)); return 0; } ` type readlineEvent struct { Pid uint32 Str [80]byte } func main() { m := bpf.NewModule(source, []string{}) defer m.Close() readlineUretprobe, err := m.LoadUprobe("get_return_value") if err != nil { fmt.Fprintf(os.Stderr, "Failed to load get_return_value: %s\n", err) os.Exit(1) } err = m.AttachUretprobe("/bin/bash", "readline", readlineUretprobe, -1) if err != nil { fmt.Fprintf(os.Stderr, "Failed to attach return_value: %s\n", err) os.Exit(1) } table := bpf.NewTable(m.TableId("readline_events"), m) channel := make(chan []byte) perfMap, err := bpf.InitPerfMap(table, channel, nil) if err != nil { fmt.Fprintf(os.Stderr, "Failed to init perf map: %s\n", err) os.Exit(1) } sig := make(chan os.Signal, 1) signal.Notify(sig, os.Interrupt, os.Kill) fmt.Printf("%10s\t%s\n", "PID", "COMMAND") go func() { var event readlineEvent for { data := <-channel err := binary.Read(bytes.NewBuffer(data), binary.LittleEndian, &event) if err != nil { fmt.Printf("failed to decode received data: %s\n", err) continue } // Convert C string (null-terminated) to Go string comm := string(event.Str[:bytes.IndexByte(event.Str[:], 0)]) fmt.Printf("%10d\t%s\n", event.Pid, comm) } }() perfMap.Start() <-sig perfMap.Stop() } gobpf-0.2.0/examples/bcc/execsnoop/000077500000000000000000000000001404447410300171575ustar00rootroot00000000000000gobpf-0.2.0/examples/bcc/execsnoop/execsnoop.go000066400000000000000000000201411404447410300215070ustar00rootroot00000000000000// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "bufio" "bytes" "encoding/binary" "flag" "fmt" "os" "os/signal" "strconv" "strings" "unsafe" bpf "github.com/iovisor/gobpf/bcc" ) import "C" type EventType int32 const ( eventArg EventType = iota eventRet ) const source string = ` #include #include #include #define ARGSIZE 128 enum event_type { EVENT_ARG, EVENT_RET, }; struct data_t { u64 pid; // PID as in the userspace term (i.e. 
task->tgid in kernel) u64 ppid; // Parent PID as in the userspace term (i.e task->real_parent->tgid in kernel) char comm[TASK_COMM_LEN]; enum event_type type; char argv[ARGSIZE]; int retval; }; BPF_PERF_OUTPUT(events); static int __submit_arg(struct pt_regs *ctx, void *ptr, struct data_t *data) { bpf_probe_read(data->argv, sizeof(data->argv), ptr); events.perf_submit(ctx, data, sizeof(struct data_t)); return 1; } static int submit_arg(struct pt_regs *ctx, void *ptr, struct data_t *data) { const char *argp = NULL; bpf_probe_read(&argp, sizeof(argp), ptr); if (argp) { return __submit_arg(ctx, (void *)(argp), data); } return 0; } int syscall__execve(struct pt_regs *ctx, const char __user *filename, const char __user *const __user *__argv, const char __user *const __user *__envp) { // create data here and pass to submit_arg to save stack space (#555) struct data_t data = {}; struct task_struct *task; data.pid = bpf_get_current_pid_tgid() >> 32; task = (struct task_struct *)bpf_get_current_task(); // Some kernels, like Ubuntu 4.13.0-generic, return 0 // as the real_parent->tgid. // We use the getPpid function as a fallback in those cases. // See https://github.com/iovisor/bcc/issues/1883. data.ppid = task->real_parent->tgid; bpf_get_current_comm(&data.comm, sizeof(data.comm)); data.type = EVENT_ARG; __submit_arg(ctx, (void *)filename, &data); // skip first arg, as we submitted filename #pragma unroll for (int i = 1; i < MAX_ARGS; i++) { if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; } // handle truncated argument list char ellipsis[] = "..."; __submit_arg(ctx, (void *)ellipsis, &data); out: return 0; } int do_ret_sys_execve(struct pt_regs *ctx) { struct data_t data = {}; struct task_struct *task; data.pid = bpf_get_current_pid_tgid() >> 32; task = (struct task_struct *)bpf_get_current_task(); // Some kernels, like Ubuntu 4.13.0-generic, return 0 // as the real_parent->tgid. // We use the getPpid function as a fallback in those cases. // See https://github.com/iovisor/bcc/issues/1883. data.ppid = task->real_parent->tgid; bpf_get_current_comm(&data.comm, sizeof(data.comm)); data.type = EVENT_RET; data.retval = PT_REGS_RC(ctx); events.perf_submit(ctx, &data, sizeof(data)); return 0; } ` type execveEvent struct { Pid uint64 Ppid uint64 Comm [16]byte Type int32 Argv [128]byte RetVal int32 } type eventPayload struct { Time string `json:"time,omitempty"` Comm string `json:"comm"` Pid uint64 `json:"pid"` Ppid string `json:"ppid"` Argv string `json:"argv"` RetVal int32 `json:"retval"` } // getPpid is a fallback to read the parent PID from /proc. // Some kernel versions, like 4.13.0 return 0 getting the parent PID // from the current task, so we need to use this fallback to have // the parent PID in any kernel. 
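// The line parsed from /proc/<pid>/status looks like:
//
//	PPid:	1234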
func getPpid(pid uint64) uint64 { f, err := os.OpenFile(fmt.Sprintf("/proc/%d/status", pid), os.O_RDONLY, os.ModePerm) if err != nil { return 0 } defer f.Close() sc := bufio.NewScanner(f) for sc.Scan() { text := sc.Text() if strings.Contains(text, "PPid:") { f := strings.Fields(text) i, _ := strconv.ParseUint(f[len(f)-1], 10, 64) return i } } return 0 } func main() { run() } func run() { traceFailed := flag.Bool("x", false, "trace failed exec()s") timestamps := flag.Bool("t", false, "include timestamps") quotemarks := flag.Bool("q", false, `add "quotemarks" around arguments`) filterComm := flag.String("n", "", `only print command lines containing a name, for example "main"`) filterArg := flag.String("l", "", `only print command where arguments contain an argument, for example "tpkg"`) format := flag.String("o", "table", "output format, either table or json") pretty := flag.Bool("p", false, "pretty print json output") maxArgs := flag.Uint64("m", 20, "maximum number of arguments parsed and displayed, defaults to 20") flag.Parse() m := bpf.NewModule(strings.Replace(source, "MAX_ARGS", strconv.FormatUint(*maxArgs, 10), -1), []string{}) defer m.Close() fnName := bpf.GetSyscallFnName("execve") kprobe, err := m.LoadKprobe("syscall__execve") if err != nil { fmt.Fprintf(os.Stderr, "Failed to load syscall__execve: %s\n", err) os.Exit(1) } // passing -1 for maxActive signifies to use the default // according to the kernel kprobes documentation if err := m.AttachKprobe(fnName, kprobe, -1); err != nil { fmt.Fprintf(os.Stderr, "Failed to attach syscall__execve: %s\n", err) os.Exit(1) } kretprobe, err := m.LoadKprobe("do_ret_sys_execve") if err != nil { fmt.Fprintf(os.Stderr, "Failed to load do_ret_sys_execve: %s\n", err) os.Exit(1) } // passing -1 for maxActive signifies to use the default // according to the kernel kretprobes documentation if err := m.AttachKretprobe(fnName, kretprobe, -1); err != nil { fmt.Fprintf(os.Stderr, "Failed to attach do_ret_sys_execve: %s\n", err) os.Exit(1) } table := bpf.NewTable(m.TableId("events"), m) channel := make(chan []byte, 1000) perfMap, err := bpf.InitPerfMap(table, channel, nil) if err != nil { fmt.Fprintf(os.Stderr, "Failed to init perf map: %s\n", err) os.Exit(1) } sig := make(chan os.Signal, 1) signal.Notify(sig, os.Interrupt, os.Kill) go func() { out := newOutput(*format, *pretty, *timestamps) out.PrintHeader() args := make(map[uint64][]string) for { data := <-channel var event execveEvent err := binary.Read(bytes.NewBuffer(data), bpf.GetHostByteOrder(), &event) if err != nil { fmt.Printf("failed to decode received data: %s\n", err) continue } if eventArg == EventType(event.Type) { e, ok := args[event.Pid] if !ok { e = make([]string, 0) } argv := (*C.char)(unsafe.Pointer(&event.Argv)) e = append(e, C.GoString(argv)) args[event.Pid] = e } else { if event.RetVal != 0 && !*traceFailed { delete(args, event.Pid) continue } comm := C.GoString((*C.char)(unsafe.Pointer(&event.Comm))) if *filterComm != "" && !strings.Contains(comm, *filterComm) { delete(args, event.Pid) continue } argv, ok := args[event.Pid] if !ok { continue } if *filterArg != "" && !strings.Contains(strings.Join(argv, " "), *filterArg) { delete(args, event.Pid) continue } p := eventPayload{ Pid: event.Pid, Ppid: "?", Comm: comm, RetVal: event.RetVal, } if event.Ppid == 0 { event.Ppid = getPpid(event.Pid) } if event.Ppid != 0 { p.Ppid = strconv.FormatUint(event.Ppid, 10) } if *quotemarks { var b bytes.Buffer for i, a := range argv { b.WriteString(strings.Replace(a, `"`, `\"`, -1)) if i != len(argv)-1 
{ b.WriteString(" ") } } p.Argv = b.String() } else { p.Argv = strings.Join(argv, " ") } p.Argv = strings.TrimSpace(strings.Replace(p.Argv, "\n", "\\n", -1)) out.PrintLine(p) delete(args, event.Pid) } } }() perfMap.Start() <-sig perfMap.Stop() } gobpf-0.2.0/examples/bcc/execsnoop/output.go000066400000000000000000000033111404447410300210440ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "time" ) type output interface { PrintHeader() PrintLine(eventPayload) } type timing struct { start time.Time } func newTiming() timing { return timing{time.Now()} } func (t timing) Now() float64 { return time.Now().Sub(t.start).Seconds() } func newOutput(name string, pretty, timestamp bool) output { switch name { case "json": return newJSONOutput(pretty, timestamp) } return newTableOutput(timestamp) } type tableOutput struct { timing timing timestamp bool } func (t tableOutput) PrintHeader() { header := "%-16s %-6s %-6s %3s %s\n" args := []interface{}{"PCOMM", "PID", "PPID", "RET", "ARGS"} if t.timestamp { header = "%-8s" + header args = []interface{}{"TIME(s)", "PCOMM", "PID", "PPID", "RET", "ARGS"} } fmt.Printf(header, args...) } func (t tableOutput) PrintLine(e eventPayload) { header := "%-16s %-6d %-6s %3d %s\n" args := []interface{}{e.Comm, e.Pid, e.Ppid, e.RetVal, e.Argv} if t.timestamp { header = "%-8.3f" + header args = append([]interface{}{t.timing.Now()}, args...) } fmt.Printf(header, args...) } func newTableOutput(timestamp bool) output { return &tableOutput{newTiming(), timestamp} } type jsonOutput struct { timing timing pretty bool timestamp bool } func (jsonOutput) PrintHeader() { // jsonOutput doesn't have any header } func (j jsonOutput) PrintLine(e eventPayload) { if j.timestamp { e.Time = fmt.Sprintf("%.3f", j.timing.Now()) } var m []byte if j.pretty { m, _ = json.MarshalIndent(e, "", " ") } else { m, _ = json.Marshal(e) } if len(m) > 0 { fmt.Println(string(m)) } } func newJSONOutput(pretty, timestamp bool) output { return jsonOutput{newTiming(), pretty, timestamp} } gobpf-0.2.0/examples/bcc/perf/000077500000000000000000000000001404447410300161105ustar00rootroot00000000000000gobpf-0.2.0/examples/bcc/perf/.gitignore000066400000000000000000000000051404447410300200730ustar00rootroot00000000000000perf gobpf-0.2.0/examples/bcc/perf/perf.go000066400000000000000000000071161404447410300174000ustar00rootroot00000000000000// Copyright 2016 Kinvolk // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
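// This example traces fchownat(2) with a kprobe/kretprobe pair: the kprobe
// stores the syscall arguments in a BPF hash keyed by pid_tgid, and the
// kretprobe looks that entry up again, adds the return value and submits
// the completed event through a perf buffer. Keying by pid_tgid is the
// usual way to correlate entry and exit of the same syscall invocation.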
package main import ( "bytes" "encoding/binary" "fmt" "os" "os/signal" "unsafe" bpf "github.com/iovisor/gobpf/bcc" ) import "C" const source string = ` #include #include typedef struct { u32 pid; uid_t uid; gid_t gid; int ret; char filename[256]; } chown_event_t; BPF_PERF_OUTPUT(chown_events); BPF_HASH(chowncall, u64, chown_event_t); int kprobe__sys_fchownat(struct pt_regs *ctx, int dfd, const char *filename, uid_t uid, gid_t gid, int flag) { u64 pid = bpf_get_current_pid_tgid(); chown_event_t event = { .pid = pid >> 32, .uid = uid, .gid = gid, }; bpf_probe_read(&event.filename, sizeof(event.filename), (void *)filename); chowncall.update(&pid, &event); return 0; } int kretprobe__sys_fchownat(struct pt_regs *ctx) { int ret = PT_REGS_RC(ctx); u64 pid = bpf_get_current_pid_tgid(); chown_event_t *eventp = chowncall.lookup(&pid); if (eventp == 0) { return 0; } chown_event_t event = *eventp; event.ret = ret; chown_events.perf_submit(ctx, &event, sizeof(event)); chowncall.delete(&pid); return 0; }; ` type chownEvent struct { Pid uint32 Uid uint32 Gid uint32 ReturnValue int32 Filename [256]byte } func main() { m := bpf.NewModule(source, []string{}) defer m.Close() chownKprobe, err := m.LoadKprobe("kprobe__sys_fchownat") if err != nil { fmt.Fprintf(os.Stderr, "Failed to load kprobe__sys_fchownat: %s\n", err) os.Exit(1) } syscallName := bpf.GetSyscallFnName("fchownat") // passing -1 for maxActive signifies to use the default // according to the kernel kprobes documentation err = m.AttachKprobe(syscallName, chownKprobe, -1) if err != nil { fmt.Fprintf(os.Stderr, "Failed to attach kprobe__sys_fchownat: %s\n", err) os.Exit(1) } chownKretprobe, err := m.LoadKprobe("kretprobe__sys_fchownat") if err != nil { fmt.Fprintf(os.Stderr, "Failed to load kretprobe__sys_fchownat: %s\n", err) os.Exit(1) } // passing -1 for maxActive signifies to use the default // according to the kernel kretprobes documentation err = m.AttachKretprobe(syscallName, chownKretprobe, -1) if err != nil { fmt.Fprintf(os.Stderr, "Failed to attach kretprobe__sys_fchownat: %s\n", err) os.Exit(1) } table := bpf.NewTable(m.TableId("chown_events"), m) channel := make(chan []byte) perfMap, err := bpf.InitPerfMap(table, channel, nil) if err != nil { fmt.Fprintf(os.Stderr, "Failed to init perf map: %s\n", err) os.Exit(1) } sig := make(chan os.Signal, 1) signal.Notify(sig, os.Interrupt, os.Kill) go func() { var event chownEvent for { data := <-channel err := binary.Read(bytes.NewBuffer(data), binary.LittleEndian, &event) if err != nil { fmt.Printf("failed to decode received data: %s\n", err) continue } filename := (*C.char)(unsafe.Pointer(&event.Filename)) fmt.Printf("uid %d gid %d pid %d called fchownat(2) on %s (return value: %d)\n", event.Uid, event.Gid, event.Pid, C.GoString(filename), event.ReturnValue) } }() perfMap.Start() <-sig perfMap.Stop() } gobpf-0.2.0/examples/bcc/strlen_count/000077500000000000000000000000001404447410300176735ustar00rootroot00000000000000gobpf-0.2.0/examples/bcc/strlen_count/strlen_count.go000066400000000000000000000036561404447410300227530ustar00rootroot00000000000000// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"encoding/binary"
	"flag"
	"fmt"
	"os"
	"os/signal"
	"regexp"

	bpf "github.com/iovisor/gobpf/bcc"
)

import "C"

const source string = `
#include <uapi/linux/ptrace.h>

typedef char strlenkey_t[80];
BPF_HASH(counts, strlenkey_t);

int count(struct pt_regs *ctx) {
	if (!PT_REGS_PARM1(ctx))
		return 0;

	strlenkey_t key;
	u64 zero = 0, *val;
	bpf_probe_read(&key, sizeof(key), (void *)PT_REGS_PARM1(ctx));
	val = counts.lookup_or_init(&key, &zero);
	(*val)++;
	return 0;
}
`

var ansiEscape = regexp.MustCompile(`[[:cntrl:]]`)

func main() {
	pid := flag.Int("pid", -1, "attach to pid, default is all processes")
	flag.Parse()

	m := bpf.NewModule(source, []string{})
	defer m.Close()

	strlenUprobe, err := m.LoadUprobe("count")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to load uprobe count: %s\n", err)
		os.Exit(1)
	}

	err = m.AttachUprobe("c", "strlen", strlenUprobe, *pid)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to attach uprobe to strlen: %s\n", err)
		os.Exit(1)
	}

	table := bpf.NewTable(m.TableId("counts"), m)

	fmt.Println("Tracing strlen()... hit Ctrl-C to end.")

	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	<-sig

	fmt.Printf("%10s %s\n", "COUNT", "STRING")
	for it := table.Iter(); it.Next(); {
		k := ansiEscape.ReplaceAll(it.Key(), []byte{})
		v := binary.LittleEndian.Uint64(it.Leaf())
		fmt.Printf("%10d \"%s\"\n", v, k)
	}
}
gobpf-0.2.0/examples/bcc/xdp/000077500000000000000000000000001404447410300157475ustar00rootroot00000000000000gobpf-0.2.0/examples/bcc/xdp/xdp_drop.go000066400000000000000000000076611404447410300201270ustar00rootroot00000000000000// xdp_drop.go Drop incoming packets on XDP layer and count for which
// protocol type. Based on:
// https://github.com/iovisor/bcc/blob/master/examples/networking/xdp/xdp_drop_count.py
//
// Copyright (c) 2017 GustavoKatel
// Licensed under the Apache License, Version 2.0 (the "License")

package main

import (
	"fmt"
	"os"
	"os/signal"

	bpf "github.com/iovisor/gobpf/bcc"
)

/*
#cgo CFLAGS: -I/usr/include/bcc/compat
#cgo LDFLAGS: -lbcc
#include <bcc/bcc_common.h>
#include <bcc/libbpf.h>
void perf_reader_free(void *ptr);
*/
import "C"

const source string = `
#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

BPF_TABLE("array", int, long, dropcnt, 256);

static inline int parse_ipv4(void *data, u64 nh_off, void *data_end) {
	struct iphdr *iph = data + nh_off;

	if ((void*)&iph[1] > data_end)
		return 0;
	return iph->protocol;
}

static inline int parse_ipv6(void *data, u64 nh_off, void *data_end) {
	struct ipv6hdr *ip6h = data + nh_off;

	if ((void*)&ip6h[1] > data_end)
		return 0;
	return ip6h->nexthdr;
}

int xdp_prog1(struct CTXTYPE *ctx) {
	void* data_end = (void*)(long)ctx->data_end;
	void* data = (void*)(long)ctx->data;

	struct ethhdr *eth = data;

	// drop packets
	int rc = RETURNCODE; // let pass XDP_PASS or redirect to tx via XDP_TX
	long *value;
	uint16_t h_proto;
	uint64_t nh_off = 0;
	int index;

	nh_off = sizeof(*eth);

	if (data + nh_off > data_end)
		return rc;

	h_proto = eth->h_proto;

	// While the following code appears to be duplicated accidentally,
	// it's intentional to handle double tags in ethernet frames.
if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) { struct vlan_hdr *vhdr; vhdr = data + nh_off; nh_off += sizeof(struct vlan_hdr); if (data + nh_off > data_end) return rc; h_proto = vhdr->h_vlan_encapsulated_proto; } if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) { struct vlan_hdr *vhdr; vhdr = data + nh_off; nh_off += sizeof(struct vlan_hdr); if (data + nh_off > data_end) return rc; h_proto = vhdr->h_vlan_encapsulated_proto; } if (h_proto == htons(ETH_P_IP)) index = parse_ipv4(data, nh_off, data_end); else if (h_proto == htons(ETH_P_IPV6)) index = parse_ipv6(data, nh_off, data_end); else index = 0; value = dropcnt.lookup(&index); if (value) lock_xadd(value, 1); return rc; } ` func usage() { fmt.Printf("Usage: %v \n", os.Args[0]) fmt.Printf("e.g.: %v eth0\n", os.Args[0]) os.Exit(1) } func main() { var device string if len(os.Args) != 2 { usage() } device = os.Args[1] ret := "XDP_DROP" ctxtype := "xdp_md" module := bpf.NewModule(source, []string{ "-w", "-DRETURNCODE=" + ret, "-DCTXTYPE=" + ctxtype, }) defer module.Close() fn, err := module.Load("xdp_prog1", C.BPF_PROG_TYPE_XDP, 1, 65536) if err != nil { fmt.Fprintf(os.Stderr, "Failed to load xdp prog: %v\n", err) os.Exit(1) } err = module.AttachXDP(device, fn) if err != nil { fmt.Fprintf(os.Stderr, "Failed to attach xdp prog: %v\n", err) os.Exit(1) } defer func() { if err := module.RemoveXDP(device); err != nil { fmt.Fprintf(os.Stderr, "Failed to remove XDP from %s: %v\n", device, err) } }() fmt.Println("Dropping packets, hit CTRL+C to stop") sig := make(chan os.Signal, 1) signal.Notify(sig, os.Interrupt, os.Kill) dropcnt := bpf.NewTable(module.TableId("dropcnt"), module) <-sig fmt.Printf("\n{IP protocol-number}: {total dropped pkts}\n") for it := dropcnt.Iter(); it.Next(); { key := bpf.GetHostByteOrder().Uint32(it.Key()) value := bpf.GetHostByteOrder().Uint64(it.Leaf()) if value > 0 { fmt.Printf("%v: %v pkts\n", key, value) } } } gobpf-0.2.0/examples/tracepipe/000077500000000000000000000000001404447410300164015ustar00rootroot00000000000000gobpf-0.2.0/examples/tracepipe/.gitignore000066400000000000000000000000121404447410300203620ustar00rootroot00000000000000tracepipe gobpf-0.2.0/examples/tracepipe/tracepipe.go000066400000000000000000000017211404447410300207050ustar00rootroot00000000000000// Copyright 2017 Kinvolk // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
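// tracepipe prints everything that arrives on
// /sys/kernel/debug/tracing/trace_pipe. Note that trace_pipe is a single,
// global, consume-once stream shared by the whole system, so events read
// here are no longer seen by any other reader (see pkg/tracepipe).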
package main import ( "fmt" "os" "github.com/iovisor/gobpf/pkg/tracepipe" ) func main() { tp, err := tracepipe.New() if err != nil { fmt.Fprintf(os.Stderr, "%s\n", err) os.Exit(1) } defer tp.Close() channel, errorChannel := tp.Channel() for { select { case event := <-channel: fmt.Printf("%+v\n", event) case err := <-errorChannel: fmt.Printf("%+v\n", err) } } } gobpf-0.2.0/go.mod000066400000000000000000000000511404447410300137110ustar00rootroot00000000000000module github.com/iovisor/gobpf go 1.15 gobpf-0.2.0/pkg/000077500000000000000000000000001404447410300133705ustar00rootroot00000000000000gobpf-0.2.0/pkg/bpffs/000077500000000000000000000000001404447410300144705ustar00rootroot00000000000000gobpf-0.2.0/pkg/bpffs/fs.go000066400000000000000000000027701404447410300154350ustar00rootroot00000000000000package bpffs import ( "fmt" "syscall" "unsafe" ) const BPFFSPath = "/sys/fs/bpf" var FsMagicBPFFS int32 func init() { // https://github.com/coreutils/coreutils/blob/v8.27/src/stat.c#L275 // https://github.com/torvalds/linux/blob/v4.8/include/uapi/linux/magic.h#L80 magic := uint32(0xCAFE4A11) // 0xCAFE4A11 overflows an int32, which is what's expected by Statfs_t.Type in 32bit platforms. // To avoid conditional compilation for all 32bit/64bit platforms, we use an unsafe cast FsMagicBPFFS = *(*int32)(unsafe.Pointer(&magic)) } // IsMountedAt checks if the BPF fs is mounted already in the custom location func IsMountedAt(mountpoint string) (bool, error) { var data syscall.Statfs_t if err := syscall.Statfs(mountpoint, &data); err != nil { return false, fmt.Errorf("cannot statfs %q: %v", mountpoint, err) } return int32(data.Type) == FsMagicBPFFS, nil } // IsMounted checks if the BPF fs is mounted already in the default location func IsMounted() (bool, error) { return IsMountedAt(BPFFSPath) } // MountAt mounts the BPF fs in the custom location (if not already mounted) func MountAt(mountpoint string) error { mounted, err := IsMountedAt(mountpoint) if err != nil { return err } if mounted { return nil } if err := syscall.Mount(mountpoint, mountpoint, "bpf", 0, ""); err != nil { return fmt.Errorf("error mounting %q: %v", mountpoint, err) } return nil } // Mount mounts the BPF fs in the default location (if not already mounted) func Mount() error { return MountAt(BPFFSPath) } gobpf-0.2.0/pkg/cpuonline/000077500000000000000000000000001404447410300153645ustar00rootroot00000000000000gobpf-0.2.0/pkg/cpuonline/cpuonline.go000066400000000000000000000005471404447410300177150ustar00rootroot00000000000000package cpuonline import ( "io/ioutil" "github.com/iovisor/gobpf/pkg/cpurange" ) const cpuOnline = "/sys/devices/system/cpu/online" // Get returns a slice with the online CPUs, for example `[0, 2, 3]` func Get() ([]uint, error) { buf, err := ioutil.ReadFile(cpuOnline) if err != nil { return nil, err } return cpurange.ReadCPURange(string(buf)) } gobpf-0.2.0/pkg/cpupossible/000077500000000000000000000000001404447410300157205ustar00rootroot00000000000000gobpf-0.2.0/pkg/cpupossible/cpupossible.go000066400000000000000000000005571404447410300206060ustar00rootroot00000000000000package cpupossible import ( "io/ioutil" "github.com/iovisor/gobpf/pkg/cpurange" ) const cpuPossible = "/sys/devices/system/cpu/possible" // Get returns a slice with the online CPUs, for example `[0, 2, 3]` func Get() ([]uint, error) { buf, err := ioutil.ReadFile(cpuPossible) if err != nil { return nil, err } return cpurange.ReadCPURange(string(buf)) } 
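// Sketch (not part of this package) of why the possible-CPU list matters:
// userspace lookups on BPF per-CPU maps return one value slot per possible
// CPU, with each slot padded to 8 bytes. "elementSize" below is a
// placeholder for the map's value_size:
//
//	cpus, err := cpupossible.Get()
//	if err != nil {
//		return err
//	}
//	slot := (elementSize + 7) &^ 7
//	buf := make([]byte, len(cpus)*slot)
//	// pass unsafe.Pointer(&buf[0]) as the value pointer of a lookup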
gobpf-0.2.0/pkg/cpurange/000077500000000000000000000000001404447410300151745ustar00rootroot00000000000000gobpf-0.2.0/pkg/cpurange/cpu_range.go000066400000000000000000000013321404447410300174650ustar00rootroot00000000000000package cpurange import ( "strconv" "strings" ) // loosely based on https://github.com/iovisor/bcc/blob/v0.3.0/src/python/bcc/utils.py#L15 func ReadCPURange(cpuRangeStr string) ([]uint, error) { var cpus []uint cpuRangeStr = strings.Trim(cpuRangeStr, "\n ") for _, cpuRange := range strings.Split(cpuRangeStr, ",") { rangeOp := strings.SplitN(cpuRange, "-", 2) first, err := strconv.ParseUint(rangeOp[0], 10, 32) if err != nil { return nil, err } if len(rangeOp) == 1 { cpus = append(cpus, uint(first)) continue } last, err := strconv.ParseUint(rangeOp[1], 10, 32) if err != nil { return nil, err } for n := first; n <= last; n++ { cpus = append(cpus, uint(n)) } } return cpus, nil } gobpf-0.2.0/pkg/cpurange/cpu_range_test.go000066400000000000000000000017571404447410300205370ustar00rootroot00000000000000package cpurange import ( "testing" ) func TestGetOnlineCPUs(t *testing.T) { tests := []struct { data string expected []uint valid bool }{ { "", nil, false, }, { "0-3\n", []uint{0, 1, 2, 3}, true, }, { " 0-2,5", []uint{0, 1, 2, 5}, true, }, { "0,2,4-5,7-9", []uint{0, 2, 4, 5, 7, 8, 9}, true, }, { "0,2", []uint{0, 2}, true, }, { "0", []uint{0}, true, }, { "-2,5", nil, false, }, { "2-@,5", nil, false, }, { "-", nil, false, }, } for _, test := range tests { cpus, err := ReadCPURange(test.data) if test.valid && err != nil { t.Errorf("expected input %q to not return an error but got: %v\n", test.data, err) } if !test.valid && err == nil { t.Errorf("expected input %q to return an error\n", test.data) } for i := range cpus { if cpus[i] != test.expected[i] { t.Errorf("expected %q but got %q\n", test.expected, cpus) break } } } } gobpf-0.2.0/pkg/ksym/000077500000000000000000000000001404447410300143535ustar00rootroot00000000000000gobpf-0.2.0/pkg/ksym/ksym.go000066400000000000000000000017131404447410300156670ustar00rootroot00000000000000package ksym import ( "bufio" "errors" "io" "os" "strings" "sync" ) const ( KALLSYMS = "/proc/kallsyms" ) type ksymCache struct { sync.RWMutex ksym map[string]string } var cache ksymCache // Ksym translates a kernel memory address into a kernel function name // using `/proc/kallsyms` func Ksym(addr string) (string, error) { if cache.ksym == nil { cache.ksym = make(map[string]string) } cache.Lock() defer cache.Unlock() if _, ok := cache.ksym[addr]; !ok { fd, err := os.Open(KALLSYMS) if err != nil { return "", err } defer fd.Close() fn := ksym(addr, fd) if fn == "" { return "", errors.New("kernel function not found for " + addr) } cache.ksym[addr] = fn } return cache.ksym[addr], nil } func ksym(addr string, r io.Reader) string { s := bufio.NewScanner(r) for s.Scan() { l := s.Text() ar := strings.Split(l, " ") if len(ar) != 3 { continue } if ar[0] == addr { return ar[2] } } return "" } gobpf-0.2.0/pkg/ksym/ksym_test.go000066400000000000000000000004011404447410300167170ustar00rootroot00000000000000package ksym import ( "strings" "testing" ) func TestKsym(t *testing.T) { data := "ffffffff91b2a340 T cgroup_freezing" r := strings.NewReader(data) fn := ksym("ffffffff91b2a340", r) if fn != "cgroup_freezing" { t.Error("unexpected result") } } 
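// Example (sketch) of the exported API: Ksym expects the address formatted
// exactly as /proc/kallsyms prints it, i.e. lower-case hex without a "0x"
// prefix, zero-padded to 16 digits on 64-bit kernels:
//
//	name, err := Ksym(fmt.Sprintf("%016x", addr))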
gobpf-0.2.0/pkg/progtestrun/000077500000000000000000000000001404447410300157645ustar00rootroot00000000000000gobpf-0.2.0/pkg/progtestrun/prog_test_run.go000066400000000000000000000041131404447410300212040ustar00rootroot00000000000000package progtestrun

import (
	"fmt"
	"unsafe"
)

/*
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <linux/unistd.h>
#include <linux/version.h>

static __u64 ptr_to_u64(void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

int bpf_prog_test_run(int fd, int repeat, char *data, int data_size,
		char *data_out, int *data_out_size, int *retval, int *duration)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = fd;
	attr.test.data_in = ptr_to_u64((void *) data);
	attr.test.data_out = ptr_to_u64((void *) data_out);
	attr.test.data_size_in = data_size;
	attr.test.repeat = repeat;

	ret = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	if (data_out_size)
		*data_out_size = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;
	return ret;
#else
	errno = ENOSYS;
	return -1;
#endif
}
*/
import "C"

// Run exposes BPF_PROG_TEST_RUN to test xdp and skb programs.
// `data` will be passed to your program as `__sk_buff *ptr`.
// `dataOut` (optional) will hold `skb->data` after run, if large enough.
func Run(progFd, repeat int, data []byte, dataOut []byte) (int, int, int, error) {
	if len(data) < 14 {
		// the kernel rejects anything shorter than an ethernet header:
		// http://elixir.free-electrons.com/linux/v4.12/source/net/bpf/test_run.c#L78
		// http://elixir.free-electrons.com/linux/v4.12/source/include/uapi/linux/if_ether.h#L32
		return -1, 0, 0, fmt.Errorf("data must be at least 14 bytes (corresponding to ETH_HLEN)")
	}
	var (
		dataOutPtr  *C.char
		dataOutLen  C.int
		returnValue C.int
		duration    C.int
		dataPtr     = (*C.char)(unsafe.Pointer(&data[0]))
		dataLen     = C.int(len(data))
	)
	if dataOut != nil {
		dataOutPtr = (*C.char)(unsafe.Pointer(&dataOut[0]))
	}
	ret, err := C.bpf_prog_test_run(C.int(progFd), C.int(repeat), dataPtr, dataLen, dataOutPtr, &dataOutLen, &returnValue, &duration)
	if ret != 0 {
		return -1, 0, 0, fmt.Errorf("bpf_prog_test_run failed: %v (%d)", err, ret)
	}
	return int(returnValue), int(duration), int(dataOutLen), nil
}
gobpf-0.2.0/pkg/tracepipe/000077500000000000000000000000001404447410300153445ustar00rootroot00000000000000gobpf-0.2.0/pkg/tracepipe/trace_pipe.go000066400000000000000000000061341404447410300200120ustar00rootroot00000000000000// Copyright 2017 Kinvolk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tracepipe

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"regexp"
	"strings"
)

const tracePipeFile = "/sys/kernel/debug/tracing/trace_pipe"

// TracePipe reads from /sys/kernel/debug/tracing/trace_pipe.
// Note that data can be read only once, i.e. if you have more than
// one tracer / channel, only one will receive an event:
// "Once data is read from this file, it is consumed, and will not be
// read again with a sequential read."
// https://www.kernel.org/doc/Documentation/trace/ftrace.txt type TracePipe struct { file *os.File reader *bufio.Reader stop chan struct{} } // TraceEvent contains the raw event as well as the contents of // every field as string, as defined under "Output format" in // https://www.kernel.org/doc/Documentation/trace/ftrace.txt type TraceEvent struct { Raw string Task string PID string CPU string Flags string Timestamp string Function string Message string } func New() (*TracePipe, error) { f, err := os.Open(tracePipeFile) if err != nil { return nil, err } return &TracePipe{ file: f, reader: bufio.NewReader(f), stop: make(chan struct{}), }, nil } // A line from trace_pipe looks like (leading spaces included): // ` chromium-15581 [000] d... 92783.722567: : Hello, World!` var traceLineRegexp = regexp.MustCompile(`(.{16})-(\d+) +\[(\d{3})\] (.{4}) +(\d+\.\d+)\: (.*?)\: (.*)`) func parseTraceLine(raw string) (*TraceEvent, error) { fields := traceLineRegexp.FindStringSubmatch(raw) if len(fields) != 8 { return nil, fmt.Errorf("received unexpected input %q", raw) } return &TraceEvent{ Raw: raw, Task: strings.Trim(fields[1], " "), PID: fields[2], CPU: fields[3], Flags: fields[4], Timestamp: fields[5], Function: fields[6], Message: fields[7], }, nil } func (t *TracePipe) ReadLine() (*TraceEvent, error) { line, err := t.reader.ReadString('\n') if err != nil { return nil, err } traceEvent, err := parseTraceLine(line) if err != nil { return nil, err } return traceEvent, nil } func (t *TracePipe) Channel() (<-chan *TraceEvent, <-chan error) { channelEvents := make(chan *TraceEvent) channelErrors := make(chan error) go func() { for { select { case <-t.stop: return default: } traceEvent, err := t.ReadLine() if err != nil { if err == io.EOF { continue } channelErrors <- err } else { channelEvents <- traceEvent } } }() return channelEvents, channelErrors } func (t *TracePipe) Close() error { close(t.stop) return t.file.Close() } gobpf-0.2.0/pkg/tracepipe/trace_pipe_test.go000066400000000000000000000031671404447410300210540ustar00rootroot00000000000000package tracepipe import ( "testing" ) func TestParseTraceLine(t *testing.T) { testEvents := []struct { input string expected TraceEvent }{ { " chromium-15581 [000] d... 92783.722567: : Hello, World!", TraceEvent{ Task: "chromium", Function: "", Message: "Hello, World!", }, }, { " curl-18597 [000] dN.. 463.471554: : kretprobe__tcp_v4_connect - pid_tgid 79873506822309\n", TraceEvent{ Task: "curl", Function: "", Message: "kretprobe__tcp_v4_connect - pid_tgid 79873506822309", }, }, { " trace_pipe-23553 [000] .... 205825.968557: sys_enter: NR 0 (3, c420098000, 1000, 0, 0, 0)\n", TraceEvent{ Task: "trace_pipe", Function: "sys_enter", Message: "NR 0 (3, c420098000, 1000, 0, 0, 0)", }, }, { " trace_pipe-23553 [000] .... 
205825.968557: sys_enter: hello: world\n", TraceEvent{ Task: "trace_pipe", Function: "sys_enter", Message: "hello: world", }, }, } for _, testEvent := range testEvents { result, err := parseTraceLine(testEvent.input) if err != nil { t.Errorf("%q could not be parsed", testEvent.input) } if testEvent.expected.Task != result.Task { t.Errorf("result task %q doesn't match expected %q", result.Task, testEvent.expected.Task) } if testEvent.expected.Function != result.Function { t.Errorf("result function %q doesn't match expected %q", result.Function, testEvent.expected.Function) } if testEvent.expected.Message != result.Message { t.Errorf("result message %q doesn't match expected %q", result.Message, testEvent.expected.Message) } } } gobpf-0.2.0/tests/000077500000000000000000000000001404447410300137515ustar00rootroot00000000000000gobpf-0.2.0/tests/build000077500000000000000000000005021404447410300147730ustar00rootroot00000000000000#!/bin/bash set -euo pipefail DUMMY_SRC=dummy.c DUMMY_OBJ=dummy.o clang -O2 -emit-llvm -c ${DUMMY_SRC} -o - | llc -march=bpf -filetype=obj -o ${DUMMY_OBJ} for kernel in 46 48 410 414; do clang -DKERNEL_VERSION="${kernel}" -O2 -emit-llvm -c ${DUMMY_SRC} -o - | llc -march=bpf -filetype=obj -o "dummy-${kernel}.o" done gobpf-0.2.0/tests/dummy-410.o000066400000000000000000000123101404447410300155630ustar00rootroot00000000000000ELF@@d`r RA  "0[  socket__dummycgroup_sock__dummyuprobe__dummyuretprobe__dummykretprobe__dummykprobe__dummycgroup_skb__dummysocket/dummyuprobe/dummyuretprobe/dummykretprobe/dummykprobe/dummymaps/dummy_arraymaps/dummy_percpu_arraymaps/dummy_cgroup_arraymaps/dummy_prog_array.textxdp_passtracepoint__raw_sys_entertracepoint/raw_syscalls/sys_enterxdp_drop_versionmaps/dummy_array_customcgroup/sockmaps/dummy_hashmaps/dummy_percpu_hashmaps/dummy_perfmaps/dummy_stack_tracecgroup/skb.strtab.symtabxdp/prog2xdp/prog1 @@P`p9r(@Xpm e  gobpf-0.2.0/tests/dummy-414.o000066400000000000000000000123101404447410300155670ustar00rootroot00000000000000ELF@@d`r RA  "0[  socket__dummycgroup_sock__dummyuprobe__dummyuretprobe__dummykretprobe__dummykprobe__dummycgroup_skb__dummysocket/dummyuprobe/dummyuretprobe/dummykretprobe/dummykprobe/dummymaps/dummy_arraymaps/dummy_percpu_arraymaps/dummy_cgroup_arraymaps/dummy_prog_array.textxdp_passtracepoint__raw_sys_entertracepoint/raw_syscalls/sys_enterxdp_drop_versionmaps/dummy_array_customcgroup/sockmaps/dummy_hashmaps/dummy_percpu_hashmaps/dummy_perfmaps/dummy_stack_tracecgroup/skb.strtab.symtabxdp/prog2xdp/prog1 @@P`p9r(@Xpm e  gobpf-0.2.0/tests/dummy-46.o000066400000000000000000000102301404447410300155070ustar00rootroot00000000000000ELF @@   &  6?.socket__dummyuprobe__dummyuretprobe__dummykretprobe__dummykprobe__dummysocket/dummyuprobe/dummyuretprobe/dummykretprobe/dummykprobe/dummymaps/dummy_arraymaps/dummy_percpu_arraymaps/dummy_prog_array.text_versionmaps/dummy_array_custommaps/dummy_hashmaps/dummy_percpu_hashmaps/dummy_perfmaps/dummy_stack_trace.strtab.symtabH X@@wPZ`gpM! 1 8P PX hgobpf-0.2.0/tests/dummy-48.o000066400000000000000000000117001404447410300155140ustar00rootroot00000000000000ELF@@@? 
Me u ?.6  socket__dummyuprobe__dummyuretprobe__dummykretprobe__dummykprobe__dummysocket/dummyuprobe/dummyuretprobe/dummykretprobe/dummykprobe/dummymaps/dummy_arraymaps/dummy_percpu_arraymaps/dummy_cgroup_arraymaps/dummy_prog_array.textxdp_passtracepoint__raw_sys_entertracepoint/raw_syscalls/sys_enterxdp_drop_versionmaps/dummy_array_custommaps/dummy_hashmaps/dummy_percpu_hashmaps/dummy_perfmaps/dummy_stack_trace.strtab.symtabxdp/prog2xdp/prog1h @@wPZ`gpM`p 8PhH @  gobpf-0.2.0/tests/dummy.c000066400000000000000000000057421404447410300152600ustar00rootroot00000000000000/* * Compiled with './build' */ #include "../elf/include/uapi/linux/bpf.h" #include "../elf/include/bpf_map.h" #define SEC(NAME) __attribute__((section(NAME), used)) #define PERF_MAX_STACK_DEPTH 127 #define KERNEL_VERSION_GTE(X) (KERNEL_VERSION >= X) struct pt_regs{}; struct bpf_map_def SEC("maps/dummy_hash") dummy_hash = { .type = BPF_MAP_TYPE_HASH, .key_size = sizeof(int), .value_size = sizeof(unsigned int), .max_entries = 128, }; struct bpf_map_def SEC("maps/dummy_array") dummy_array = { .type = BPF_MAP_TYPE_ARRAY, .key_size = sizeof(int), .value_size = sizeof(unsigned int), .max_entries = 128, }; struct bpf_map_def SEC("maps/dummy_prog_array") dummy_prog_array = { .type = BPF_MAP_TYPE_PROG_ARRAY, .key_size = sizeof(int), .value_size = sizeof(unsigned int), .max_entries = 128, }; struct bpf_map_def SEC("maps/dummy_perf") dummy_perf = { .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, .key_size = sizeof(int), .value_size = sizeof(unsigned int), .max_entries = 128, }; #if KERNEL_VERSION_GTE(46) struct bpf_map_def SEC("maps/dummy_percpu_hash") dummy_percpu_hash = { .type = BPF_MAP_TYPE_PERCPU_HASH, .key_size = sizeof(int), .value_size = sizeof(unsigned int), .max_entries = 128, }; struct bpf_map_def SEC("maps/dummy_percpu_array") dummy_percpu_array = { .type = BPF_MAP_TYPE_PERCPU_ARRAY, .key_size = sizeof(int), .value_size = sizeof(unsigned int), .max_entries = 128, }; struct bpf_map_def SEC("maps/dummy_stack_trace") dummy_stack_trace = { .type = BPF_MAP_TYPE_STACK_TRACE, .key_size = sizeof(int), .value_size = PERF_MAX_STACK_DEPTH * sizeof(unsigned long long), .max_entries = 128, }; #endif #if KERNEL_VERSION_GTE(48) struct bpf_map_def SEC("maps/dummy_cgroup_array") dummy_cgroup_array = { .type = BPF_MAP_TYPE_CGROUP_ARRAY, .key_size = sizeof(int), .value_size = sizeof(unsigned int), .max_entries = 128, }; #endif struct bpf_map_def SEC("maps/dummy_array_custom") dummy_array_custom = { .type = BPF_MAP_TYPE_ARRAY, .key_size = sizeof(int), .value_size = sizeof(unsigned int), .max_entries = 128, .pinning = PIN_CUSTOM_NS, }; SEC("kprobe/dummy") int kprobe__dummy(struct pt_regs *ctx) { return 0; } SEC("kretprobe/dummy") int kretprobe__dummy(struct pt_regs *ctx) { return 0; } SEC("uprobe/dummy") int uprobe__dummy(struct pt_regs *ctx) { return 0; } SEC("uretprobe/dummy") int uretprobe__dummy(struct pt_regs *ctx) { return 0; } #if KERNEL_VERSION_GTE(410) SEC("cgroup/skb") int cgroup_skb__dummy(struct __sk_buff *skb) { return 1; } SEC("cgroup/sock") int cgroup_sock__dummy(struct __sk_buff *skb) { return 0; } #endif #if KERNEL_VERSION_GTE(47) SEC("tracepoint/raw_syscalls/sys_enter") int tracepoint__raw_sys_enter() { return 0; } #endif SEC("socket/dummy") int socket__dummy(struct __sk_buff *skb) { return 0; } #if KERNEL_VERSION_GTE(48) SEC("xdp/prog1") int xdp_drop(struct xdp_md *ctx) { return XDP_DROP; } SEC("xdp/prog2") int xdp_pass(struct xdp_md *ctx) { return XDP_PASS; } #endif unsigned int _version SEC("version") = 0xFFFFFFFE; 
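/*
 * Sketch of how a test might load the object built from this file with the
 * elf package (the path assumes `./build` was run in this directory first):
 *
 *	module := elf.NewModule("tests/dummy.o")
 *	if err := module.Load(nil); err != nil {
 *		// handle error
 *	}
 *	defer module.Close()
 */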
gobpf-0.2.0/tests/dummy.o000066400000000000000000000060101404447410300152610ustar00rootroot00000000000000ELFH@@     ?.socket__dummyuprobe__dummyuretprobe__dummykretprobe__dummykprobe__dummysocket/dummyuprobe/dummyuretprobe/dummykretprobe/dummykprobe/dummymaps/dummy_arraymaps/dummy_prog_array.text_versionmaps/dummy_array_custommaps/dummy_hashmaps/dummy_perf.strtab.symtab0@@wPZ`gpM