golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/.github/workflows/test.yml

# Anytime we push to any branch
on: push

jobs:
  test:
    name: Test clang-${{ matrix.clang }}
    runs-on: ubuntu-20.04
    strategy:
      matrix:
        clang: ["9", "14", "16"]
    steps:
      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: ^1.20

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Add apt.llvm.org repo
        # clang-9 is in the upstream focal repo, but not apt.llvm.org.
        if: matrix.clang != 9
        run: |
          wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key 2>/dev/null | sudo apt-key add -
          echo 'deb http://apt.llvm.org/focal/ llvm-toolchain-focal-${{ matrix.clang }} main' | sudo tee /etc/apt/sources.list.d/clang.list
          # Only update the llvm repo, this is a lot faster.
          sudo apt-get update -o Dir::Etc::sourcelist="sources.list.d/clang.list" -o Dir::Etc::sourceparts="-" -o APT::Get::List-Cleanup="0"

      - name: Install clang
        run: sudo apt-get install -y clang-${{ matrix.clang }}

      - name: Check lint
        # gofmt doesn't report any changes
        run: test -z $(gofmt -l ./ | tee /dev/stderr)

      - name: Run tests
        env:
          CLANG: clang-${{ matrix.clang }}
        # tests need to run as root to load XDP programs
        run: sudo -E env "PATH=$PATH" go test ./...

golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/LICENSE

Copyright (c) 2019, Cloudflare. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/README.md

# cbpfc

[![GoDoc](https://godoc.org/github.com/cloudflare/cbpfc?status.svg)](https://godoc.org/github.com/cloudflare/cbpfc)

cbpfc is a classic BPF (cBPF) to extended BPF (eBPF) compiler.
It can compile cBPF to eBPF, or to C, and the generated code should be accepted by the kernel verifier.

[cbpfc/clang](https://godoc.org/github.com/cloudflare/cbpfc/clang) is a simple clang wrapper for compiling C to eBPF.

## Tests

### Dependencies

* `clang`
    * Path can be set via environment variable `$CLANG`

### Unprivileged

* `go test -short`

### Full

* Requires:
    * `root` or `CAP_SYS_ADMIN` to load XDP programs
    * Recent (4.14+) Linux kernel
* `sudo go test`

golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/c.go

package cbpfc

import (
	"fmt"
	"regexp"
	"strings"
	"text/template"

	"github.com/pkg/errors"
	"golang.org/x/net/bpf"
)

const funcTemplate = `
// True if packet matches, false otherwise
{{- if not .NoInline}}
__attribute__((__always_inline__))
static inline
{{- end}}
uint32_t {{.Name}}(const uint8_t *const data, const uint8_t *const data_end) {
	__attribute__((unused))
	uint32_t a, x, m[16];

	__attribute__((unused))
	const uint8_t *indirect;

{{range $i, $b := .Blocks}}
{{$b.Label}}:
__attribute__((unused));
{{- range $i, $s := $b.Statements}}
	{{$s}}
{{- end}}
{{end}}
}`

type cFunction struct {
	Name     string
	NoInline bool
	Blocks   []cBlock
}

// cBPF reg to C symbol
var regToCSym = map[bpf.Register]string{
	bpf.RegA: "a",
	bpf.RegX: "x",
}

// alu operation to C operator
var aluToCOp = map[bpf.ALUOp]string{
	bpf.ALUOpAdd:        "+",
	bpf.ALUOpSub:        "-",
	bpf.ALUOpMul:        "*",
	bpf.ALUOpDiv:        "/",
	bpf.ALUOpOr:         "|",
	bpf.ALUOpAnd:        "&",
	bpf.ALUOpShiftLeft:  "<<",
	bpf.ALUOpShiftRight: ">>",
	bpf.ALUOpMod:        "%",
	bpf.ALUOpXor:        "^",
}

// jump test to a C fmt string for condition
var condToCFmt = map[bpf.JumpTest]string{
	bpf.JumpEqual:          "a == %v",
	bpf.JumpNotEqual:       "a != %v",
	bpf.JumpGreaterThan:    "a > %v",
	bpf.JumpLessThan:       "a < %v",
	bpf.JumpGreaterOrEqual: "a >= %v",
	bpf.JumpLessOrEqual:    "a <= %v",
	bpf.JumpBitsSet:        "a & %v",
	bpf.JumpBitsNotSet:     "!(a & %v)",
}

var funcNameRegex = regexp.MustCompile(`^[A-Za-z_][0-9A-Za-z_]*$`)

// cBlock is a block of compiled C
type cBlock struct {
	*block

	Statements []string
}

type COpts struct {
	// FunctionName is the symbol to use as the generated C function. Must match regex:
	// [A-Za-z_][0-9A-Za-z_]*
	FunctionName string

	// NoInline doesn't force the generated function to be inlined, allowing clang to emit
	// a BPF to BPF call.
	// Requires at least kernel 5.10 (for x86, later for other architectures) if used with tail-calls.
	NoInline bool
}

// ToC compiles a cBPF filter to a C function with a signature of:
//
//	uint32_t opts.FunctionName(const uint8_t *const data, const uint8_t *const data_end)
//
// The function returns the filter's return value:
// 0 if the packet does not match the cBPF filter,
// non-zero if the packet does match.
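//
// A minimal usage sketch (the filter and the name "my_filter" are illustrative,
// not part of the package):
//
//	// Match IPv4 packets: load the EtherType and compare against 0x0800.
//	filter := []bpf.Instruction{
//		bpf.LoadAbsolute{Off: 12, Size: 2},
//		bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x0800, SkipTrue: 0, SkipFalse: 1},
//		bpf.RetConstant{Val: 1},
//		bpf.RetConstant{Val: 0},
//	}
//	c, err := ToC(filter, COpts{FunctionName: "my_filter"})
//	if err != nil {
//		// handle error
//	}
//	// c now holds the C source of my_filter, ready to embed in a larger C program.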
func ToC(filter []bpf.Instruction, opts COpts) (string, error) { if !funcNameRegex.MatchString(opts.FunctionName) { return "", errors.Errorf("invalid FunctionName %q", opts.FunctionName) } blocks, err := compile(filter) if err != nil { return "", err } fun := cFunction{ Name: opts.FunctionName, Blocks: make([]cBlock, len(blocks)), } // Compile blocks to C for i, block := range blocks { fun.Blocks[i], err = blockToC(block) if err != nil { return "", err } } // Fill in the template tmpl, err := template.New("cbfp_func").Parse(funcTemplate) if err != nil { return "", errors.Wrapf(err, "unable to parse func template") } c := strings.Builder{} if err := tmpl.Execute(&c, fun); err != nil { return "", errors.Wrapf(err, "unable to execute func template") } return c.String(), nil } // blockToC compiles a block to C. func blockToC(blk *block) (cBlock, error) { cBlk := cBlock{ block: blk, } for _, insn := range blk.insns { stat, err := insnToC(insn, blk) if err != nil { return cBlk, errors.Wrapf(err, "unable to compile %v", insn) } cBlk.Statements = append(cBlk.Statements, stat...) } return cBlk, nil } // insnToC compiles an instruction to a single C line / statement. func insnToC(insn instruction, blk *block) ([]string, error) { switch i := insn.Instruction.(type) { case bpf.LoadConstant: return stat("%s = %d;", regToCSym[i.Dst], i.Val) case bpf.LoadScratch: return stat("%s = m[%d];", regToCSym[i.Dst], i.N) case bpf.LoadAbsolute: return packetLoadToC(i.Size, "data + %d", i.Off) case bpf.LoadIndirect: return packetLoadToC(i.Size, "indirect + %d", i.Off) case bpf.LoadMemShift: return stat("x = 4*(*(data + %d) & 0xf);", i.Off) case bpf.StoreScratch: return stat("m[%d] = %s;", i.N, regToCSym[i.Src]) case bpf.LoadExtension: if i.Num != bpf.ExtLen { return nil, errors.Errorf("unsupported BPF extension %v", i) } return stat("a = data_end - data;") case bpf.ALUOpConstant: return stat("a %s= %d;", aluToCOp[i.Op], i.Val) case bpf.ALUOpX: return stat("a %s= x;", aluToCOp[i.Op]) case bpf.NegateA: return stat("a = -a;") case bpf.Jump: return stat("goto %s;", blk.skipToBlock(skip(i.Skip)).Label()) case bpf.JumpIf: return condToC(skip(i.SkipTrue), skip(i.SkipFalse), blk, condToCFmt[i.Cond], i.Val) case bpf.JumpIfX: return condToC(skip(i.SkipTrue), skip(i.SkipFalse), blk, condToCFmt[i.Cond], "x") case bpf.RetA: return stat("return a;") case bpf.RetConstant: return stat("return %d;", i.Val) case bpf.TXA: return stat("a = x;") case bpf.TAX: return stat("x = a;") case packetGuardAbsolute: return stat("if (data + %d > data_end) return 0;", i.end) case packetGuardIndirect: return []string{ // Sign extend RegX to 64bits. fmt.Sprintf("indirect = (uint8_t *) (((int64_t) (int32_t) x) + %d);", i.start), fmt.Sprintf("if ((uint64_t)indirect >= %d) return false;", i.maxStartOffset()), fmt.Sprintf("indirect = data + (uint64_t)indirect;"), // Prevent clang from calculating indirect + delta() directly from the packet start when RegX is constant: // only indirect has the correct bounds check. fmt.Sprintf(`asm volatile("" : : "r" (indirect));`), fmt.Sprintf("if (indirect + %d > data_end) return false;", i.length()), }, nil case checkXNotZero: return stat("if (x == 0) return 0;") default: return nil, errors.Errorf("unsupported instruction %v", insn) } } func packetLoadToC(size int, offsetFmt string, offsetArgs ...interface{}) ([]string, error) { offset := fmt.Sprintf(offsetFmt, offsetArgs...) 
switch size { case 1: return stat("a = *(%s);", offset) case 2: return stat("a = ntohs(*((uint16_t *) (%s)));", offset) case 4: return stat("a = ntohl(*((uint32_t *) (%s)));", offset) } return nil, errors.Errorf("unsupported load size %d", size) } func condToC(skipTrue, skipFalse skip, blk *block, condFmt string, condArgs ...interface{}) ([]string, error) { cond := fmt.Sprintf(condFmt, condArgs...) if skipFalse == 0 { return stat("if (%s) goto %s;", cond, blk.skipToBlock(skipTrue).Label()) } return stat("if (%s) goto %s; else goto %s;", cond, blk.skipToBlock(skipTrue).Label(), blk.skipToBlock(skipFalse).Label()) } func stat(format string, a ...interface{}) ([]string, error) { return []string{fmt.Sprintf(format, a...)}, nil } golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/c_example_test.go000066400000000000000000000053631456326020600253530ustar00rootroot00000000000000package cbpfc import ( "bytes" "os" "text/template" "github.com/cloudflare/cbpfc/clang" "github.com/pkg/errors" "golang.org/x/net/bpf" ) var testTemplate = template.Must(template.New(entryPoint).Parse(` #define __section(NAME) __attribute__((section(NAME), used)) char __license[] __section("license") = "BSD"; // Shim out all the definitions required by cbpfc // Real programs should use the proper headers typedef unsigned long long uint64_t; typedef long long int64_t; typedef unsigned int uint32_t; typedef int int32_t; typedef unsigned short uint16_t; typedef unsigned char uint8_t; typedef char bool; #define false 0 #define true 1 #define ntohs __builtin_bswap16 #define ntohl __builtin_bswap32 struct xdp_md { uint32_t data; uint32_t data_end; }; enum xdp_action { XDP_DROP = 1, XDP_PASS, }; {{.Filter}} __section("xdp") int {{.ProgramName}}(struct xdp_md *ctx) { uint8_t *data = (uint8_t *)(long)ctx->data; uint8_t const *data_end = (uint8_t *)(long)ctx->data_end; if ({{.FilterName}}(data, data_end)) { return XDP_DROP; } return XDP_PASS; } `)) type testTemplateOpts struct { // Definition of the filter Filter string // Function name of the filter FilterName string // Name of the eBPF program ProgramName string } // ExampleToC demonstrates how to use ToC() to embed a cBPF filter // in a C program, and compile it to eBPF. func ExampleToC() { // simple cBPF filter that matches all packets filter := []bpf.Instruction{ bpf.RetConstant{Val: 1}, } elf, err := buildC(filter, "example", COpts{FunctionName: "example_filter"}) if err != nil { panic(err) } // ELF with a single eBPF program 'example' // Can be loaded with cilium/ebpf or libbpf _ = elf } // buildC compiles a cBPF filter to C, embeds it in a C template, // and compiles the resulting C program to eBPF / XDP using clang. // The XDP program XDP_DROP's incoming packets that match the filter. 
// Returns the compiled ELF func buildC(filter []bpf.Instruction, programName string, opts COpts) ([]byte, error) { // convert filter to C ebpfFilter, err := ToC(filter, opts) if err != nil { return nil, errors.Wrap(err, "converting filter to C") } // embed filter in C template c := bytes.Buffer{} err = testTemplate.Execute(&c, testTemplateOpts{ Filter: ebpfFilter, FilterName: opts.FunctionName, ProgramName: programName, }) if err != nil { return nil, errors.Wrap(err, "executing template with C filter") } // lookup clang binary to use clangBin, ok := os.LookupEnv("CLANG") if !ok { clangBin = "/usr/bin/clang" } // compile C program elf, err := clang.Compile(c.Bytes(), entryPoint, clang.Opts{ Clang: clangBin, EmitDebug: true, // For BTF }) if err != nil { return nil, errors.Wrap(err, "compiling C") } return elf, nil } golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/c_test.go000066400000000000000000000027361456326020600236410ustar00rootroot00000000000000package cbpfc import ( "bytes" "testing" "github.com/cilium/ebpf" "golang.org/x/net/bpf" ) func TestFunctionName(t *testing.T) { checkName := func(t *testing.T, name string, valid bool) { t.Helper() _, err := ToC([]bpf.Instruction{bpf.RetA{}}, COpts{ FunctionName: name, }) if valid && err != nil { t.Fatalf("valid function name %s rejected: %v", name, err) } if !valid { requireError(t, err, "invalid FunctionName") } } checkName(t, "", false) checkName(t, "0foo", false) checkName(t, "0foo\nfoo", false) checkName(t, "foo_bar2", true) checkName(t, "a2", true) } func TestNoInline(t *testing.T) { elf, err := buildC([]bpf.Instruction{ bpf.RetConstant{Val: 1}, }, entryPoint, COpts{ FunctionName: "filter", NoInline: true, }) if err != nil { t.Fatal(err) } spec, err := ebpf.LoadCollectionSpecFromReader(bytes.NewReader(elf)) if err != nil { t.Fatal(err) } if res := testProg(t, spec.Programs[entryPoint], []byte{1}); res != match { t.Fatalf("expected match, got %v", res) } } const entryPoint = "xdp_filter" // cBackend compiles classic BPF to C, which is compiled with clang func cBackend(tb testing.TB, insns []bpf.Instruction, in []byte) result { elf, err := buildC(insns, entryPoint, COpts{FunctionName: "filter"}) if err != nil { tb.Fatal(err) } // load ELF spec, err := ebpf.LoadCollectionSpecFromReader(bytes.NewReader(elf)) if err != nil { tb.Fatal(err) } return testProg(tb, spec.Programs[entryPoint], in) } golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/cbpfc.go000066400000000000000000000734031456326020600234340ustar00rootroot00000000000000// Package cbpfc implements a cBPF (classic BPF) to eBPF // (extended BPF, not be confused with cBPF extensions) compiler. // // cbpfc can compile cBPF filters to: // - C, which can be compiled to eBPF with Clang // - eBPF // // Both the C and eBPF output are intended to be accepted by the kernel verifier: // - All packet loads are guarded with runtime packet length checks // - RegA and RegX are zero initialized as required // - Division by zero is guarded by runtime checks // // The generated C / eBPF is intended to be embedded into a larger C / eBPF program. package cbpfc import ( "fmt" "sort" "github.com/pkg/errors" "golang.org/x/net/bpf" ) // maxPacketOffset is the maximum packet offset the verifier allows. 
// https://elixir.bootlin.com/linux/v5.14.8/source/kernel/bpf/verifier.c#L3223 const maxPacketOffset = 0xFFFF // Map conditionals to their inverse var condToInverse = map[bpf.JumpTest]bpf.JumpTest{ bpf.JumpEqual: bpf.JumpNotEqual, bpf.JumpNotEqual: bpf.JumpEqual, bpf.JumpGreaterThan: bpf.JumpLessOrEqual, bpf.JumpLessThan: bpf.JumpGreaterOrEqual, bpf.JumpGreaterOrEqual: bpf.JumpLessThan, bpf.JumpLessOrEqual: bpf.JumpGreaterThan, bpf.JumpBitsSet: bpf.JumpBitsNotSet, bpf.JumpBitsNotSet: bpf.JumpBitsSet, } // pos stores the absolute position of a cBPF instruction type pos uint // skips store cBPF jumps, which are relative type skip uint // instruction wraps a bpf instruction with it's // original position type instruction struct { bpf.Instruction id pos } func (i instruction) String() string { return fmt.Sprintf("%d: %v", i.id, i.Instruction) } // block contains a linear flow on instructions: // - Nothing jumps into the middle of a block // - Nothing jumps out of the middle of a block // // A block may start or end with any instruction, as any instruction // can be the target of a jump. // // A block also knows what blocks it jumps to. This forms a DAG of blocks. type block struct { // Should not be directly modified, instead copy instructions to new slice insns []instruction // Map of absolute instruction positions the last instruction // of this block can jump to, to the corresponding block jumps map[pos]*block // id of the instruction that started this block // Unique, but not guaranteed to match insns[0].id after blocks are modified id pos } // newBlock creates a block with copy of insns func newBlock(insns []instruction) *block { return &block{ insns: insns, jumps: make(map[pos]*block), id: insns[0].id, } } func (b *block) Label() string { return fmt.Sprintf("block_%d", b.id) } func (b *block) skipToPos(s skip) pos { return b.last().id + 1 + pos(s) } // Get the target block of a skip func (b *block) skipToBlock(s skip) *block { return b.jumps[b.skipToPos(s)] } func (b *block) last() instruction { return b.insns[len(b.insns)-1] } // packetGuard is a "fake" cBPF instruction // that checks packet bounds before data is read from the packet. type packetGuard interface { bpf.Instruction // Extend returns a guard that is the union of the current guard and o. extend(o packetGuard) packetGuard // Restrict returns a guard that is the intersection of the current guard and o. restrict(o packetGuard) packetGuard // Adjust any instructions that are covered by this guard as required. adjustInsns(insns []instruction) } // packetGuardAbsolute checks packet bounds for absolute packet loads (constant offset). // We only need to track the last / greatest byte read to ensure it isn't past the packet end. type packetGuardAbsolute struct { // The furthest (exclusive) byte read. end int32 } func newPacketGuardAbsolute(off uint32, size int) packetGuardAbsolute { if off > maxPacketOffset { panic("can't create absolute packet guard for offset") } // Absolute offsets are limited to maxPacketOffset so this can't overflow. return packetGuardAbsolute{int32(off) + int32(size)} } func (a packetGuardAbsolute) extend(o packetGuard) packetGuard { n := a if b := o.(packetGuardAbsolute); b.end > a.end { n.end = b.end } return n } func (a packetGuardAbsolute) restrict(o packetGuard) packetGuard { n := a if b := o.(packetGuardAbsolute); b.end < a.end { n.end = b.end } return n } // We don't need to adjust instructions for absolute guards. 
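// (An absolute load's offset is a compile-time constant that is already folded into the
// guard's end, so, unlike indirect loads, there is nothing to rewrite relative to the guard.)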
func (a packetGuardAbsolute) adjustInsns(insns []instruction) {} // Assemble implements the Instruction Assemble method. func (p packetGuardAbsolute) Assemble() (bpf.RawInstruction, error) { return bpf.RawInstruction{}, errors.Errorf("unsupported") } // packetGuardIndirect checks packet bounds for indirect packet loads (RegX + constant offset). // RegX and offset are both allowed to be negative, but RegX + Offset must be >= 0 (the verifier does not allow // adding negative offsets to packet pointers). // // This requires tracking both the first and last byte read (relative to RegX) to check: // - RegX + start >= 0 // - RegX + end < maxPacketOffset // - packet_start + RegX + end < packet_end // // Bounds / range information is propagated in the verifier by copying a packet pointer, // adding a constant (which yields a "derived" packet pointer with the same ID), and checking it against the packet_end. // Subsequent LoadIndirects that are covered by this guard need to use a packet pointer with same ID as the guard to // take advantage of the bounds. // Ideally we would use packet_start + RegX and let each LoadIndirect instruction add its own offset, // but the verifier doesn't allow the use of packet pointers with a negative offset (even if the offset // would make the read positive: https://elixir.bootlin.com/linux/v5.14.12/source/kernel/bpf/verifier.c#L3287) // // So instead we check: // - RegX + start >= 0 // - RegX + start < maxPacketOffset - length // - packet_start + RegX + start + length < packet_end // // This lets us reuse packet_start + RegX + start as the packet pointer for LoadIndirect, // but means we need to rewrite the offsets of LoadIndirect instructions covered by this guard to subtract length. type packetGuardIndirect struct { // First byte read (inclusive). start int32 // Last byte read (exclusive). // int64 to avoid overflows with INT32_MAX + size end int64 } func newPacketGuardIndirect(off uint32, size int) packetGuardIndirect { // cBPF offsets are uint32, but are signed in reality // LoadIndirect offsets are encoded as uint32 by x/net/bpf, but are signed in reality. // Unlike LoadAbsolute, restrictions only apply to RegX + Offset and not Offset alone, // so we have to allow INT32_MAX / INT32_MIN offsets. return packetGuardIndirect{ start: int32(off), end: int64(int32(off)) + int64(size), } } func (a packetGuardIndirect) extend(o packetGuard) packetGuard { b := o.(packetGuardIndirect) // A 0 guard means no guard, we shouldn't extend it to cover {0,0} if a == (packetGuardIndirect{}) { return b } if b == (packetGuardIndirect{}) { return a } n := a if b.start < a.start { n.start = b.start } if b.end > a.end { n.end = b.end } return n } func (a packetGuardIndirect) restrict(o packetGuard) packetGuard { b := o.(packetGuardIndirect) // A 0 guard means no guard, that restricts everything to no guard. if a == (packetGuardIndirect{}) || b == (packetGuardIndirect{}) { return packetGuardIndirect{} } n := a if b.start > a.start { n.start = b.start } if b.end < a.end { n.end = b.end } return n } // int32(RegX) + p.start must be < to maxStartOffset(). // This checks that it is positive, and int32(RegX) + p.end doesn't exceed maxPacketOffset. // Returns 0 (check will always be false) if there is no way for the start and end of the guard to be < maxPacketOffset. func (p packetGuardIndirect) maxStartOffset() int32 { length := p.end - int64(p.start) // If length exceeds maxPacketOffset, there's no way for RegX + start >= 0 and RegX + end < maxPacketOffset. 
// Return 0 so the check fails, and we return noMatch. if length > maxPacketOffset { return 0 } // +1 as it needs to be strictly less than. // This lets us return 0 above to get noMatch. return int32(maxPacketOffset) - int32(length) + 1 } // packet_start + (int32(x) + p.start) + p.length() must be <= packet_end. // This lets us reuse the (int32(x) + p.start) from the maxStartOffset() check, to keep the bounds info. func (p packetGuardIndirect) length() int32 { // This can overflow, but it doesn't matter as we'll already have checked maxStartOffset() // and caught the overflow there. return int32(p.end - int64(p.start)) } // Once we've determined the guard that applies for a given set of insns, // asjust the offsets so they're relative to the smallest / start of the guard. func (p packetGuardIndirect) adjustInsns(insns []instruction) { for i := range insns { switch insn := insns[i].Instruction.(type) { case bpf.LoadIndirect: insns[i].Instruction = bpf.LoadIndirect{ Off: uint32(int32(insn.Off) - p.start), Size: insn.Size, } } } } // Assemble implements the Instruction Assemble method. func (p packetGuardIndirect) Assemble() (bpf.RawInstruction, error) { return bpf.RawInstruction{}, errors.Errorf("unsupported") } // checksXNotZero is a "fake" instruction // that returns no match if X is 0 type checkXNotZero struct { } // Assemble implements the Instruction Assemble method. func (c checkXNotZero) Assemble() (bpf.RawInstruction, error) { return bpf.RawInstruction{}, errors.Errorf("unsupported") } // compile compiles a cBPF program to an ordered slice of blocks, with: // - Registers zero initialized as required // - Required packet access guards added // - JumpIf and JumpIfX instructions normalized (see normalizeJumps) func compile(insns []bpf.Instruction) ([]*block, error) { err := validateInstructions(insns) if err != nil { return nil, err } instructions := toInstructions(insns) normalizeJumps(instructions) // Split into blocks blocks, err := splitBlocks(instructions) if err != nil { return nil, errors.Wrapf(err, "unable to compute blocks") } // Initialize registers err = initializeMemory(blocks) if err != nil { return nil, err } // Check we don't divide by zero err = addDivideByZeroGuards(blocks) if err != nil { return nil, err } rewriteLargePacketOffsets(&blocks) // Guard packet loads addAbsolutePacketGuards(blocks) addIndirectPacketGuards(blocks) return blocks, nil } // validateInstructions checks the instructions are valid, and we support them func validateInstructions(insns []bpf.Instruction) error { // Can't do anything meaningful with no instructions if len(insns) == 0 { return errors.New("can't compile 0 instructions") } for pc, insn := range insns { // Assemble does some input validation _, err := insn.Assemble() if err != nil { return errors.Errorf("can't assemble instruction %d: %v", pc, insn) } switch i := insn.(type) { case bpf.RawInstruction: return errors.Errorf("unsupported instruction %d: %v", pc, insn) // Negative constant offsets are used for extensions (and if they're supported, x/net/bpf will parse them) // and other packet addressing modes we don't support: https://elixir.bootlin.com/linux/v5.14.10/source/kernel/bpf/core.c#L65 case bpf.LoadAbsolute: if int32(i.Off) < 0 { return errors.Errorf("LoadAbsolute negative offset %v", int32(i.Off)) } case bpf.LoadMemShift: if int32(i.Off) < 0 { return errors.Errorf("LoadMemShift negative offset %v", int32(i.Off)) } case bpf.LoadExtension: switch i.Num { case bpf.ExtLen: break default: return errors.Errorf("unsupported BPF 
extension %d: %v", pc, insn) } } } return nil } func toInstructions(insns []bpf.Instruction) []instruction { instructions := make([]instruction, len(insns)) for pc, insn := range insns { instructions[pc] = instruction{ Instruction: insn, id: pos(pc), } } return instructions } // normalizeJumps normalizes conditional jumps to always use skipTrue: // Jumps that only use skipTrue (skipFalse == 0) are unchanged. // Jumps that use both skipTrue and skipFalse are unchanged. // Jumps that only use skipFalse (skipTrue == 0) are inverted to only use skipTrue. func normalizeJumps(insns []instruction) { for pc := range insns { switch i := insns[pc].Instruction.(type) { case bpf.JumpIf: if !shouldInvert(i.SkipTrue, i.SkipFalse) { continue } insns[pc].Instruction = bpf.JumpIf{Cond: condToInverse[i.Cond], Val: i.Val, SkipTrue: i.SkipFalse, SkipFalse: i.SkipTrue} case bpf.JumpIfX: if !shouldInvert(i.SkipTrue, i.SkipFalse) { continue } insns[pc].Instruction = bpf.JumpIfX{Cond: condToInverse[i.Cond], SkipTrue: i.SkipFalse, SkipFalse: i.SkipTrue} } } } // Check if a conditional jump should be inverted func shouldInvert(skipTrue, skipFalse uint8) bool { return skipTrue == 0 && skipFalse != 0 } // Traverse instructions until end of first block. Target is absolute start of block. // Return block-relative jump targets func visitBlock(insns []instruction, target pos) (*block, []skip) { for pc, insn := range insns { // Relative jumps from this instruction var skips []skip switch i := insn.Instruction.(type) { case bpf.Jump: skips = []skip{skip(i.Skip)} case bpf.JumpIf: skips = []skip{skip(i.SkipTrue), skip(i.SkipFalse)} case bpf.JumpIfX: skips = []skip{skip(i.SkipTrue), skip(i.SkipFalse)} case bpf.RetA, bpf.RetConstant: // No extra targets to visit default: // Regular instruction, next please! continue } // every insn including this one return newBlock(insns[:pc+1]), skips } // Try to fall through to next block return newBlock(insns), []skip{0} } // splitBlocks splits the cBPF into an ordered list of blocks. // // The blocks are preserved in the order they are found as this guarantees that // a block only targets later blocks (cBPF jumps are positive, relative offsets). // This also mimics the layout of the original cBPF, which is good for debugging. func splitBlocks(instructions []instruction) ([]*block, error) { // Blocks we've visited already blocks := []*block{} // map of targets to blocks that target them // target 0 is for the base case targets := map[pos][]*block{ 0: nil, } // As long as we have un visited targets for len(targets) > 0 { sortedTargets := sortTargets(targets) // Get the first one (not really breadth first, but close enough!) target := sortedTargets[0] end := len(instructions) // If there's a next target, ensure we stop before it if len(sortedTargets) > 1 { end = int(sortedTargets[1]) } next, nextSkips := visitBlock(instructions[target:end], target) // Add skips to our list of things to visit for _, s := range nextSkips { // Convert relative skip to absolute pos t := next.skipToPos(s) if t >= pos(len(instructions)) { return nil, errors.Errorf("instruction %v flows past last instruction", next.last()) } targets[t] = append(targets[t], next) } jmpBlocks := targets[target] // Mark all the blocks that jump to the block we've just visited as doing so for _, jmpBlock := range jmpBlocks { jmpBlock.jumps[target] = next } blocks = append(blocks, next) // Target is now a block! 
delete(targets, target) } return blocks, nil } // sortTargets sorts the target positions (keys), lowest first func sortTargets(targets map[pos][]*block) []pos { keys := make([]pos, len(targets)) i := 0 for k := range targets { keys[i] = k i++ } sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) return keys } // addDivideByZeroGuards adds runtime guards / checks to ensure // the program returns no match when it would otherwise divide by zero. func addDivideByZeroGuards(blocks []*block) error { isDivision := func(op bpf.ALUOp) bool { return op == bpf.ALUOpDiv || op == bpf.ALUOpMod } // Is RegX known to be none 0 at the start of each block // We can't divide by RegA, only need to check RegX. xNotZero := make(map[*block]bool) for _, block := range blocks { notZero := xNotZero[block] // newInsns to replace those in the block newInsns := []instruction{} for _, insn := range block.insns { switch i := insn.Instruction.(type) { case bpf.ALUOpConstant: if isDivision(i.Op) && i.Val == 0 { return errors.Errorf("instruction %v divides by 0", insn) } case bpf.ALUOpX: if isDivision(i.Op) && !notZero { newInsns = append(newInsns, instruction{Instruction: checkXNotZero{}}) notZero = true } } newInsns = append(newInsns, insn) // check if X clobbered - check is invalidated if memWrites(insn.Instruction).regs[bpf.RegX] { notZero = false } } block.insns = newInsns // update the status of every block this one jumps to for _, target := range block.jumps { targetNotZero, ok := xNotZero[target] if !ok { xNotZero[target] = notZero continue } // x needs to be not zero from every possible path xNotZero[target] = targetNotZero && notZero } } return nil } // rewriteLargePacketOffsets replaces packet loads that have constant offsets // greater than the verifier allows with return 0 (no match) to mimick // what the kernel does for cBPF. // While cBPF allows bigger offsets, in practice they cannot match a packet. // This doesn't work for LoadIndirect as the actual offset is LoadIndirect.Off + RegX, // we instead rely on runtime checks (see packetGuardIndirect). func rewriteLargePacketOffsets(blocks *[]*block) { // All blocks are reachable when we start. // But some blocks can become unreachable once we've rewritten load instructions to returns. // The verifier rejects unreachable instructions, track how many other blocks go to a given block // so we can remove newly unreachable blocks. blockRefs := make(map[*block]int) var newBlocks []*block for i, block := range *blocks { // No other blocks jump into this block anymore, skip it. if i != 0 && blockRefs[block] == 0 { continue } newBlocks = append(newBlocks, block) for _, insn := range block.insns { var ( offset uint32 size int ) // LoadIndirect is handled by runtime checks as only RegX + offset is subject to maxPacketOffset. switch i := insn.Instruction.(type) { case bpf.LoadAbsolute: offset = i.Off size = i.Size case bpf.LoadMemShift: offset = i.Off size = 1 default: continue } // A packetGuard will have to add size to the packet pointer, so it counts towards the limit. // We've already validate offset isn't signed, so this can't overflow. if offset+uint32(size) > maxPacketOffset { // Mimick an out of bounds load in cBPF, returning 0 / no match. // The block now unconditionally returns, the other instructions in it don't matter. block.insns = []instruction{ {Instruction: bpf.RetConstant{Val: 0}}, } // This block doesn't jump to any others anymore. block.jumps = nil break } } // cBPF can't jump backwards, so we can build this up as we go. 
for _, target := range block.jumps { blockRefs[target]++ } } *blocks = newBlocks } // addAbsolutePacketGuard adds required packet guards for absolute packet accesses to blocks. func addAbsolutePacketGuards(blocks []*block) { addPacketGuards(blocks, packetGuardOpts{ requiredGuard: func(insns []instruction) requiredGuard { var biggestGuard packetGuard = packetGuardAbsolute{} for _, insn := range insns { switch i := insn.Instruction.(type) { case bpf.LoadAbsolute: biggestGuard = biggestGuard.extend(newPacketGuardAbsolute(i.Off, i.Size)) case bpf.LoadMemShift: biggestGuard = biggestGuard.extend(newPacketGuardAbsolute(i.Off, 1)) } } // Guard covers all instructions. return requiredGuard{ guard: biggestGuard, alwaysValid: true, } }, zeroGuard: func() packetGuard { return packetGuardAbsolute{} }, }) } // addIndirectPacketGuard adds required packet guards for indirect packet accesses to blocks. func addIndirectPacketGuards(blocks []*block) { addPacketGuards(blocks, packetGuardOpts{ requiredGuard: func(insns []instruction) requiredGuard { var ( insnCount int biggestGuard packetGuard = packetGuardIndirect{} ) for _, insn := range insns { insnCount++ switch i := insn.Instruction.(type) { case bpf.LoadIndirect: biggestGuard = biggestGuard.extend(newPacketGuardIndirect(i.Off, i.Size)) } // Check if we clobbered x - this invalidates the guard if memWrites(insn.Instruction).regs[bpf.RegX] { return requiredGuard{ guard: biggestGuard, validForInsns: insnCount, } } } return requiredGuard{ guard: biggestGuard, alwaysValid: true, } }, zeroGuard: func() packetGuard { return packetGuardIndirect{} }, }) } type packetGuardOpts struct { // requiredGuard returns the packetGuard needed by insns, and what insns it is valid for. requiredGuard func(insns []instruction) requiredGuard // zeroGuard returns an empty guard of the right type. zeroGuard func() packetGuard } type requiredGuard struct { guard packetGuard // The guard covers all the requested instructions, // and is still valid afterwards. alwaysValid bool // The guard covers n instructions, // and isn't valid for the subsequent n+1: instructions (eg RegX was clobbered for indirect guards). validForInsns int } // addPacketGuards adds packet guards as required. // // Traversing the DAG of blocks (by visiting the blocks a block jumps to), // we know all packet guards that exist at the start of a given block. // We can check if the block requires a longer / bigger guard than // the shortest / least existing guard. func addPacketGuards(blocks []*block, opts packetGuardOpts) { // Guards in effect at the start of each block // Can't jump backwards so we only need to traverse blocks once guards := make(map[*block][]packetGuard) for _, block := range blocks { blockGuard := addBlockGuards(block, leastGuard(opts.zeroGuard(), guards[block]), opts) for _, target := range block.jumps { guards[target] = append(guards[target], blockGuard) } } } // addBlockGuards add the guards required for the instructions in block. func addBlockGuards(block *block, currentGuard packetGuard, opts packetGuardOpts) packetGuard { insns := block.insns block.insns = nil for len(insns) != 0 { required := opts.requiredGuard(insns) // Need a bigger guard for these insns. Don't use the bigger guard on it's own, // extend the current one so we keep as much information as we have. 
if newGuard := currentGuard.extend(required.guard); newGuard != currentGuard { currentGuard = newGuard // Last guard we need for this block -> what our children / target blocks will start with if required.alwaysValid { // If packets must go through a bigger guard (guaranteed guard) to match, we can use the guaranteed guard here, // without changing the return value of the program: // - packets smaller than the guaranteed guard cannot match anyways, we can safely reject them earlier // - packets bigger than the guaranteed guard won't be affected by it currentGuard = currentGuard.extend(guaranteedGuard(block.jumps, opts)) } block.insns = append(block.insns, instruction{Instruction: currentGuard}) } coveredInsns := insns if !required.alwaysValid { coveredInsns = insns[:required.validForInsns] } currentGuard.adjustInsns(coveredInsns) block.insns = append(block.insns, coveredInsns...) if required.alwaysValid { // Guard covers remainder of block, and is still valid at the end. return currentGuard } else { // Guard isn't valid anymore. currentGuard = opts.zeroGuard() insns = insns[required.validForInsns:] } } return currentGuard } // guaranteedGuard performs a recursive depth first search of blocks in target to determine // the greatest packet guard that must be made for a packet to match // // If the DAG of blocks needs these packet guards: // // [4] // / \ // false [6] // / \ // true [8] // / \ // false true // // A packet can only match ("true") by going through guards 4 and 6. It does not have to go through guard 8. // guaranteedGuard would return 6. func guaranteedGuard(targets map[pos]*block, opts packetGuardOpts) packetGuard { // Inner implementation - Uses memoization return guaranteedGuardCached(targets, opts, make(map[*block]packetGuard)) } // 'cache' is used in order to not calculate guard more than once for the same block. func guaranteedGuardCached(targets map[pos]*block, opts packetGuardOpts, cache map[*block]packetGuard) packetGuard { targetGuards := []packetGuard{} for _, target := range targets { // Block can't match the packet, ignore it if blockNeverMatches(target) { continue } if guard, ok := cache[target]; ok { targetGuards = append(targetGuards, guard) continue } required := opts.requiredGuard(target.insns) // Guard invalidated by block, stop exploring if !required.alwaysValid { targetGuards = append(targetGuards, required.guard) continue } guard := required.guard.extend(guaranteedGuardCached(target.jumps, opts, cache)) cache[target] = guard targetGuards = append(targetGuards, guard) } return leastGuard(opts.zeroGuard(), targetGuards) } // leastGuard returns the smallest guard from guards. // zero if there are no guards. 
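// For example, given absolute guards covering 4 and 8 bytes on two incoming paths,
// only the 4-byte guard is guaranteed on every path, so leastGuard yields the 4-byte guard.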
func leastGuard(zero packetGuard, guards []packetGuard) packetGuard { least := zero for i, guard := range guards { if i == 0 { least = guard } else { least = least.restrict(guard) } } return least } // blockNeverMatches returns true IFF the insns in block will never match the input packet func blockNeverMatches(block *block) bool { for _, insn := range block.insns { switch i := insn.Instruction.(type) { case bpf.RetConstant: if i.Val == 0 { return true } } } return false } // memStatus represents a context defined status of registers & scratch type memStatus struct { // indexed by bpf.Register regs [2]bool scratch [16]bool } // merge merges this status with the other by applying policy to regs and scratch func (r memStatus) merge(other memStatus, policy func(this, other bool) bool) memStatus { newStatus := memStatus{} for i := range newStatus.regs { newStatus.regs[i] = policy(r.regs[i], other.regs[i]) } for i := range newStatus.scratch { newStatus.scratch[i] = policy(r.scratch[i], other.scratch[i]) } return newStatus } // and merges this status with the other by logical AND func (r memStatus) and(other memStatus) memStatus { return r.merge(other, func(this, other bool) bool { return this && other }) } // and merges this status with the other by logical OR func (r memStatus) or(other memStatus) memStatus { return r.merge(other, func(this, other bool) bool { return this || other }) } // initializeMemory zero initializes all the registers that the BPF program reads from before writing to. Returns an error if any scratch memory is used uninitialized. func initializeMemory(blocks []*block) error { // memory initialized at the start of each block statuses := make(map[*block]memStatus) // uninitialized memory used so far uninitialized := memStatus{} for _, block := range blocks { status := statuses[block] for _, insn := range block.insns { insnUninitialized := memUninitializedReads(insn.Instruction, status) // Check no uninitialized scratch registers are read for scratch, uninit := range insnUninitialized.scratch { if uninit { return errors.Errorf("instruction %v reads potentially uninitialized scratch register M[%d]", insn, scratch) } } uninitialized = uninitialized.or(insnUninitialized) status = status.or(memWrites(insn.Instruction)) } // update the status of every block this one jumps to for _, target := range block.jumps { targetStatus, ok := statuses[target] if !ok { statuses[target] = status continue } // memory needs to be initialized from every possible path statuses[target] = targetStatus.and(status) } } // new instructions we need to prepend to initialize uninitialized registers initInsns := []instruction{} for reg, uninit := range uninitialized.regs { if !uninit { continue } initInsns = append(initInsns, instruction{ Instruction: bpf.LoadConstant{ Dst: bpf.Register(reg), Val: 0, }, }) } blocks[0].insns = append(initInsns, blocks[0].insns...) return nil } // memUninitializedReads returns the memory read by insn that has not yet been initialized according to initialized. 
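// For example, if RegX has not been written yet, a bpf.TXA (which reads RegX) is reported as an
// uninitialized read of RegX, which initializeMemory then fixes up by prepending a zero load.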
func memUninitializedReads(insn bpf.Instruction, initialized memStatus) memStatus { return memReads(insn).merge(initialized, func(read, init bool) bool { return read && !init }) } // memReads returns the memory read by insn func memReads(insn bpf.Instruction) memStatus { read := memStatus{} switch i := insn.(type) { case bpf.ALUOpConstant: read.regs[bpf.RegA] = true case bpf.ALUOpX: read.regs[bpf.RegA] = true read.regs[bpf.RegX] = true case bpf.JumpIf: read.regs[bpf.RegA] = true case bpf.JumpIfX: read.regs[bpf.RegA] = true read.regs[bpf.RegX] = true case bpf.LoadIndirect: read.regs[bpf.RegX] = true case bpf.LoadScratch: read.scratch[i.N] = true case bpf.NegateA: read.regs[bpf.RegA] = true case bpf.RetA: read.regs[bpf.RegA] = true case bpf.StoreScratch: read.regs[i.Src] = true case bpf.TAX: read.regs[bpf.RegA] = true case bpf.TXA: read.regs[bpf.RegX] = true } return read } // memWrites returns the memory written by insn func memWrites(insn bpf.Instruction) memStatus { write := memStatus{} switch i := insn.(type) { case bpf.ALUOpConstant: write.regs[bpf.RegA] = true case bpf.ALUOpX: write.regs[bpf.RegA] = true case bpf.LoadAbsolute: write.regs[bpf.RegA] = true case bpf.LoadConstant: write.regs[i.Dst] = true case bpf.LoadExtension: write.regs[bpf.RegA] = true case bpf.LoadIndirect: write.regs[bpf.RegA] = true case bpf.LoadMemShift: write.regs[bpf.RegX] = true case bpf.LoadScratch: write.regs[i.Dst] = true case bpf.NegateA: write.regs[bpf.RegA] = true case bpf.StoreScratch: write.scratch[i.N] = true case bpf.TAX: write.regs[bpf.RegX] = true case bpf.TXA: write.regs[bpf.RegA] = true } return write } golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/cbpfc_test.go000066400000000000000000001171161456326020600244730ustar00rootroot00000000000000package cbpfc import ( "reflect" "strings" "testing" "golang.org/x/net/bpf" ) // requireError ensures an error is not nil, and it contains contains. func requireError(tb testing.TB, err error, contains string) { tb.Helper() if err == nil { tb.Fatalf("expected error %s", contains) } if !strings.Contains(err.Error(), contains) { tb.Fatalf("error %v does not contain %s", err, contains) } } // Make sure we bail out with 0 instructions func TestZero(t *testing.T) { _, err := compile([]bpf.Instruction{}) requireError(t, err, "can't compile 0 instructions") } func TestRaw(t *testing.T) { _, err := compile([]bpf.Instruction{ bpf.RawInstruction{}, }) requireError(t, err, "unsupported instruction 0:") } // Absolute / constant loads can't use negative offsets, they're for extensions. 
func TestLoadAbsoluteNegativeOffset(t *testing.T) { off := (^uint32(1)) + 1 // -1 for _, insn := range []bpf.Instruction{ bpf.LoadAbsolute{Off: off, Size: 1}, bpf.LoadMemShift{Off: off}, } { _, err := compile([]bpf.Instruction{ insn, bpf.RetA{}, }) requireError(t, err, "negative offset -1") } } func TestExtension(t *testing.T) { // No extensions > 256 right now for i := 0; i < 256; i++ { ext := bpf.Extension(i) _, err := compile([]bpf.Instruction{ bpf.LoadExtension{Num: ext}, bpf.RetA{}, }) switch ext { case bpf.ExtLen: if err != nil { t.Fatal("ExtLen not accepted", err) } default: requireError(t, err, "unsupported BPF extension 0:") } } } // Test out of bound jumps func TestJumpOut(t *testing.T) { _, err := compile([]bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegX, Val: 0}, bpf.Jump{Skip: 0}, }) requireError(t, err, "instruction 1: ja 0 flows past last instruction") } func TestJumpIfOut(t *testing.T) { _, err := compile([]bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegA, Val: 0}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: 2, SkipTrue: 0, SkipFalse: 1}, }) requireError(t, err, "instruction 1: jneq #2,1 flows past last instruction") } func TestJumpIfXOut(t *testing.T) { _, err := compile([]bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegA, Val: 0}, bpf.LoadConstant{Dst: bpf.RegX, Val: 3}, bpf.JumpIfX{Cond: bpf.JumpEqual, SkipTrue: 1, SkipFalse: 0}, }) requireError(t, err, "instruction 2: jeq x,1 flows past last instruction") } // Out of bounds fall through - last block doesn't end in return func TestFallthroughOut(t *testing.T) { _, err := compile([]bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegA, Val: 0}, }) requireError(t, err, "instruction 0: ld #0 flows past last instruction") } // Jump normalization func TestNormalizeJumps(t *testing.T) { insns := func(skipTrue, skipFalse uint8) []instruction { return toInstructions([]bpf.Instruction{ bpf.JumpIf{Cond: bpf.JumpEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: bpf.JumpEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIf{Cond: bpf.JumpNotEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: bpf.JumpNotEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIf{Cond: bpf.JumpGreaterThan, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: bpf.JumpGreaterThan, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIf{Cond: bpf.JumpLessThan, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: bpf.JumpLessThan, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIf{Cond: bpf.JumpGreaterOrEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: bpf.JumpGreaterOrEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIf{Cond: bpf.JumpLessOrEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: bpf.JumpLessOrEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIf{Cond: bpf.JumpBitsSet, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: bpf.JumpBitsSet, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIf{Cond: bpf.JumpBitsNotSet, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: bpf.JumpBitsNotSet, SkipTrue: skipTrue, SkipFalse: skipFalse}, }) } // same insns, but with the conditions inverted invertedInsns := func(skipTrue, skipFalse uint8) []instruction { return toInstructions([]bpf.Instruction{ bpf.JumpIf{Cond: bpf.JumpNotEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: bpf.JumpNotEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIf{Cond: bpf.JumpEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: 
bpf.JumpEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIf{Cond: bpf.JumpLessOrEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: bpf.JumpLessOrEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIf{Cond: bpf.JumpGreaterOrEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: bpf.JumpGreaterOrEqual, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIf{Cond: bpf.JumpLessThan, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: bpf.JumpLessThan, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIf{Cond: bpf.JumpGreaterThan, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: bpf.JumpGreaterThan, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIf{Cond: bpf.JumpBitsNotSet, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: bpf.JumpBitsNotSet, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIf{Cond: bpf.JumpBitsSet, SkipTrue: skipTrue, SkipFalse: skipFalse}, bpf.JumpIfX{Cond: bpf.JumpBitsSet, SkipTrue: skipTrue, SkipFalse: skipFalse}, }) } check := func(t *testing.T, input []instruction, expected []instruction) { normalizeJumps(input) if !reflect.DeepEqual(input, expected) { t.Fatalf("\nGot:\n%v\n\nExpected:\n%v", input, expected) } } // skipTrue only - no change check(t, insns(1, 0), insns(1, 0)) // skipFalse & skipTrue - no change check(t, insns(1, 3), insns(1, 3)) // skipFalse only - inverted check(t, insns(0, 3), invertedInsns(3, 0)) } // instruction read / writes func TestInstructionReadsRegA(t *testing.T) { checkMemoryStatus(t, map[bpf.Instruction]bool{ bpf.ALUOpConstant{}: true, bpf.ALUOpX{}: true, bpf.Jump{}: false, bpf.JumpIf{}: true, bpf.JumpIfX{}: true, bpf.LoadAbsolute{}: false, bpf.LoadConstant{Dst: bpf.RegA}: false, bpf.LoadConstant{Dst: bpf.RegX}: false, bpf.LoadExtension{}: false, bpf.LoadIndirect{}: false, bpf.LoadMemShift{}: false, bpf.LoadScratch{Dst: bpf.RegA}: false, bpf.LoadScratch{Dst: bpf.RegX}: false, bpf.NegateA{}: true, bpf.RetA{}: true, bpf.RetConstant{}: false, bpf.StoreScratch{Src: bpf.RegA}: true, bpf.StoreScratch{Src: bpf.RegX}: false, bpf.TAX{}: true, bpf.TXA{}: false, }, func(insn bpf.Instruction) bool { return memReads(insn).regs[bpf.RegA] }) } func TestInstructionWritesRegA(t *testing.T) { checkMemoryStatus(t, map[bpf.Instruction]bool{ bpf.ALUOpConstant{}: true, bpf.ALUOpX{}: true, bpf.Jump{}: false, bpf.JumpIf{}: false, bpf.JumpIfX{}: false, bpf.LoadAbsolute{}: true, bpf.LoadConstant{Dst: bpf.RegA}: true, bpf.LoadConstant{Dst: bpf.RegX}: false, bpf.LoadExtension{}: true, bpf.LoadIndirect{}: true, bpf.LoadMemShift{}: false, bpf.LoadScratch{Dst: bpf.RegA}: true, bpf.LoadScratch{Dst: bpf.RegX}: false, bpf.NegateA{}: true, bpf.RetA{}: false, bpf.RetConstant{}: false, bpf.StoreScratch{Src: bpf.RegA}: false, bpf.StoreScratch{Src: bpf.RegX}: false, bpf.TAX{}: false, bpf.TXA{}: true, }, func(insn bpf.Instruction) bool { return memWrites(insn).regs[bpf.RegA] }) } func TestInstructionReadsRegX(t *testing.T) { checkMemoryStatus(t, map[bpf.Instruction]bool{ bpf.ALUOpConstant{}: false, bpf.ALUOpX{}: true, bpf.Jump{}: false, bpf.JumpIf{}: false, bpf.JumpIfX{}: true, bpf.LoadAbsolute{}: false, bpf.LoadConstant{Dst: bpf.RegA}: false, bpf.LoadConstant{Dst: bpf.RegX}: false, bpf.LoadExtension{}: false, bpf.LoadIndirect{}: true, bpf.LoadMemShift{}: false, bpf.LoadScratch{Dst: bpf.RegA}: false, bpf.LoadScratch{Dst: bpf.RegX}: false, bpf.NegateA{}: false, bpf.RetA{}: false, bpf.RetConstant{}: false, bpf.StoreScratch{Src: bpf.RegA}: false, bpf.StoreScratch{Src: bpf.RegX}: true, bpf.TAX{}: false, 
bpf.TXA{}: true, }, func(insn bpf.Instruction) bool { return memReads(insn).regs[bpf.RegX] }) } func TestInstructionWritesRegX(t *testing.T) { checkMemoryStatus(t, map[bpf.Instruction]bool{ bpf.ALUOpConstant{}: false, bpf.ALUOpX{}: false, bpf.Jump{}: false, bpf.JumpIf{}: false, bpf.JumpIfX{}: false, bpf.LoadAbsolute{}: false, bpf.LoadConstant{Dst: bpf.RegA}: false, bpf.LoadConstant{Dst: bpf.RegX}: true, bpf.LoadExtension{}: false, bpf.LoadIndirect{}: false, bpf.LoadMemShift{}: true, bpf.LoadScratch{Dst: bpf.RegA}: false, bpf.LoadScratch{Dst: bpf.RegX}: true, bpf.NegateA{}: false, bpf.RetA{}: false, bpf.RetConstant{}: false, bpf.StoreScratch{Src: bpf.RegA}: false, bpf.StoreScratch{Src: bpf.RegX}: false, bpf.TAX{}: true, bpf.TXA{}: false, }, func(insn bpf.Instruction) bool { return memWrites(insn).regs[bpf.RegX] }) } func TestInstructionReadsScratch(t *testing.T) { checkMemoryStatus(t, map[bpf.Instruction]bool{ bpf.ALUOpConstant{}: false, bpf.ALUOpX{}: false, bpf.Jump{}: false, bpf.JumpIf{}: false, bpf.JumpIfX{}: false, bpf.LoadAbsolute{}: false, bpf.LoadConstant{Dst: bpf.RegA}: false, bpf.LoadConstant{Dst: bpf.RegX}: false, bpf.LoadExtension{}: false, bpf.LoadIndirect{}: false, bpf.LoadMemShift{}: false, bpf.LoadScratch{Dst: bpf.RegA, N: 3}: true, bpf.LoadScratch{Dst: bpf.RegX, N: 3}: true, bpf.NegateA{}: false, bpf.RetA{}: false, bpf.RetConstant{}: false, bpf.StoreScratch{Src: bpf.RegA, N: 3}: false, bpf.StoreScratch{Src: bpf.RegX, N: 3}: false, bpf.TAX{}: false, bpf.TXA{}: false, }, func(insn bpf.Instruction) bool { return memReads(insn).scratch[3] }) } func TestInstructionWritesScratch(t *testing.T) { checkMemoryStatus(t, map[bpf.Instruction]bool{ bpf.ALUOpConstant{}: false, bpf.ALUOpX{}: false, bpf.Jump{}: false, bpf.JumpIf{}: false, bpf.JumpIfX{}: false, bpf.LoadAbsolute{}: false, bpf.LoadConstant{Dst: bpf.RegA}: false, bpf.LoadConstant{Dst: bpf.RegX}: false, bpf.LoadExtension{}: false, bpf.LoadIndirect{}: false, bpf.LoadMemShift{}: false, bpf.LoadScratch{Dst: bpf.RegA, N: 3}: false, bpf.LoadScratch{Dst: bpf.RegX, N: 3}: false, bpf.NegateA{}: false, bpf.RetA{}: false, bpf.RetConstant{}: false, bpf.StoreScratch{Src: bpf.RegA, N: 3}: true, bpf.StoreScratch{Src: bpf.RegX, N: 3}: true, bpf.TAX{}: false, bpf.TXA{}: false, }, func(insn bpf.Instruction) bool { return memWrites(insn).scratch[3] }) } func checkMemoryStatus(t *testing.T, expected map[bpf.Instruction]bool, test func(bpf.Instruction) bool) { t.Helper() for insn, value := range expected { if test(insn) != value { t.Fatalf("Instruction %v expected %v got %v", insn, value, test(insn)) } } } // reg uninitialized and used in one block func TestUninitializedReg(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.RetA{}, }) blocks := mustSplitBlocks(t, 1, insns) err := initializeMemory(blocks) if err != nil { t.Fatal(err) } matchBlock(t, blocks[0], join( []instruction{{Instruction: bpf.LoadConstant{Dst: bpf.RegA, Val: 0}}}, insns, ), nil) } // reg initialized in one branch, but not the other func TestPartiallyUninitializedReg(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadConstant{Dst: bpf.RegA, Val: 3}, /* 1 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 1}, // jump to block 1 or 2 // block 1 /* 2 */ bpf.TAX{}, // initialize RegX // fall through to block 2 // block 2 /* 3 */ bpf.TXA{}, // RegX used potentially uninitialized /* 4 */ bpf.RetA{}, }) blocks := mustSplitBlocks(t, 3, insns) err := initializeMemory(blocks) if err != nil { t.Fatal(err) } 
matchBlock(t, blocks[0], join( []instruction{{Instruction: bpf.LoadConstant{Dst: bpf.RegX, Val: 0}}}, insns[:2], ), nil) matchBlock(t, blocks[1], insns[2:3], nil) matchBlock(t, blocks[2], insns[3:], nil) } // scratch reg uninitialized and used in one block func TestUninitializedScratch(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadScratch{Dst: bpf.RegA, N: 2}, /* 1 */ bpf.RetA{}, }) blocks := mustSplitBlocks(t, 1, insns) requireError(t, initializeMemory(blocks), "instruction 0: ld M[2] reads potentially uninitialized scratch register M[2]") } // scratch reg initialized in one branch, but not the other func TestPartiallyUninitializedScratch(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadConstant{Dst: bpf.RegA, Val: 3}, /* 1 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 1}, // jump to block 1 or 2 // block 1 /* 2 */ bpf.StoreScratch{Src: bpf.RegA, N: 5}, // initialize m[2] // fall through to block 2 // block 2 /* 3 */ bpf.LoadScratch{Dst: bpf.RegA, N: 5}, /* 4 */ bpf.RetA{}, }) blocks := mustSplitBlocks(t, 3, insns) requireError(t, initializeMemory(blocks), "instruction 3: ld M[5] reads potentially uninitialized scratch register M[5]") } // Test block splitting func TestBlocksJump(t *testing.T) { insns := toInstructions([]bpf.Instruction{ /* 0 */ bpf.LoadConstant{Dst: bpf.RegX, Val: 3}, /* 1 */ bpf.Jump{Skip: 1}, /* 2 */ bpf.RetConstant{Val: 0}, // unreachable /* 3 */ bpf.RetConstant{Val: 1}, }) blocks := mustSplitBlocks(t, 2, insns) // Unreachable code will never make it into a block matchBlock(t, blocks[0], insns[:2], map[pos]*block{3: blocks[1]}) matchBlock(t, blocks[1], insns[3:], map[pos]*block{}) } func TestBlocksJumpIf(t *testing.T) { insns := toInstructions([]bpf.Instruction{ /* 0 */ bpf.LoadConstant{Dst: bpf.RegA, Val: 0}, /* 1 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 1, SkipFalse: 0}, /* 2 */ bpf.RetConstant{Val: 0}, /* 3 */ bpf.RetConstant{Val: 1}, }) blocks := mustSplitBlocks(t, 3, insns) matchBlock(t, blocks[0], insns[0:2], map[pos]*block{2: blocks[1], 3: blocks[2]}) matchBlock(t, blocks[1], insns[2:3], map[pos]*block{}) matchBlock(t, blocks[2], insns[3:4], map[pos]*block{}) } func TestBlocksJumpIfX(t *testing.T) { insns := toInstructions([]bpf.Instruction{ /* 0 */ bpf.LoadConstant{Dst: bpf.RegA, Val: 0}, /* 1 */ bpf.LoadConstant{Dst: bpf.RegX, Val: 3}, /* 2 */ bpf.JumpIfX{Cond: bpf.JumpEqual, SkipTrue: 1, SkipFalse: 0}, /* 3 */ bpf.RetConstant{Val: 0}, /* 4 */ bpf.RetConstant{Val: 1}, }) blocks := mustSplitBlocks(t, 3, insns) matchBlock(t, blocks[0], insns[0:3], map[pos]*block{3: blocks[1], 4: blocks[2]}) matchBlock(t, blocks[1], insns[3:4], map[pos]*block{}) matchBlock(t, blocks[2], insns[4:5], map[pos]*block{}) } // Division by constant 0 func TestDivisionByZeroImm(t *testing.T) { test := func(t *testing.T, op bpf.ALUOp) { t.Helper() blocks := mustSplitBlocks(t, 1, toInstructions([]bpf.Instruction{ bpf.ALUOpConstant{Op: op, Val: 0}, bpf.RetConstant{}, })) requireError(t, addDivideByZeroGuards(blocks), "divides by 0") } test(t, bpf.ALUOpDiv) test(t, bpf.ALUOpMod) } // Division by RegX func TestDivisionByZeroX(t *testing.T) { test := func(t *testing.T, op bpf.ALUOp) { t.Helper() insns := toInstructions([]bpf.Instruction{ bpf.LoadAbsolute{Size: 1, Off: 0}, bpf.TXA{}, bpf.ALUOpX{Op: op}, bpf.RetConstant{}, }) blocks := mustSplitBlocks(t, 1, insns) err := addDivideByZeroGuards(blocks) if err != nil { t.Fatal(err) } matchBlock(t, blocks[0], join( insns[:2], 
[]instruction{{Instruction: checkXNotZero{}}}, insns[2:], ), nil) } test(t, bpf.ALUOpDiv) test(t, bpf.ALUOpMod) } // Division by RegX twice in same block func TestDivisionByZeroXTwice(t *testing.T) { test := func(t *testing.T, op bpf.ALUOp) { t.Helper() insns := toInstructions([]bpf.Instruction{ bpf.LoadAbsolute{Size: 1, Off: 0}, bpf.TXA{}, bpf.ALUOpX{Op: op}, bpf.ALUOpX{Op: op}, bpf.RetConstant{}, }) blocks := mustSplitBlocks(t, 1, insns) err := addDivideByZeroGuards(blocks) if err != nil { t.Fatal(err) } matchBlock(t, blocks[0], join( insns[:2], []instruction{{Instruction: checkXNotZero{}}}, insns[2:], ), nil) } test(t, bpf.ALUOpDiv) test(t, bpf.ALUOpMod) } // Division by RegX after RegX clobbered func TestDivisionByZeroXConstant(t *testing.T) { test := func(t *testing.T, op bpf.ALUOp) { t.Helper() insns := toInstructions([]bpf.Instruction{ bpf.LoadAbsolute{Size: 1, Off: 0}, bpf.TXA{}, bpf.ALUOpX{Op: op}, bpf.LoadConstant{Dst: bpf.RegX}, // Clobber X bpf.ALUOpX{Op: op}, bpf.RetConstant{}, }) blocks := mustSplitBlocks(t, 1, insns) err := addDivideByZeroGuards(blocks) if err != nil { t.Fatal(err) } matchBlock(t, blocks[0], join( insns[:2], []instruction{{Instruction: checkXNotZero{}}}, insns[2:4], []instruction{{Instruction: checkXNotZero{}}}, insns[4:], ), nil) } test(t, bpf.ALUOpDiv) test(t, bpf.ALUOpMod) } func TestDivisionByZeroXMemShift(t *testing.T) { test := func(t *testing.T, op bpf.ALUOp) { t.Helper() insns := toInstructions([]bpf.Instruction{ bpf.LoadAbsolute{Size: 1, Off: 0}, bpf.TXA{}, bpf.ALUOpX{Op: op}, bpf.LoadMemShift{Off: 2}, // Clobber X bpf.ALUOpX{Op: op}, bpf.RetConstant{}, }) blocks := mustSplitBlocks(t, 1, insns) err := addDivideByZeroGuards(blocks) if err != nil { t.Fatal(err) } matchBlock(t, blocks[0], join( insns[:2], []instruction{{Instruction: checkXNotZero{}}}, insns[2:4], []instruction{{Instruction: checkXNotZero{}}}, insns[4:], ), nil) } test(t, bpf.ALUOpDiv) test(t, bpf.ALUOpMod) } func TestDivisionByZeroXTXA(t *testing.T) { test := func(t *testing.T, op bpf.ALUOp) { t.Helper() insns := toInstructions([]bpf.Instruction{ bpf.LoadAbsolute{Size: 1, Off: 0}, bpf.TXA{}, bpf.ALUOpX{Op: op}, bpf.TAX{}, // Clobber X bpf.ALUOpX{Op: op}, bpf.RetConstant{}, }) blocks := mustSplitBlocks(t, 1, insns) err := addDivideByZeroGuards(blocks) if err != nil { t.Fatal(err) } matchBlock(t, blocks[0], join( insns[:2], []instruction{{Instruction: checkXNotZero{}}}, insns[2:4], []instruction{{Instruction: checkXNotZero{}}}, insns[4:], ), nil) } test(t, bpf.ALUOpDiv) test(t, bpf.ALUOpMod) } // Check we use parent guards func TestDivisionByZeroParentsOK(t *testing.T) { test := func(t *testing.T, op bpf.ALUOp) { t.Helper() insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadAbsolute{Size: 1, Off: 0}, /* 1 */ bpf.TXA{}, /* 2 */ bpf.ALUOpX{Op: op}, /* 3 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 2}, // jump to block 1 or 2 // block 1 /* 4 */ bpf.LoadAbsolute{Size: 1, Off: 1}, /* 5 */ bpf.Jump{Skip: 1}, // jump to block 3 // block 2 /* 6 */ bpf.LoadAbsolute{Size: 1, Off: 2}, // fall through to block 3 // block 3 /* 7 */ bpf.ALUOpX{Op: op}, /* 8 */ bpf.RetConstant{}, }) blocks := mustSplitBlocks(t, 4, insns) err := addDivideByZeroGuards(blocks) if err != nil { t.Fatal(err) } matchBlock(t, blocks[0], join( insns[:2], []instruction{{Instruction: checkXNotZero{}}}, insns[2:4], ), nil) matchBlock(t, blocks[1], insns[4:6], nil) matchBlock(t, blocks[2], insns[6:7], nil) matchBlock(t, blocks[3], insns[7:], nil) } test(t, bpf.ALUOpDiv) test(t, bpf.ALUOpMod) 
} // Check we add new guards with partial parent guards func TestDivisionByZeroParentsNOK(t *testing.T) { test := func(t *testing.T, op bpf.ALUOp) { t.Helper() insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadAbsolute{Size: 1, Off: 0}, /* 1 */ bpf.TXA{}, /* 2 */ bpf.ALUOpX{Op: op}, /* 3 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 2}, // jump to block 1 or 2 // block 1 /* 4 */ bpf.LoadMemShift{Off: 1}, // clobber X /* 5 */ bpf.Jump{Skip: 1}, // jump to block 3 // block 2 /* 6 */ bpf.LoadAbsolute{Size: 1, Off: 2}, // fall through to block 3 // block 3 /* 7 */ bpf.ALUOpX{Op: op}, /* 8 */ bpf.RetConstant{}, }) blocks := mustSplitBlocks(t, 4, insns) err := addDivideByZeroGuards(blocks) if err != nil { t.Fatal(err) } matchBlock(t, blocks[0], join( insns[:2], []instruction{{Instruction: checkXNotZero{}}}, insns[2:4], ), nil) matchBlock(t, blocks[1], insns[4:6], nil) matchBlock(t, blocks[2], insns[6:7], nil) matchBlock(t, blocks[3], join( []instruction{{Instruction: checkXNotZero{}}}, insns[7:], ), nil) } test(t, bpf.ALUOpDiv) test(t, bpf.ALUOpMod) } func TestRewriteLargePacketOffsets(t *testing.T) { testOK := func(t *testing.T, load bpf.Instruction) { t.Helper() insns := toInstructions([]bpf.Instruction{ load, bpf.RetA{}, }) blocks := mustSplitBlocks(t, 1, insns) rewriteLargePacketOffsets(&blocks) matchBlock(t, blocks[0], insns, nil) } testOOB := func(t *testing.T, load bpf.Instruction) { t.Helper() insns := toInstructions([]bpf.Instruction{ load, bpf.RetA{}, }) blocks := mustSplitBlocks(t, 1, insns) rewriteLargePacketOffsets(&blocks) matchBlock(t, blocks[0], []instruction{ {Instruction: bpf.RetConstant{}}, }, nil) } testOK(t, bpf.LoadAbsolute{Size: 1, Off: 65534}) testOOB(t, bpf.LoadAbsolute{Size: 1, Off: 65535}) testOK(t, bpf.LoadAbsolute{Size: 2, Off: 65533}) testOOB(t, bpf.LoadAbsolute{Size: 2, Off: 65534}) testOK(t, bpf.LoadAbsolute{Size: 4, Off: 65531}) testOOB(t, bpf.LoadAbsolute{Size: 4, Off: 65532}) testOK(t, bpf.LoadMemShift{Off: 65534}) testOOB(t, bpf.LoadMemShift{Off: 65535}) } // Test unreachable blocks due to large packet offsets are removed. func TestRewriteLargePacketOffsetsDeadBlock(t *testing.T) { filter := []bpf.Instruction{ // block 0 /* 0 */ bpf.LoadAbsolute{Size: 4, Off: 2}, /* 1 */ bpf.JumpIf{Cond: bpf.JumpGreaterThan, Val: 2, SkipTrue: 6}, // jump to block 1 or 6 // block 1 /* 2 */ bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 2, SkipTrue: 3}, // jump to block 2 or 3 // block 2 /* 3 */ bpf.LoadAbsolute{Size: 1, Off: 65598}, /* 4 */ bpf.ALUOpConstant{Op: bpf.ALUOpMul, Val: 4}, /* 5 */ bpf.Jump{Skip: 1}, // jump to block 4 // block 3 /* 6 */ bpf.LoadAbsolute{Size: 4, Off: 65532}, // block 4 /* 7 */ bpf.ALUOpConstant{Op: bpf.ALUOpAdd, Val: 2}, // block 5 /* 8 */ bpf.RetA{}, } insns := toInstructions(filter) blocks := mustSplitBlocks(t, 6, insns) rewriteLargePacketOffsets(&blocks) if len(blocks) != 5 { t.Fatalf("expected 5 blocks, got %v", blocks) } matchBlock(t, blocks[0], insns[0:2], nil) matchBlock(t, blocks[1], insns[2:3], nil) matchBlock(t, blocks[2], []instruction{ {Instruction: bpf.RetConstant{}}, }, nil) matchBlock(t, blocks[3], []instruction{ {Instruction: bpf.RetConstant{}}, }, nil) // block 4 is unreachable and removed, block 5 replaces it matchBlock(t, blocks[4], insns[8:], nil) // Make sure this is accepted by the verifier. 
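	// checkBackends compiles the filter with the C, eBPF and kernel backends; the XDP-based
	// backends load the generated program, so a verifier rejection fails the test.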
checkBackends(t, filter, []byte{}, noMatch) } // Test absolute guards func TestAbsoluteGuardSize(t *testing.T) { insns := toInstructions([]bpf.Instruction{ bpf.LoadAbsolute{Size: 4, Off: 10}, // guard 14 bpf.LoadAbsolute{Size: 1, Off: 10}, // guard 11 bpf.RetConstant{}, }) blocks := mustSplitBlocks(t, 1, insns) addAbsolutePacketGuards(blocks) matchBlock(t, blocks[0], join( []instruction{{Instruction: packetGuardAbsolute{end: 14}}}, insns, ), nil) } // Check we don't add a guard if there are no packet loads func TestNoAbsoluteGuard(t *testing.T) { insns := toInstructions([]bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegA, Val: 23}, bpf.RetA{}, }) blocks := mustSplitBlocks(t, 1, insns) addAbsolutePacketGuards(blocks) matchBlock(t, blocks[0], insns, nil) } // Check we use parent guards if they're big enough func TestAbsoluteGuardParentsOK(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadAbsolute{Size: 4, Off: 10}, // guard 14 /* 1 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 2}, // jump to block 1 or 2 // block 1 /* 2 */ bpf.LoadAbsolute{Size: 4, Off: 10}, // guard 14 /* 3 */ bpf.Jump{Skip: 1}, // jump to block 3 // block 2 /* 4 */ bpf.LoadAbsolute{Size: 2, Off: 8}, // guard 10 // fall through to block 3 // block 3 /* 5 */ bpf.LoadAbsolute{Size: 1, Off: 9}, // guard 10 /* 6 */ bpf.RetA{}, }) blocks := mustSplitBlocks(t, 4, insns) addAbsolutePacketGuards(blocks) matchBlock(t, blocks[0], join( []instruction{{Instruction: packetGuardAbsolute{end: 14}}}, insns[:2], ), nil) matchBlock(t, blocks[1], insns[2:4], nil) matchBlock(t, blocks[2], insns[4:5], nil) matchBlock(t, blocks[3], insns[5:], nil) } // Check the parent guard is extended to cover children that always return no match func TestAbsoluteGuardParentNoMatch(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadAbsolute{Size: 4, Off: 10}, // guard 14 /* 1 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 3}, // jump to block 1 or 3 // block 1 /* 2 */ bpf.LoadAbsolute{Size: 4, Off: 12}, // guard 16 /* 3 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 1}, // jump to block 2 or 3 // block 2 /* 4 */ bpf.RetA{}, // potential match // block 3 /* 5 */ bpf.RetConstant{}, // no match }) blocks := mustSplitBlocks(t, 4, insns) addAbsolutePacketGuards(blocks) matchBlock(t, blocks[0], join( []instruction{{Instruction: packetGuardAbsolute{end: 16}}}, insns[:2], ), nil) matchBlock(t, blocks[1], insns[2:4], nil) matchBlock(t, blocks[2], insns[4:5], nil) matchBlock(t, blocks[3], insns[5:], nil) } // Check the parent guard is extended to cover indirect children that always return no match func TestAbsoluteGuardParentDeepNoMatch(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadAbsolute{Size: 4, Off: 10}, // guard 14 /* 1 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 5}, // jump to block 1 or 4 // block 1 /* 2 */ bpf.LoadAbsolute{Size: 4, Off: 12}, // guard 16 /* 3 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 3}, // jump to block 2 or 4 // block 2 /* 4 */ bpf.LoadAbsolute{Size: 4, Off: 14}, // guard 18 /* 5 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 1}, // jump to block 3 or 4 // block 3 /* 6 */ bpf.RetA{}, // potential match // block 4 /* 7 */ bpf.RetConstant{}, // no match }) blocks := mustSplitBlocks(t, 5, insns) addAbsolutePacketGuards(blocks) matchBlock(t, blocks[0], join( []instruction{{Instruction: packetGuardAbsolute{end: 
18}}}, insns[:2], ), nil) matchBlock(t, blocks[1], insns[2:4], nil) matchBlock(t, blocks[2], insns[4:6], nil) matchBlock(t, blocks[3], insns[6:7], nil) matchBlock(t, blocks[4], insns[7:], nil) } // Check the parent guard isn't extended to cover children that could match func TestAbsoluteGuardParentMatch(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadAbsolute{Size: 4, Off: 10}, // guard 14 /* 1 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 2}, // jump to block 1 or 2 // block 1 /* 2 */ bpf.LoadAbsolute{Size: 4, Off: 11}, // guard 15 /* 3 */ bpf.RetA{}, // potential match // block 2 /* 4 */ bpf.LoadAbsolute{Size: 1, Off: 15}, // guard 16 /* 5 */ bpf.RetConstant{}, // no match }) blocks := mustSplitBlocks(t, 3, insns) addAbsolutePacketGuards(blocks) matchBlock(t, blocks[0], join( []instruction{{Instruction: packetGuardAbsolute{end: 15}}}, insns[:2], ), nil) matchBlock(t, blocks[1], insns[2:4], nil) matchBlock(t, blocks[2], join( []instruction{{Instruction: packetGuardAbsolute{end: 16}}}, insns[4:], ), nil) } func TestIndirectGuardSize(t *testing.T) { insns := toInstructions([]bpf.Instruction{ bpf.LoadIndirect{Size: 4, Off: 10}, // guard [10:14] bpf.LoadIndirect{Size: 1, Off: 8}, // guard [8:9] bpf.RetConstant{}, }) blocks := mustSplitBlocks(t, 1, insns) addIndirectPacketGuards(blocks) matchBlock(t, blocks[0], join( []instruction{{Instruction: packetGuardIndirect{start: 8, end: 14}}}, insns, ), nil) } // Check we don't add a guard if there are no packet loads func TestNoIndirectGuard(t *testing.T) { insns := toInstructions([]bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegA, Val: 23}, bpf.RetA{}, }) blocks := mustSplitBlocks(t, 1, insns) addIndirectPacketGuards(blocks) matchBlock(t, blocks[0], insns, nil) } // Check we add new guards if current is not big enough due to RegX clobber func TestIndirectGuardClobber(t *testing.T) { check := func(clobber bpf.Instruction) func(t *testing.T) { return func(t *testing.T) { insns := toInstructions([]bpf.Instruction{ bpf.LoadIndirect{Size: 4, Off: 10}, // guard [10:14] clobber, // clobber X, packet guard no longer valid bpf.LoadIndirect{Size: 2, Off: 8}, // guard [8:10] bpf.RetA{}, }) blocks := mustSplitBlocks(t, 1, insns) addIndirectPacketGuards(blocks) matchBlock(t, blocks[0], join( []instruction{{Instruction: packetGuardIndirect{start: 10, end: 14}}}, insns[:2], []instruction{{Instruction: packetGuardIndirect{start: 8, end: 10}}}, insns[2:], ), nil) } } t.Run("constant", check(bpf.LoadConstant{Dst: bpf.RegX})) t.Run("scratch", check(bpf.LoadScratch{Dst: bpf.RegX})) t.Run("memshift", check(bpf.LoadMemShift{Off: 2})) } // #20: we didn't always emit packet guards for the last instruction of a block. 
func TestIndirectGuardClobberLast(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadIndirect{Size: 4, Off: 10}, // guard [10:14] /* 1 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 1}, // jump to block 1 or 2 // block 1 /* 2 */ bpf.LoadConstant{Dst: bpf.RegX, Val: 23}, // fall through to block 2 // block 2 /* 3 */ bpf.LoadIndirect{Size: 1, Off: 10}, // guard [10:11] /* 4 */ bpf.TXA{}, /* 5 */ bpf.RetA{}, }) blocks := mustSplitBlocks(t, 3, insns) addIndirectPacketGuards(blocks) matchBlock(t, blocks[0], join( []instruction{{Instruction: packetGuardIndirect{start: 10, end: 14}}}, insns[:2], ), nil) matchBlock(t, blocks[1], insns[2:3], nil) matchBlock(t, blocks[2], join( []instruction{{Instruction: packetGuardIndirect{start: 10, end: 11}}}, insns[3:], ), nil) } // Check we use parent guards if they're big enough. func TestIndirectGuardParentsOK(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadIndirect{Size: 4, Off: 10}, // guard [10:14] /* 1 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 2}, // jump to block 1 or 2 // block 1 /* 2 */ bpf.LoadIndirect{Size: 4, Off: 10}, // guard [10:14] /* 3 */ bpf.Jump{Skip: 1}, // jump to block 3 // block 2 /* 4 */ bpf.LoadIndirect{Size: 2, Off: 12}, // guard [12:14] // fall through to block 3 // block 3 /* 5 */ bpf.LoadIndirect{Size: 1, Off: 10}, // guard [10:11] /* 6 */ bpf.RetA{}, }) blocks := mustSplitBlocks(t, 4, insns) addIndirectPacketGuards(blocks) matchBlock(t, blocks[0], join( []instruction{{Instruction: packetGuardIndirect{start: 10, end: 14}}}, insns[:2], ), nil) matchBlock(t, blocks[1], insns[2:4], nil) matchBlock(t, blocks[2], insns[4:5], nil) matchBlock(t, blocks[3], insns[5:], nil) } // Check the parent guard is extended to cover children that always return no match func TestIndirectGuardParentNoMatch(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadIndirect{Size: 4, Off: 10}, // guard [10:14] /* 1 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 3}, // jump to block 1 or 3 // block 1 /* 2 */ bpf.LoadIndirect{Size: 4, Off: 12}, // guard [12:16] /* 3 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 1}, // jump to block 2 or 3 // block 2 /* 4 */ bpf.RetA{}, // potential match // block 3 /* 5 */ bpf.RetConstant{}, // no match }) blocks := mustSplitBlocks(t, 4, insns) addIndirectPacketGuards(blocks) matchBlock(t, blocks[0], join( []instruction{{Instruction: packetGuardIndirect{start: 10, end: 16}}}, insns[:2], ), nil) matchBlock(t, blocks[1], insns[2:4], nil) matchBlock(t, blocks[2], insns[4:5], nil) matchBlock(t, blocks[3], insns[5:], nil) } // Check the parent guard is extended to cover indirect children that always return no match func TestIndirectGuardParentDeepNoMatch(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadIndirect{Size: 4, Off: 10}, // guard [10:14] /* 1 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 5}, // jump to block 1 or 4 // block 1 /* 2 */ bpf.LoadIndirect{Size: 4, Off: 12}, // guard [12:16] /* 3 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 3}, // jump to block 2 or 4 // block 2 /* 4 */ bpf.LoadIndirect{Size: 4, Off: 14}, // guard [14:18] /* 5 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 1}, // jump to block 3 or 4 // block 3 /* 6 */ bpf.RetA{}, // potential match // block 4 /* 7 */ bpf.RetConstant{}, // no match }) blocks := 
mustSplitBlocks(t, 5, insns) addIndirectPacketGuards(blocks) matchBlock(t, blocks[0], join( []instruction{{Instruction: packetGuardIndirect{start: 10, end: 18}}}, insns[:2], ), nil) matchBlock(t, blocks[1], insns[2:4], nil) matchBlock(t, blocks[2], insns[4:6], nil) matchBlock(t, blocks[3], insns[6:7], nil) matchBlock(t, blocks[4], insns[7:], nil) } // Check the parent guard isn't extended to cover children that could match func TestIndirectGuardParentMatch(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadIndirect{Size: 2, Off: 10}, // guard [10:12] /* 1 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 2}, // jump to block 1 or 2 // block 1 /* 2 */ bpf.LoadIndirect{Size: 4, Off: 9}, // guard [9:13] /* 3 */ bpf.RetA{}, // potential match // block 2 /* 4 */ bpf.LoadIndirect{Size: 1, Off: 15}, // guard [15:16] /* 5 */ bpf.RetConstant{}, // no match }) blocks := mustSplitBlocks(t, 3, insns) addIndirectPacketGuards(blocks) matchBlock(t, blocks[0], join( []instruction{{Instruction: packetGuardIndirect{start: 9, end: 13}}}, insns[:2], ), nil) matchBlock(t, blocks[1], insns[2:4], nil) matchBlock(t, blocks[2], join( []instruction{{Instruction: packetGuardIndirect{start: 9, end: 16}}}, // Extra guard info is preserved insns[4:], ), nil) } // Check we add new guards if one of the parent guards is not big enough due to RegX clobber func TestIndirectGuardParentClobber(t *testing.T) { check := func(clobber bpf.Instruction) func(t *testing.T) { return func(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadIndirect{Size: 4, Off: 10}, // guard [10:14] /* 1 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 3}, // jump to block 1 or 2 // block 1 /* 2 */ bpf.LoadIndirect{Size: 4, Off: 10}, // guard [10:14] /* 3 */ clobber, // clobber X, packet guard no longer valid /* 4 */ bpf.Jump{Skip: 1}, // jump to block 3 // block 2 /* 5 */ bpf.LoadIndirect{Size: 2, Off: 12}, // guard [12:14] // fall through to block 3 // block 3 /* 6 */ bpf.LoadIndirect{Size: 1, Off: 1}, // guard [1:2] /* 7 */ bpf.RetA{}, }) blocks := mustSplitBlocks(t, 4, insns) addIndirectPacketGuards(blocks) matchBlock(t, blocks[0], join( []instruction{{Instruction: packetGuardIndirect{start: 10, end: 14}}}, insns[:2], ), nil) matchBlock(t, blocks[1], insns[2:5], nil) matchBlock(t, blocks[2], insns[5:6], nil) matchBlock(t, blocks[3], join( []instruction{{Instruction: packetGuardIndirect{start: 1, end: 2}}}, insns[6:], ), nil) } } t.Run("constant", check(bpf.LoadConstant{Dst: bpf.RegX})) t.Run("scratch", check(bpf.LoadScratch{Dst: bpf.RegX})) t.Run("memshift", check(bpf.LoadMemShift{Off: 2})) } // Check we don't extend guards past RegX clobbers func TestIndirectGuardExtendClobber(t *testing.T) { check := func(clobber bpf.Instruction) func(t *testing.T) { return func(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadIndirect{Size: 4, Off: 10}, // guard [10:14] /* 1 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 5}, // jump to block 1 or 4 // block 1 /* 2 */ clobber, // clobber X, packet guard no longer valid /* 3 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 3}, // jump to block 2 or 4 // block 2 /* 4 */ bpf.LoadIndirect{Size: 4, Off: 14}, // guard [14:18] /* 5 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 1}, // jump to block 3 or 4 // block 3 /* 6 */ bpf.RetA{}, // potential match // block 4 /* 7 */ bpf.RetConstant{}, // no match }) blocks := 
mustSplitBlocks(t, 5, insns) addIndirectPacketGuards(blocks) matchBlock(t, blocks[0], join( []instruction{{Instruction: packetGuardIndirect{start: 10, end: 14}}}, insns[:2], ), nil) matchBlock(t, blocks[1], insns[2:4], nil) matchBlock(t, blocks[2], join( []instruction{{Instruction: packetGuardIndirect{start: 14, end: 18}}}, insns[4:6], ), nil) matchBlock(t, blocks[3], insns[6:7], nil) matchBlock(t, blocks[4], insns[7:], nil) } } t.Run("constant", check(bpf.LoadConstant{Dst: bpf.RegX})) t.Run("scratch", check(bpf.LoadScratch{Dst: bpf.RegX})) t.Run("memshift", check(bpf.LoadMemShift{Off: 2})) } // Check we use new guards if the parent ones aren't big enough func TestIndirectGuardParentsNotOK(t *testing.T) { insns := toInstructions([]bpf.Instruction{ // block 0 /* 0 */ bpf.LoadIndirect{Size: 4, Off: 10}, // guard [10:14] /* 1 */ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 2}, // jump to block 1 or 2 // block 1 /* 2 */ bpf.LoadIndirect{Size: 4, Off: 8}, // guard [8:12] /* 3 */ bpf.Jump{Skip: 1}, // jump to block 3 // block 2 /* 4 */ bpf.LoadIndirect{Size: 2, Off: 13}, // guard [13:15] // fall through to block 3 // block 3 /* 5 */ bpf.LoadIndirect{Size: 1, Off: 9}, // guard [9:1] /* 6 */ bpf.RetA{}, }) blocks := mustSplitBlocks(t, 4, insns) addIndirectPacketGuards(blocks) matchBlock(t, blocks[0], join( []instruction{{Instruction: packetGuardIndirect{start: 9, end: 14}}}, insns[:2], ), nil) matchBlock(t, blocks[1], join( []instruction{{Instruction: packetGuardIndirect{start: 8, end: 14}}}, insns[2:4], ), nil) matchBlock(t, blocks[2], join( []instruction{{Instruction: packetGuardIndirect{start: 9, end: 15}}}, insns[4:5], ), nil) matchBlock(t, blocks[3], insns[5:], nil) } func join(insns ...[]instruction) []instruction { res := []instruction{} for _, insn := range insns { res = append(res, insn...) } return res } // matchBlock checks a block has the given instructions and jumps func matchBlock(t *testing.T, b *block, expected []instruction, jumps map[pos]*block) { t.Helper() if !reflect.DeepEqual(expected, b.insns) { t.Fatalf("expected instructions %v, got %v", expected, b.insns) } if jumps != nil && !reflect.DeepEqual(jumps, b.jumps) { t.Fatalf("expected jumps %v, got %v", jumps, b.jumps) } } func mustSplitBlocks(t *testing.T, blockCount int, insns []instruction) []*block { blocks, err := splitBlocks(insns) if err != nil { t.Fatal("splitBlocks failed:", err) } if len(blocks) != blockCount { t.Fatalf("expected %d blocks got %d", blockCount, len(blocks)) } return blocks } golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/clang/000077500000000000000000000000001456326020600231055ustar00rootroot00000000000000golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/clang/clang.go000066400000000000000000000045771456326020600245350ustar00rootroot00000000000000// Package clang implements a simple wrapper for invoking clang to // compile C to eBPF package clang import ( "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "github.com/pkg/errors" ) // CompileOpts configure how an XDP program is compiled / built type Opts struct { // clang binary to use Clang string // Header directories to include Include []string // Destination directory for compiled programs. // Uses a temporary directory if empty. Output string // Emit DWARF debug info in the XDP elf. // Required for BTF. 
EmitDebug bool } // Compile compiles a C source string into an ELF func Compile(source []byte, name string, opts Opts) ([]byte, error) { var err error outdir := opts.Output if outdir == "" { outdir, err = ioutil.TempDir("", "cbpfc-clang") if err != nil { return nil, errors.Wrap(err, "can't create output directory") } defer os.RemoveAll(outdir) } else { _ = os.Mkdir(outdir, 0755) } inputFile := fmt.Sprintf("%s.c", name) outputFile := fmt.Sprintf("%s.elf", name) err = ioutil.WriteFile(filepath.Join(outdir, inputFile), source, 0644) if err != nil { return nil, errors.Wrap(err, "can't write out program") } flags := []string{ "-O2", "-Wall", "-Werror", "-nostdinc", "-c", "-target", "bpf", inputFile, "-o", outputFile, } for _, include := range opts.Include { // debug build script will be in a different directory, relative imports won't work absInclude, err := filepath.Abs(include) if err != nil { return nil, errors.Wrapf(err, "can't get absolute path to include %s", include) } flags = append(flags, "-I", absInclude) } if opts.EmitDebug { flags = append(flags, "-g") } cmd := exec.Command(opts.Clang, flags...) // debug build script if opts.Output != "" { cmdline := cmd.Path + " " + strings.Join(flags, " ") + "\n" err := ioutil.WriteFile(filepath.Join(outdir, "build"), []byte(cmdline), 0644) if err != nil { return nil, errors.Wrap(err, "can't write build cmdline") } } cmd.Dir = outdir _, err = cmd.Output() if err != nil { switch e := err.(type) { case *exec.ExitError: return nil, errors.Wrapf(e, "unable to compile C:\n%s", string(e.Stderr)) default: return nil, errors.Wrapf(e, "unable to compile C") } } elf, err := ioutil.ReadFile(filepath.Join(outdir, outputFile)) if err != nil { return nil, errors.Wrap(err, "can't read ELF") } return elf, nil } golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/ebpf.go000066400000000000000000000253461456326020600232760ustar00rootroot00000000000000package cbpfc import ( "fmt" "math" "github.com/cilium/ebpf/asm" "github.com/pkg/errors" "golang.org/x/net/bpf" ) // internal label when packet doesn't match const noMatchLabel = "nomatch" // alu operation to eBPF var aluToEBPF = map[bpf.ALUOp]asm.ALUOp{ bpf.ALUOpAdd: asm.Add, bpf.ALUOpSub: asm.Sub, bpf.ALUOpMul: asm.Mul, bpf.ALUOpDiv: asm.Div, bpf.ALUOpOr: asm.Or, bpf.ALUOpAnd: asm.And, bpf.ALUOpShiftLeft: asm.LSh, bpf.ALUOpShiftRight: asm.RSh, bpf.ALUOpMod: asm.Mod, bpf.ALUOpXor: asm.Xor, } // bpf sizes to ebpf var sizeToEBPF = map[int]asm.Size{ 1: asm.Byte, 2: asm.Half, 4: asm.Word, } // EBPFOpts control how a cBPF filter is converted to eBPF type EBPFOpts struct { // PacketStart is a register holding a pointer to the start of the packet. // Not modified. PacketStart asm.Register // PacketEnd is a register holding a pointer to the end of the packet. // Not modified. PacketEnd asm.Register // Register to output the filter return value in. Result asm.Register // Label to jump to with the result of the filter in register Result. ResultLabel string // Working are registers used internally. // Caller saved. // Must be different to PacketStart and PacketEnd, but Result can be reused. Working [4]asm.Register // StackOffset is the number of bytes of stack already used / reserved. // R10 (ebpf frame pointer) + StackOffset will be used as the top of the stack. StackOffset int // LabelPrefix is the prefix to prepend to labels used internally. 
	LabelPrefix string
}

// ebpfOpts is the internal version of EBPFOpts
type ebpfOpts struct {
	EBPFOpts

	// Registers mapping directly to cBPF
	regA asm.Register
	regX asm.Register

	// Temp / scratch register
	regTmp asm.Register

	// Register for indirect packet loads
	// Allows the range of a packet guard to be preserved across multiple loads by the verifier
	regIndirect asm.Register
}

func (e ebpfOpts) reg(reg bpf.Register) asm.Register {
	switch reg {
	case bpf.RegA:
		return e.regA
	case bpf.RegX:
		return e.regX
	default:
		panic("unknown bpf register")
	}
}

func (e ebpfOpts) label(name string) string {
	return fmt.Sprintf("%s_%s", e.LabelPrefix, name)
}

// eBPF stack address offset for BPF scratch slot scratch.
func (e ebpfOpts) stackOffset(scratch int) int16 {
	// First usable stack space ends at StackOffset.
	return -int16(e.StackOffset + (scratch+1)*4)
}

// ToEBPF converts a cBPF filter to eBPF.
//
// The generated eBPF code always jumps to opts.ResultLabel, with register opts.Result containing the filter's return value:
// 0 if the packet does not match the cBPF filter,
// non 0 if the packet does match.
func ToEBPF(filter []bpf.Instruction, opts EBPFOpts) (asm.Instructions, error) {
	blocks, err := compile(filter)
	if err != nil {
		return nil, err
	}

	eOpts := ebpfOpts{
		EBPFOpts:    opts,
		regA:        opts.Working[0],
		regX:        opts.Working[1],
		regTmp:      opts.Working[2],
		regIndirect: opts.Working[3],
	}

	// opts.Result does not have to be unique
	err = registersUnique(eOpts.PacketStart, eOpts.PacketEnd, eOpts.regA, eOpts.regX, eOpts.regTmp, eOpts.regIndirect)
	if err != nil {
		return nil, err
	}
	err = registerValid(eOpts.Result)
	if err != nil {
		return nil, err
	}

	if eOpts.StackOffset&1 == 1 {
		return nil, errors.Errorf("unaligned stack offset")
	}

	eInsns := asm.Instructions{}

	for _, block := range blocks {
		for i, insn := range block.insns {
			eInsn, err := insnToEBPF(insn, block, eOpts)
			if err != nil {
				return nil, errors.Wrapf(err, "unable to compile %v", insn)
			}

			// First insn of the block, add symbol so it can be referenced in jumps
			if i == 0 {
				eInsn[0] = eInsn[0].WithSymbol(eOpts.label(block.Label()))
			}

			eInsns = append(eInsns, eInsn...)
} } // kernel verifier does not like dead code - only include no match block if we used it if _, ok := eInsns.ReferenceOffsets()[eOpts.label(noMatchLabel)]; ok { eInsns = append(eInsns, asm.Mov.Imm(eOpts.Result, 0).WithSymbol(eOpts.label(noMatchLabel)), asm.Ja.Label(opts.ResultLabel), ) } return eInsns, nil } // registersUnique ensures the registers are valid and unique func registersUnique(regs ...asm.Register) error { seen := make(map[asm.Register]struct{}, len(regs)) for _, reg := range regs { if err := registerValid(reg); err != nil { return err } if _, ok := seen[reg]; ok { return errors.Errorf("register %v used twice", reg) } seen[reg] = struct{}{} } return nil } // registerValid ensures that a register is a valid ebpf register func registerValid(reg asm.Register) error { if reg > asm.R9 { return errors.Errorf("invalid register %v", reg) } return nil } // insnToEBPF compiles an instruction to a set of eBPF instructions func insnToEBPF(insn instruction, blk *block, opts ebpfOpts) (asm.Instructions, error) { switch i := insn.Instruction.(type) { case bpf.LoadConstant: return ebpfInsn(asm.Mov.Imm32(opts.reg(i.Dst), int32(i.Val))) case bpf.LoadScratch: return ebpfInsn(asm.LoadMem(opts.reg(i.Dst), asm.R10, opts.stackOffset(i.N), asm.Word)) case bpf.LoadAbsolute: return packetLoad(opts, opts.PacketStart, i.Off, i.Size, func(src asm.Register, offset int16, size asm.Size) asm.Instructions { return appendNtoh(opts.regA, size, asm.LoadMem(opts.regA, src, offset, size), ) }) case bpf.LoadIndirect: // last packet guard set opts.regIndirect to packetstart + x return packetLoad(opts, opts.regIndirect, i.Off, i.Size, func(src asm.Register, offset int16, size asm.Size) asm.Instructions { return appendNtoh(opts.regA, size, asm.LoadMem(opts.regA, src, offset, size), ) }) case bpf.LoadMemShift: return packetLoad(opts, opts.PacketStart, i.Off, 1, func(src asm.Register, offset int16, size asm.Size) asm.Instructions { return []asm.Instruction{ asm.LoadMem(opts.regX, src, offset, size), asm.And.Imm32(opts.regX, 0xF), // clear upper 4 bits asm.LSh.Imm32(opts.regX, 2), // 32bit words to bytes } }) case bpf.StoreScratch: return ebpfInsn(asm.StoreMem(asm.R10, opts.stackOffset(i.N), opts.reg(i.Src), asm.Word)) case bpf.LoadExtension: if i.Num != bpf.ExtLen { return nil, errors.Errorf("unsupported BPF extension %v", i) } return ebpfInsn( asm.Mov.Reg(opts.regA, opts.PacketEnd), asm.Sub.Reg32(opts.regA, opts.PacketStart), ) case bpf.ALUOpConstant: return ebpfInsn(aluToEBPF[i.Op].Imm32(opts.regA, int32(i.Val))) case bpf.ALUOpX: return ebpfInsn(aluToEBPF[i.Op].Reg32(opts.regA, opts.regX)) case bpf.NegateA: return ebpfInsn(asm.Neg.Imm32(opts.regA, 0)) case bpf.Jump: return ebpfInsn(asm.Ja.Label(opts.label(blk.skipToBlock(skip(i.Skip)).Label()))) case bpf.JumpIf: return condToEBPF(opts, skip(i.SkipTrue), skip(i.SkipFalse), blk, i.Cond, func(jo asm.JumpOp, label string) asm.Instructions { // eBPF immediates are signed, zero extend into temp register if int32(i.Val) < 0 { return asm.Instructions{ asm.Mov.Imm32(opts.regTmp, int32(i.Val)), jo.Reg(opts.regA, opts.regTmp, label), } } return asm.Instructions{jo.Imm(opts.regA, int32(i.Val), label)} }) case bpf.JumpIfX: return condToEBPF(opts, skip(i.SkipTrue), skip(i.SkipFalse), blk, i.Cond, func(jo asm.JumpOp, label string) asm.Instructions { return asm.Instructions{jo.Reg(opts.regA, opts.regX, label)} }) case bpf.RetA: return ebpfInsn( asm.Mov.Reg32(opts.Result, opts.regA), asm.Ja.Label(opts.ResultLabel), ) case bpf.RetConstant: return ebpfInsn( asm.Mov.Imm32(opts.Result, 
int32(i.Val)), asm.Ja.Label(opts.ResultLabel), ) case bpf.TXA: return ebpfInsn(asm.Mov.Reg32(opts.regA, opts.regX)) case bpf.TAX: return ebpfInsn(asm.Mov.Reg32(opts.regX, opts.regA)) case packetGuardAbsolute: return ebpfInsn( asm.Mov.Reg(opts.regTmp, opts.PacketStart), asm.Add.Imm(opts.regTmp, i.end), asm.JGT.Reg(opts.regTmp, opts.PacketEnd, opts.label(noMatchLabel)), ) case packetGuardIndirect: return ebpfInsn( // Sign extend RegX to 64bits so we can do signed ALU operations. asm.Mov.Reg(opts.regIndirect, opts.regX), asm.LSh.Imm(opts.regIndirect, 32), asm.ArSh.Imm(opts.regIndirect, 32), // Check maxStartOffset() asm.Add.Imm(opts.regIndirect, i.start), asm.JGE.Imm(opts.regIndirect, i.maxStartOffset(), opts.label(noMatchLabel)), // packet_start + signed x + start // This will have a smin_value >= 0 asm.Add.Reg(opts.regIndirect, opts.PacketStart), // different reg (so actual load picks offset), but same verifier context id asm.Mov.Reg(opts.regTmp, opts.regIndirect), asm.Add.Imm(opts.regTmp, i.length()), asm.JGT.Reg(opts.regTmp, opts.PacketEnd, opts.label(noMatchLabel)), ) case checkXNotZero: return ebpfInsn(asm.JEq.Imm(opts.regX, 0, opts.label(noMatchLabel))) default: return nil, errors.Errorf("unsupported instruction %v", insn) } } type packetRead func(src asm.Register, offset int16, size asm.Size) asm.Instructions func packetLoad(opts ebpfOpts, src asm.Register, offset uint32, size int, makeRead packetRead) (asm.Instructions, error) { // cBPF supports 32 bit signed offsets, but eBPF only 16 bit natively. if int32(offset) > math.MaxInt16 || int32(offset) < math.MinInt16 { return append(asm.Instructions{ asm.Mov.Reg(opts.regTmp, src), // cBPF offsets are signed, casting to int32 is safe. asm.Add.Imm(opts.regTmp, int32(offset)), }, makeRead(opts.regTmp, 0, sizeToEBPF[size])...), nil } return makeRead(src, int16(offset), sizeToEBPF[size]), nil } func appendNtoh(reg asm.Register, size asm.Size, insns ...asm.Instruction) asm.Instructions { if size == asm.Byte { return insns } // BPF_FROM_BE should be a nop on big endian architectures return append(insns, asm.HostTo(asm.BE, reg, size)) } func condToEBPF(opts ebpfOpts, skipTrue, skipFalse skip, blk *block, cond bpf.JumpTest, insn func(jo asm.JumpOp, label string) asm.Instructions) (asm.Instructions, error) { var condToJump = map[bpf.JumpTest]asm.JumpOp{ bpf.JumpEqual: asm.JEq, bpf.JumpNotEqual: asm.JNE, bpf.JumpGreaterThan: asm.JGT, bpf.JumpLessThan: asm.JLT, bpf.JumpGreaterOrEqual: asm.JGE, bpf.JumpLessOrEqual: asm.JLE, bpf.JumpBitsSet: asm.JSet, // BitsNotSet doesn't map to anything nicely } trueLabel := opts.label(blk.skipToBlock(skipTrue).Label()) falseLabel := opts.label(blk.skipToBlock(skipFalse).Label()) // no skipFalse, we only have to explicitly jump to one block trueOnly := skipFalse == 0 // No native BitsNotSet, convert to BitsSet if cond == bpf.JumpBitsNotSet { cond = bpf.JumpBitsSet trueLabel, falseLabel = falseLabel, trueLabel trueOnly = false } if trueOnly { return insn(condToJump[cond], trueLabel), nil } return append( insn(condToJump[cond], trueLabel), asm.Ja.Label(falseLabel), ), nil } func ebpfInsn(insns ...asm.Instruction) (asm.Instructions, error) { return insns, nil } golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/ebpf_example_test.go000066400000000000000000000032141456326020600260360ustar00rootroot00000000000000package cbpfc import ( "github.com/cilium/ebpf/asm" "github.com/pkg/errors" "golang.org/x/net/bpf" ) // ExampleToEBPF demonstrates how to use ToEBPF() to embed a cBPF filter // in an eBPF assembly program. 
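// The generated instructions leave the filter's return value in EBPFOpts.Result and jump to
// EBPFOpts.ResultLabel; buildEBPF below shows an XDP program consuming that result.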
func ExampleToEBPF() { // simple cBPF filter that matches all packets filter := []bpf.Instruction{ bpf.RetConstant{Val: 1}, } prog, err := buildEBPF(filter) if err != nil { panic(err) } // Prog can be loaded directly using cilium/ebpf, // or converted to a '[]struct bpf_insn' for libbpf _ = prog } // buildEBPF compiles a cBPF filter to eBPF, and embeds it an eBPF program. // The XDP program XDP_DROP's incoming packets that match the filter. // Returns the eBPF program instructions func buildEBPF(filter []bpf.Instruction) (asm.Instructions, error) { ebpfFilter, err := ToEBPF(filter, EBPFOpts{ // Pass packet start and end pointers in these registers PacketStart: asm.R2, PacketEnd: asm.R3, // Result of filter Result: asm.R4, ResultLabel: "result", // Registers used by generated code Working: [4]asm.Register{asm.R4, asm.R5, asm.R6, asm.R7}, LabelPrefix: "filter", }) if err != nil { return nil, errors.Wrap(err, "converting filter to eBPF") } prog := asm.Instructions{ // R1 holds XDP context // Packet start asm.LoadMem(asm.R2, asm.R1, 0, asm.Word), // Packet end asm.LoadMem(asm.R3, asm.R1, 4, asm.Word), // Fall through to filter } prog = append(prog, ebpfFilter...) prog = append(prog, asm.Mov.Imm(asm.R0, 2).WithSymbol("result"), // XDP_PASS asm.JEq.Imm(asm.R4, 0, "return"), asm.Mov.Imm(asm.R0, 1), // XDP_DROP asm.Return().WithSymbol("return"), ) return prog, nil } golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/ebpf_test.go000066400000000000000000000007251456326020600243270ustar00rootroot00000000000000package cbpfc import ( "testing" "github.com/cilium/ebpf" "golang.org/x/net/bpf" ) // ebpfBacked is backend that compiles classic BPF to eBPF func ebpfBackend(tb testing.TB, insns []bpf.Instruction, in []byte) result { prog, err := buildEBPF(insns) if err != nil { tb.Fatal(err) } tb.Logf("\n%v", prog) return testProg(tb, &ebpf.ProgramSpec{ Name: "ebpf_filter", Type: ebpf.XDP, Instructions: prog, License: "BSD", }, in) } golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/go.mod000066400000000000000000000003641456326020600231320ustar00rootroot00000000000000module github.com/cloudflare/cbpfc require ( github.com/cilium/ebpf v0.11.0 github.com/pkg/errors v0.9.1 golang.org/x/net v0.17.0 golang.org/x/sys v0.13.0 ) require golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 // indirect go 1.19 golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/go.sum000066400000000000000000000023321456326020600231540ustar00rootroot00000000000000github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y= github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs= github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 h1:Jvc7gsqn21cJHCmAWx0LiimpP18LZmUxkT5Mp7EZ1mI= golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod 
h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/insn_test.go000066400000000000000000000547021456326020600243660ustar00rootroot00000000000000package cbpfc import ( "bytes" "flag" "fmt" "math" "os" "testing" "github.com/cilium/ebpf" "golang.org/x/net/bpf" // syscall has a wonky RLIM_INFINITY, and no RLIMIT_MEMLOCK "golang.org/x/sys/unix" ) func TestMain(m *testing.M) { // Needed for testing.Short flag.Parse() if !testing.Short() { // Remove any locked memory limits so we can load BPF programs err := unix.Setrlimit(unix.RLIMIT_MEMLOCK, &unix.Rlimit{ Cur: unix.RLIM_INFINITY, Max: unix.RLIM_INFINITY, }) if err != nil { panic(err) } } os.Exit(m.Run()) } func TestZeroInitA(t *testing.T) { t.Parallel() filter := []bpf.Instruction{ bpf.RetA{}, } checkBackends(t, filter, []byte{}, noMatch) } func TestZeroInitX(t *testing.T) { t.Parallel() filter := []bpf.Instruction{ bpf.TXA{}, bpf.RetA{}, } checkBackends(t, filter, []byte{}, noMatch) } func TestPartialZeroInitX(t *testing.T) { t.Parallel() filter := []bpf.Instruction{ bpf.LoadAbsolute{Off: 0, Size: 1}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: 3, SkipTrue: 0, SkipFalse: 1}, // jump to block 1 or 2 // block 1 bpf.TAX{}, // initialize RegX // Fall through // block 2 bpf.TXA{}, // RegX used potentially uninitialized bpf.RetA{}, } checkBackends(t, filter, []byte{0}, noMatch) checkBackends(t, filter, []byte{3}, match) } func TestLoadConstantA(t *testing.T) { t.Parallel() filter := func(val uint32) []bpf.Instruction { return []bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegA, Val: val}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: val, SkipTrue: 1}, bpf.RetConstant{Val: 0}, bpf.RetConstant{Val: 1}, } } checkBackends(t, filter(1), []byte{}, match) checkBackends(t, filter(28), []byte{}, match) checkBackends(t, filter(0), []byte{}, match) } func TestLoadConstantX(t *testing.T) { t.Parallel() filter := func(val uint32) []bpf.Instruction { return []bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegX, Val: val}, bpf.TXA{}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: val, SkipTrue: 1}, bpf.RetConstant{Val: 0}, bpf.RetConstant{Val: 1}, } } checkBackends(t, filter(1), []byte{}, match) checkBackends(t, filter(28), []byte{}, match) checkBackends(t, filter(0), []byte{}, match) } func TestLoadAbsolute(t *testing.T) { t.Parallel() filter := func(val uint32, size int) []bpf.Instruction { return []bpf.Instruction{ bpf.LoadAbsolute{Off: 2, Size: size}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: val, SkipTrue: 1}, bpf.RetConstant{Val: 0}, bpf.RetConstant{Val: 1}, } } // 1 checkBackends(t, filter(5, 1), []byte{0, 0, 5}, match) checkBackends(t, filter(6, 1), []byte{0, 0, 5}, noMatch) // 2 checkBackends(t, filter(0xDEAD, 2), []byte{0, 0, 0xDE, 0xAD}, match) checkBackends(t, filter(0xDEAF, 2), []byte{0, 0, 0xDE, 0xAD}, noMatch) // 4 checkBackends(t, filter(0xDEADBEEF, 4), []byte{0, 0, 0xDE, 0xAD, 0xBE, 0xEF}, match) checkBackends(t, filter(0xDEAFBEEF, 4), []byte{0, 0, 0xDE, 0xAD, 0xBE, 0xEF}, noMatch) } func TestLoadAbsoluteBigOffset(t *testing.T) { t.Parallel() // XDP limits packets to one page, so there's no way to feed a packet big enough to test the offsets // we want through BPF_PROG_TEST_RUN. // All we can check is that the verifier accepts the program and it doesn't match. 
filter := func(load bpf.Instruction) []bpf.Instruction { return []bpf.Instruction{ load, bpf.ALUOpConstant{Op: bpf.ALUOpAdd, Val: 2}, bpf.RetA{}, } } checkBackends(t, filter(bpf.LoadAbsolute{Off: maxPacketOffset - 1, Size: 1}), nil, noMatch) checkBackends(t, filter(bpf.LoadAbsolute{Off: maxPacketOffset, Size: 1}), nil, noMatch) checkBackends(t, filter(bpf.LoadAbsolute{Off: maxPacketOffset - 2, Size: 2}), nil, noMatch) checkBackends(t, filter(bpf.LoadAbsolute{Off: maxPacketOffset - 1, Size: 2}), nil, noMatch) checkBackends(t, filter(bpf.LoadAbsolute{Off: maxPacketOffset - 4, Size: 4}), nil, noMatch) checkBackends(t, filter(bpf.LoadAbsolute{Off: maxPacketOffset - 3, Size: 4}), nil, noMatch) checkBackends(t, filter(bpf.LoadMemShift{Off: maxPacketOffset - 1}), nil, noMatch) checkBackends(t, filter(bpf.LoadMemShift{Off: maxPacketOffset}), nil, noMatch) } func TestLoadIndirect(t *testing.T) { t.Parallel() // With a constant RegX, the verifier knows it's exact value. t.Run("constant_valid", func(t *testing.T) { filter := func(regX int32, off int32, size int, val uint32) []bpf.Instruction { return []bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegX, Val: uint32(regX)}, bpf.LoadIndirect{Off: uint32(off), Size: size}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: val, SkipTrue: 1}, bpf.RetConstant{Val: 0}, bpf.RetConstant{Val: 1}, } } // RegX: INT32_MIN+4, Off: INT32_MAX, Size: 1 checkBackends(t, filter(math.MinInt32+4, math.MaxInt32, 1, 5), []byte{0, 0, 0, 5}, match) checkBackends(t, filter(math.MinInt32+4, math.MaxInt32, 1, 6), []byte{0, 0, 0, 5}, noMatch) // RegX: 3, Off: -1, Size: 2 checkBackends(t, filter(3, -1, 2, 0xDEAD), []byte{0, 0, 0xDE, 0xAD}, match) checkBackends(t, filter(3, -1, 2, 0xDEAD), []byte{0, 0, 0xDE, 0xAF}, noMatch) // RegX: 1, Off: 2, Size: 4 checkBackends(t, filter(1, 2, 4, 0xDEADBEEF), []byte{0, 0, 0, 0xDE, 0xAD, 0xBE, 0xEF}, match) checkBackends(t, filter(1, 2, 4, 0xDEADBEEF), []byte{0, 0, 0, 0xDE, 0xAA, 0xBE, 0xEF}, noMatch) }) // But with a variable RegX, the verifier only has whatever checks we perform. t.Run("variable_valid", func(t *testing.T) { filter := func(off int32, size int, val uint32) []bpf.Instruction { return []bpf.Instruction{ bpf.LoadAbsolute{Off: 0, Size: 4}, bpf.TAX{}, bpf.LoadIndirect{Off: uint32(off), Size: size}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: val, SkipTrue: 1}, bpf.RetConstant{Val: 0}, bpf.RetConstant{Val: 1}, } } // RegX: -6, Off: 13, Size: 1 checkBackends(t, filter(13, 1, 5), []byte{0xFF, 0xFF, 0xFF, 0xFA, 0, 0, 0, 5}, match) checkBackends(t, filter(13, 1, 6), []byte{0xFF, 0xFF, 0xFF, 0xFA, 0, 0, 0, 5}, noMatch) // RegX: INT32_MAX, Off: INT32_MIN+7, Size: 2 checkBackends(t, filter(math.MinInt32+7, 2, 0xDEAD), []byte{0x7F, 0xFF, 0xFF, 0xFF, 0, 0, 0xDE, 0xAD}, match) checkBackends(t, filter(math.MinInt32+7, 2, 0xDEAD), []byte{0x7F, 0xFF, 0xFF, 0xFF, 0, 0, 0xDE, 0xAF}, noMatch) // RegX: 3, Off: 4, Size: 4 checkBackends(t, filter(4, 4, 0xDEADBEEF), []byte{0x00, 0x00, 0x00, 0x03, 0, 0, 0, 0xDE, 0xAD, 0xBE, 0xEF}, match) checkBackends(t, filter(4, 4, 0xDEADBEEF), []byte{0x00, 0x00, 0x00, 0x03, 0, 0, 0, 0xDE, 0xAA, 0xBE, 0xEF}, noMatch) }) t.Run("constant_outofbounds", func(t *testing.T) { // Always return match to ensure noMatch comes from the packet load. 
filter := func(regX, off int32, size int) []bpf.Instruction { return []bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegX, Val: uint32(regX)}, bpf.LoadIndirect{Off: uint32(off), Size: size}, bpf.RetConstant{Val: 1}, } } // Not out of bounds, sanity check checkBackends(t, filter(-3, 3, 1), nil, match) // Before packet // RegX: -16, Off: 15 checkBackends(t, filter(-16, 15, 1), nil, noMatch) // RegX: -1, Off: -2 checkBackends(t, filter(-1, -2, 4), nil, noMatch) // RegX: 255, Off: -300 checkBackends(t, filter(255, -300, 2), nil, noMatch) // After packet checkBackends(t, filter(-16, 30, 1), nil, noMatch) }) t.Run("variable_outofbounds", func(t *testing.T) { // Always return match to ensure noMatch comes from the packet load. filter := func(off int32, size int) []bpf.Instruction { return []bpf.Instruction{ bpf.LoadAbsolute{Off: 0, Size: 4}, bpf.TAX{}, bpf.LoadIndirect{Off: uint32(off), Size: size}, bpf.RetConstant{Val: 1}, } } // Not out of bounds, sanity check checkBackends(t, filter(3, 1), []byte{0xFF, 0xFF, 0xFF, 0xFD}, match) // Before packet // RegX: -16, Off: 15 checkBackends(t, filter(15, 1), []byte{0xFF, 0xFF, 0xFF, 0xF0}, noMatch) // RegX: -1, Off: -2 checkBackends(t, filter(-2, 4), []byte{0xFF, 0xFF, 0xFF, 0xFF}, noMatch) // RegX: 255, Off: -300 checkBackends(t, filter(-300, 2), []byte{0x00, 0x00, 0x00, 0xFF}, noMatch) // After packet checkBackends(t, filter(30, 1), []byte{0xFF, 0xFF, 0xFF, 0xF0}, noMatch) }) } func TestLoadIndirectBigOffset(t *testing.T) { filter := func(off uint32, size int) []bpf.Instruction { return []bpf.Instruction{ // RegX is 0 initialized bpf.LoadIndirect{Off: off, Size: size}, bpf.ALUOpConstant{Op: bpf.ALUOpAdd, Val: 2}, bpf.RetA{}, } } // XDP limits packets to one page, so there's no way to feed a packet big enough to test the offsets // we want through BPF_PROG_TEST_RUN. // All we can check is that the verifier accepts the program and it doesn't match. checkBackends(t, filter(0, 1), nil, match) checkBackends(t, filter(0, 4), nil, match) checkBackends(t, filter(maxPacketOffset-1, 1), nil, noMatch) checkBackends(t, filter(maxPacketOffset, 1), nil, noMatch) checkBackends(t, filter(maxPacketOffset-2, 2), nil, noMatch) checkBackends(t, filter(maxPacketOffset-1, 2), nil, noMatch) checkBackends(t, filter(maxPacketOffset-4, 4), nil, noMatch) checkBackends(t, filter(maxPacketOffset-3, 4), nil, noMatch) } // Indirect offset that would cause packetGuardIndirect.length() to overflow. func TestLoadIndirectGuardOverflow(t *testing.T) { var min int32 = math.MinInt32 checkBackends(t, []bpf.Instruction{ // Variable RegX bpf.LoadAbsolute{Off: 0, Size: 4}, bpf.TAX{}, // Two loads, with offsets more than maxPacketOffset apart bpf.LoadIndirect{Off: uint32(min), Size: 1}, bpf.LoadIndirect{Off: 0, Size: 2}, bpf.LoadIndirect{Off: math.MaxInt32, Size: 4}, bpf.TXA{}, bpf.RetA{}, }, nil, noMatch) } // The 0 scratch slot is usable. 
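// Scratch slots are backed by the eBPF stack: slot N is stored at
// R10 - (StackOffset + (N+1)*4), as computed by ebpfOpts.stackOffset.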
func TestScratchZero(t *testing.T) { t.Parallel() filter := []bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegA, Val: 4}, bpf.StoreScratch{Src: bpf.RegA, N: 0}, // clobber the reg in the mean time bpf.LoadConstant{Dst: bpf.RegA, Val: 0}, bpf.LoadScratch{Dst: bpf.RegA, N: 0}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: 4, SkipTrue: 1}, bpf.RetConstant{Val: 0}, bpf.RetConstant{Val: 1}, } checkBackends(t, filter, nil, match) } func TestScratchA(t *testing.T) { t.Parallel() filter := func(val uint32) []bpf.Instruction { return []bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegA, Val: val}, bpf.StoreScratch{Src: bpf.RegA, N: 7}, // clobber the reg in the mean time bpf.LoadConstant{Dst: bpf.RegA, Val: 0}, // load garbage in the adjacent slots bpf.LoadConstant{Dst: bpf.RegA, Val: 0xFFFFFFFF}, bpf.StoreScratch{Src: bpf.RegA, N: 6}, bpf.LoadConstant{Dst: bpf.RegA, Val: 0xFFFFFFFF}, bpf.StoreScratch{Src: bpf.RegA, N: 8}, bpf.LoadScratch{Dst: bpf.RegA, N: 7}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: val, SkipTrue: 1}, bpf.RetConstant{Val: 0}, bpf.RetConstant{Val: 1}, } } checkBackends(t, filter(0xdeadbeef), []byte{}, match) checkBackends(t, filter(0), []byte{}, match) } func TestScratchX(t *testing.T) { t.Parallel() filter := func(val uint32) []bpf.Instruction { return []bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegX, Val: val}, bpf.StoreScratch{Src: bpf.RegX, N: 7}, // clobber the reg in the mean time bpf.LoadConstant{Dst: bpf.RegX, Val: 0}, // load garbage in the adjacent slots bpf.LoadConstant{Dst: bpf.RegX, Val: 0xFFFFFFFF}, bpf.StoreScratch{Src: bpf.RegX, N: 6}, bpf.LoadConstant{Dst: bpf.RegX, Val: 0xFFFFFFFF}, bpf.StoreScratch{Src: bpf.RegX, N: 8}, bpf.LoadScratch{Dst: bpf.RegX, N: 7}, bpf.TXA{}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: val, SkipTrue: 1}, bpf.RetConstant{Val: 0}, bpf.RetConstant{Val: 1}, } } checkBackends(t, filter(0xdeadbeef), []byte{}, match) checkBackends(t, filter(0), []byte{}, match) } func TestMemShift(t *testing.T) { t.Parallel() filter := func(val uint32) []bpf.Instruction { return []bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegA, Val: val}, bpf.LoadMemShift{Off: 2}, bpf.JumpIfX{Cond: bpf.JumpEqual, SkipTrue: 1}, bpf.RetConstant{Val: 0}, bpf.RetConstant{Val: 1}, } } checkBackends(t, filter(40), []byte{0, 0, 0xAA}, match) checkBackends(t, filter(0), []byte{0, 0, 0xF0}, match) } func TestLoadExtLen(t *testing.T) { t.Parallel() filter := func(pktLen uint32) []bpf.Instruction { return []bpf.Instruction{ bpf.LoadExtension{Num: bpf.ExtLen}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: pktLen, SkipTrue: 1}, bpf.RetConstant{Val: 0}, bpf.RetConstant{Val: 1}, } } checkBackends(t, filter(16), []byte{0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef}, match) } // check a OP b == res for both ALUOpConstant and ALUOpX func checkAlu(t *testing.T, op bpf.ALUOp, a, b, res uint32) { t.Helper() constFilter := []bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegA, Val: a}, bpf.ALUOpConstant{Op: op, Val: b}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: res, SkipTrue: 1}, bpf.RetConstant{Val: 0}, bpf.RetConstant{Val: 1}, } checkBackends(t, constFilter, []byte{}, match) xFilter := []bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegA, Val: a}, bpf.LoadConstant{Dst: bpf.RegX, Val: b}, bpf.ALUOpX{Op: op}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: res, SkipTrue: 1}, bpf.RetConstant{Val: 0}, bpf.RetConstant{Val: 1}, } checkBackends(t, xFilter, []byte{}, match) } func TestALUAdd(t *testing.T) { t.Parallel() checkAlu(t, bpf.ALUOpAdd, 1, 0, 1) checkAlu(t, bpf.ALUOpAdd, 4, 13, 17) } func TestALUSub(t 
*testing.T) { t.Parallel() checkAlu(t, bpf.ALUOpSub, 1, 1, 0) checkAlu(t, bpf.ALUOpSub, 13, 9, 4) } func TestALUMul(t *testing.T) { t.Parallel() checkAlu(t, bpf.ALUOpMul, 0, 1, 0) checkAlu(t, bpf.ALUOpMul, 4, 13, 52) // overflow - 2^31 * 2 checkAlu(t, bpf.ALUOpMul, 2, 0x80000000, 0) } func TestALUDiv(t *testing.T) { t.Parallel() checkAlu(t, bpf.ALUOpDiv, 2, 2, 1) checkAlu(t, bpf.ALUOpDiv, 19, 3, 6) } func TestALUDivZero(t *testing.T) { t.Parallel() filter := []bpf.Instruction{ bpf.LoadAbsolute{Size: 1, Off: 0}, bpf.TAX{}, bpf.LoadConstant{Dst: bpf.RegA, Val: 10}, bpf.ALUOpX{Op: bpf.ALUOpDiv}, bpf.RetConstant{Val: 1}, } checkBackends(t, filter, []byte{0}, noMatch) checkBackends(t, filter, []byte{1}, match) } // Check that ALU operations aren't signed. // The only operator that is implemented differently for signed vs unsigned math with two's complement is division. func TestALUDivNegative(t *testing.T) { checkBackends(t, []bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegA, Val: 0x00000004}, // 4 bpf.LoadConstant{Dst: bpf.RegX, Val: 0xFFFFFFFE}, // -2 bpf.ALUOpX{Op: bpf.ALUOpDiv}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipTrue: 1}, // 0 means division is unsigned, -2 signed bpf.RetConstant{}, bpf.RetConstant{Val: 1}, }, nil, match) } func TestALUOr(t *testing.T) { t.Parallel() checkAlu(t, bpf.ALUOpOr, 1, 0, 1) checkAlu(t, bpf.ALUOpOr, 0xF0, 0x0F, 0xFF) } func TestALUAnd(t *testing.T) { t.Parallel() checkAlu(t, bpf.ALUOpAnd, 1, 0, 0) checkAlu(t, bpf.ALUOpAnd, 0xF0, 0x80, 0x80) checkAlu(t, bpf.ALUOpAnd, 0xF0, 0x0F, 0x00) } func TestALUShiftLeft(t *testing.T) { t.Parallel() checkAlu(t, bpf.ALUOpShiftLeft, 1, 0, 1) checkAlu(t, bpf.ALUOpShiftLeft, 1, 4, 0x10) } func TestALUShiftRight(t *testing.T) { t.Parallel() checkAlu(t, bpf.ALUOpShiftRight, 0xF0, 4, 0x0F) checkAlu(t, bpf.ALUOpShiftRight, 0xF0, 8, 0) } func TestALUMod(t *testing.T) { t.Parallel() checkAlu(t, bpf.ALUOpMod, 16, 4, 0) checkAlu(t, bpf.ALUOpMod, 17, 4, 1) } func TestALUXor(t *testing.T) { t.Parallel() checkAlu(t, bpf.ALUOpXor, 1, 1, 0) checkAlu(t, bpf.ALUOpMod, 6, 4, 2) } func TestNegateA(t *testing.T) { t.Parallel() filter := []bpf.Instruction{ bpf.LoadConstant{Dst: bpf.RegA, Val: 26}, bpf.NegateA{}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: uint32(26 | 0x80000000), SkipTrue: 1}, bpf.RetConstant{Val: 0}, bpf.RetConstant{Val: 1}, } checkBackends(t, filter, []byte{}, noMatch) } func TestJump(t *testing.T) { t.Parallel() filter := []bpf.Instruction{ // "dummy" jump so the unreachable code after the real jump isn't removed bpf.LoadAbsolute{Off: 0, Size: 1}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: 1, SkipTrue: 1}, bpf.Jump{Skip: 1}, bpf.LoadConstant{Dst: bpf.RegA, Val: 1}, bpf.RetA{}, } checkBackends(t, filter, []byte{}, noMatch) } // Jump that does nothing. 
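// JumpIf / JumpIfX with SkipTrue == SkipFalse == 0 must compile to a plain fall through.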
func TestJump0(t *testing.T) {
	t.Parallel()

	filter := []bpf.Instruction{
		bpf.LoadAbsolute{Off: 0, Size: 1},
		bpf.Jump{Skip: 1},
		bpf.JumpIf{Cond: bpf.JumpEqual},
		bpf.JumpIfX{Cond: bpf.JumpEqual},
		bpf.RetA{},
	}

	checkBackends(t, filter, []byte{}, noMatch)
	checkBackends(t, filter, []byte{1}, match)
}

// a needs to be != 0
func checkJump(t *testing.T, cond bpf.JumpTest, a, b uint32, result bool) {
	t.Helper()

	if a == 0 {
		t.Fatal("a must be non 0")
	}

	// match if cond is true
	expected := noMatch
	if result {
		expected = match
	}

	// constant skipTrue
	constTrueFilter := []bpf.Instruction{
		bpf.LoadConstant{Dst: bpf.RegA, Val: a},
		bpf.JumpIf{Cond: cond, Val: b, SkipTrue: 1},
		bpf.RetConstant{Val: 0},
		bpf.RetConstant{Val: 1},
	}
	checkBackends(t, constTrueFilter, []byte{}, expected)

	// constant skipTrue & skipFalse
	constBothFilter := []bpf.Instruction{
		bpf.LoadConstant{Dst: bpf.RegA, Val: a},

		// "dummy" interleaved jump so the actual test jump can use both skipFalse and skipTrue
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipTrue: 1},

		bpf.JumpIf{Cond: cond, Val: b, SkipTrue: 2, SkipFalse: 1},

		// "dummy" target
		bpf.RetConstant{Val: 1},

		bpf.RetConstant{Val: 0},
		bpf.RetConstant{Val: 1},
	}
	checkBackends(t, constBothFilter, []byte{}, expected)

	// X skipTrue
	xTrueFilter := []bpf.Instruction{
		bpf.LoadConstant{Dst: bpf.RegA, Val: a},
		bpf.LoadConstant{Dst: bpf.RegX, Val: b},
		bpf.JumpIfX{Cond: cond, SkipTrue: 1},
		bpf.RetConstant{Val: 0},
		bpf.RetConstant{Val: 1},
	}
	checkBackends(t, xTrueFilter, []byte{}, expected)

	// X skipTrue & skipFalse
	xBothFilter := []bpf.Instruction{
		bpf.LoadConstant{Dst: bpf.RegA, Val: a},
		bpf.LoadConstant{Dst: bpf.RegX, Val: b},

		// "dummy" interleaved jump so the actual test jump can use both skipFalse and skipTrue
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipTrue: 1},

		bpf.JumpIfX{Cond: cond, SkipTrue: 2, SkipFalse: 1},

		// "dummy" target
		bpf.RetConstant{Val: 1},

		bpf.RetConstant{Val: 0},
		bpf.RetConstant{Val: 1},
	}
	checkBackends(t, xBothFilter, []byte{}, expected)
}

func TestJumpIfEqual(t *testing.T) {
	t.Parallel()

	checkJump(t, bpf.JumpEqual, 23, 23, true)
	checkJump(t, bpf.JumpEqual, 23, 21, false)
}

func TestJumpIfNotEqual(t *testing.T) {
	t.Parallel()

	checkJump(t, bpf.JumpNotEqual, 23, 23, false)
	checkJump(t, bpf.JumpNotEqual, 23, 21, true)
}

func TestJumpIfGreaterThan(t *testing.T) {
	t.Parallel()

	checkJump(t, bpf.JumpGreaterThan, 24, 23, true)
	checkJump(t, bpf.JumpGreaterThan, 23, 23, false)
	checkJump(t, bpf.JumpGreaterThan, 22, 23, false)
}

func TestJumpIfLessThan(t *testing.T) {
	t.Parallel()

	checkJump(t, bpf.JumpLessThan, 24, 23, false)
	checkJump(t, bpf.JumpLessThan, 23, 23, false)
	checkJump(t, bpf.JumpLessThan, 22, 23, true)
}

func TestJumpIfGreaterOrEqual(t *testing.T) {
	t.Parallel()

	checkJump(t, bpf.JumpGreaterOrEqual, 24, 23, true)
	checkJump(t, bpf.JumpGreaterOrEqual, 23, 23, true)
	checkJump(t, bpf.JumpGreaterOrEqual, 22, 23, false)
}

func TestJumpIfLessOrEqual(t *testing.T) {
	t.Parallel()

	checkJump(t, bpf.JumpLessOrEqual, 24, 23, false)
	checkJump(t, bpf.JumpLessOrEqual, 23, 23, true)
	checkJump(t, bpf.JumpLessOrEqual, 22, 23, true)
}

func TestJumpIfBitsSet(t *testing.T) {
	t.Parallel()

	checkJump(t, bpf.JumpBitsSet, 6, 4, true)
	checkJump(t, bpf.JumpBitsSet, 6, 2, true)
	checkJump(t, bpf.JumpBitsSet, 6, 8, false)
}

func TestJumpIfBitsNotSet(t *testing.T) {
	t.Parallel()

	checkJump(t, bpf.JumpBitsNotSet, 6, 4, false)
	checkJump(t, bpf.JumpBitsNotSet, 6, 2, false)
	checkJump(t, bpf.JumpBitsNotSet, 6, 8, true)
}

func TestRetA(t *testing.T) {
	t.Parallel()

	filter := []bpf.Instruction{
		bpf.LoadConstant{Dst: bpf.RegA, Val: 1},
		bpf.RetA{},
	}

	checkBackends(t, filter, []byte{}, match)
}

func TestRetConstant(t *testing.T) {
	t.Parallel()

	filter := []bpf.Instruction{
		bpf.RetConstant{Val: 1},
	}

	checkBackends(t, filter, []byte{}, match)

	filter = []bpf.Instruction{
		bpf.RetConstant{Val: 0},
	}

	checkBackends(t, filter, []byte{}, noMatch)
}

func TestTXA(t *testing.T) {
	t.Parallel()

	filter := []bpf.Instruction{
		bpf.LoadConstant{Dst: bpf.RegX, Val: 1},
		bpf.TXA{},
		bpf.RetA{},
	}

	checkBackends(t, filter, []byte{}, match)
}

func TestTAX(t *testing.T) {
	t.Parallel()

	filter := []bpf.Instruction{
		bpf.LoadConstant{Dst: bpf.RegA, Val: 1},
		bpf.TAX{},
		bpf.TXA{},
		bpf.RetA{},
	}

	checkBackends(t, filter, []byte{}, match)
}

type result int

const (
	match result = iota
	noMatch
)

func (r result) String() string {
	switch r {
	case match:
		return "match"
	case noMatch:
		return "no match"
	default:
		return fmt.Sprintf("result(%d)", int(r))
	}
}

// True IFF packet matches filter
type backend func(testing.TB, []bpf.Instruction, []byte) result

// checkBackends checks if all the backends match the packet as expected.
// Input packet is 0 padded to min ethernet length.
func checkBackends(t *testing.T, filter []bpf.Instruction, in []byte, expected result) {
	t.Helper()

	if len(in) < 14 {
		t := make([]byte, 14)
		copy(t, in)
		in = t
	}

	check := func(b backend) func(*testing.T) {
		return func(t *testing.T) {
			if got := b(t, filter, in); got != expected {
				t.Fatalf("Got %q, expected %q", got, expected)
			}
		}
	}

	t.Run("C", check(cBackend))
	t.Run("eBPF", check(ebpfBackend))
	t.Run("kernel", check(kernelBackend))
}

type XDPAction int

func (r XDPAction) String() string {
	switch r {
	case XDPAborted:
		return "XDPAborted"
	case XDPDrop:
		return "XDPDrop"
	case XDPPass:
		return "XDPPass"
	case XDPTx:
		return "XDPTx"
	default:
		return fmt.Sprintf("XDPResult(%d)", int(r))
	}
}

const (
	XDPAborted XDPAction = iota
	XDPDrop
	XDPPass
	XDPTx
)

// testProg runs an eBPF program and checks it has not modified the packet
func testProg(tb testing.TB, progSpec *ebpf.ProgramSpec, in []byte) result {
	// -short skips tests that require permissions
	// Skipping the tests this late ensures the eBPF program still builds at least
	if testing.Short() {
		tb.SkipNow()
	}

	prog, err := ebpf.NewProgramWithOptions(progSpec, ebpf.ProgramOptions{
		LogLevel: 2, // Get full verifier logs.
	})
	if err != nil {
		tb.Fatal(err)
	}
	defer prog.Close()

	ret, out, err := prog.Test(in)
	if err != nil {
		tb.Fatal(err)
	}

	if !bytes.Equal(in, out) {
		tb.Fatalf("Program modified input:\nIn: %v\nOut: %v\n", in, out)
	}

	// The XDP programs we build drop matching packets
	switch r := XDPAction(ret); r {
	case XDPDrop:
		return match
	case XDPPass:
		return noMatch
	default:
		tb.Fatalf("Unexpected XDP return code %v", r)
		panic("unreachable")
	}
}
golang-github-cloudflare-cbpfc-0.0~git20231012.992ed75/kernel_test.go000066400000000000000000000036321456326020600246730ustar00rootroot00000000000000package cbpfc

import (
	"bytes"
	"net"
	"testing"
	"time"
	"unsafe"

	"golang.org/x/net/bpf"
	"golang.org/x/sys/unix"
)

// kernelBackend is a backend that runs cBPF in the kernel
func kernelBackend(tb testing.TB, insns []bpf.Instruction, in []byte) result {
	filter, err := bpf.Assemble(insns)
	if err != nil {
		tb.Fatal(err)
	}

	// Use a unix socket to test the filter
	// This doesn't risk interfering with any other network traffic, doesn't require / add special
	// headers (as would be the case if we used UDP for example) that the XDP tests don't deal with,
	// and doesn't require any special permissions
	read, err := net.ListenUnixgram("unixgram", &net.UnixAddr{Name: "", Net: "unixgram"})
	if err != nil {
		tb.Fatal(err)
	}
	defer read.Close()

	readConn, err := read.SyscallConn()
	if err != nil {
		tb.Fatal(err)
	}

	err = readConn.Control(func(fd uintptr) {
		err := unix.SetsockoptSockFprog(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_FILTER, &unix.SockFprog{
			Len:    uint16(len(filter)),
			Filter: (*unix.SockFilter)(unsafe.Pointer(&filter[0])),
		})
		if err != nil {
			tb.Fatal(err)
		}
	})
	if err != nil {
		tb.Fatal(err)
	}

	write, err := net.Dial("unixgram", read.LocalAddr().String())
	if err != nil {
		tb.Fatal(err)
	}
	defer write.Close()

	if _, err := write.Write(in); err != nil {
		tb.Fatal(err)
	}

	read.SetDeadline(time.Now().Add(50 * time.Millisecond))

	// SocketFilters only allow matching packets through
	// If the packet does not match, the only signal we have is the absence of a packet
	var out [1500]byte
	n, err := read.Read(out[:])
	if err != nil {
		if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
			return noMatch
		}

		tb.Fatal(err)
	}

	// Sanity check we received the right packet
	// Received packet is truncated to the SocketFilter's return value
	if !bytes.Equal(in[:n], out[:n]) {
		tb.Fatalf("Received unexpected packet:\nSent: %v\nGot: %v\n", in, out[:n])
	}

	return match
}
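
// vmBackend is a hypothetical extra backend, sketched here for illustration only:
// it is not part of the test suite above and is not wired into checkBackends.
// It interprets the cBPF filter in userspace with the bpf.VM interpreter from
// golang.org/x/net/bpf, which needs no kernel support or privileges, so it could
// serve as a reference result for the compiled backends. The function name and
// its use are assumptions beyond the original code.
func vmBackend(tb testing.TB, insns []bpf.Instruction, in []byte) result {
	vm, err := bpf.NewVM(insns)
	if err != nil {
		tb.Fatal(err)
	}

	// Run returns how many bytes of the packet the filter accepts;
	// 0 means the packet does not match.
	n, err := vm.Run(in)
	if err != nil {
		tb.Fatal(err)
	}

	if n == 0 {
		return noMatch
	}
	return match
}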