Diffstat (limited to 'vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf')
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/asm.go  41
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/constants.go  218
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/doc.go  82
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/instructions.go  704
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/instructions_test.go  525
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/setter.go  10
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf  1
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt  79
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm.go  140
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_aluop_test.go  512
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_bpf_test.go  192
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_extension_test.go  49
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_instructions.go  174
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_jump_test.go  380
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_load_test.go  246
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_ret_test.go  115
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_scratch_test.go  247
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_test.go  144
18 files changed, 3859 insertions, 0 deletions
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/asm.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/asm.go
new file mode 100644
index 000000000..15e21b181
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/asm.go
@@ -0,0 +1,41 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import "fmt"
+
+// Assemble converts insts into raw instructions suitable for loading
+// into a BPF virtual machine.
+//
+// Currently, no optimization is attempted; the assembled program flow
+// is exactly as provided.
+func Assemble(insts []Instruction) ([]RawInstruction, error) {
+ ret := make([]RawInstruction, len(insts))
+ var err error
+ for i, inst := range insts {
+ ret[i], err = inst.Assemble()
+ if err != nil {
+ return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err)
+ }
+ }
+ return ret, nil
+}
+
+// Disassemble attempts to parse raw back into
+// Instructions. Unrecognized RawInstructions are assumed to be an
+// extension not implemented by this package, and are passed through
+// unchanged to the output. The allDecoded value reports whether insts
+// contains no RawInstructions.
+func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) {
+ insts = make([]Instruction, len(raw))
+ allDecoded = true
+ for i, r := range raw {
+ insts[i] = r.Disassemble()
+ if _, ok := insts[i].(RawInstruction); ok {
+ allDecoded = false
+ }
+ }
+ return insts, allDecoded
+}
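
As a quick illustration of the two functions above, the following standalone sketch (not part of the vendored diff; the two-instruction program is made up) assembles a program and disassembles it again:

    package main

    import (
        "fmt"

        "golang.org/x/net/bpf"
    )

    func main() {
        prog := []bpf.Instruction{
            bpf.LoadConstant{Dst: bpf.RegA, Val: 64}, // A = 64
            bpf.RetA{},                               // return A bytes of the packet
        }
        raw, err := bpf.Assemble(prog)
        if err != nil {
            panic(err)
        }
        insts, allDecoded := bpf.Disassemble(raw)
        fmt.Println(len(insts), allDecoded) // 2 true
    }
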
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/constants.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/constants.go
new file mode 100644
index 000000000..b89ca3523
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/constants.go
@@ -0,0 +1,218 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+// A Register is a register of the BPF virtual machine.
+type Register uint16
+
+const (
+ // RegA is the accumulator register. RegA is always the
+ // destination register of ALU operations.
+ RegA Register = iota
+ // RegX is the indirection register, used by LoadIndirect
+ // operations.
+ RegX
+)
+
+// An ALUOp is an arithmetic or logic operation.
+type ALUOp uint16
+
+// ALU binary operation types.
+const (
+ ALUOpAdd ALUOp = iota << 4
+ ALUOpSub
+ ALUOpMul
+ ALUOpDiv
+ ALUOpOr
+ ALUOpAnd
+ ALUOpShiftLeft
+ ALUOpShiftRight
+ aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type.
+ ALUOpMod
+ ALUOpXor
+)
+
+// A JumpTest is a comparison operator used in conditional jumps.
+type JumpTest uint16
+
+// Supported operators for conditional jumps.
+const (
+ // K == A
+ JumpEqual JumpTest = iota
+ // K != A
+ JumpNotEqual
+ // K > A
+ JumpGreaterThan
+ // K < A
+ JumpLessThan
+ // K >= A
+ JumpGreaterOrEqual
+ // K <= A
+ JumpLessOrEqual
+ // K & A != 0
+ JumpBitsSet
+ // K & A == 0
+ JumpBitsNotSet
+)
+
+// An Extension is a function call provided by the kernel that
+// performs advanced operations that are expensive or impossible
+// within the BPF virtual machine.
+//
+// Extensions are only implemented by the Linux kernel.
+//
+// TODO: should we prune this list? Some of these extensions seem
+// either broken or near-impossible to use correctly, whereas other
+// (len, random, ifindex) are quite useful.
+type Extension int
+
+// Extension functions available in the Linux kernel.
+const (
+ // extOffset is the negative offset added to an extension's number
+ // to encode it as an absolute load, overloading the K argument.
+ extOffset = -0x1000
+ // ExtLen returns the length of the packet.
+ ExtLen Extension = 1
+ // ExtProto returns the packet's L3 protocol type.
+ ExtProto Extension = 0
+ // ExtType returns the packet's type (skb->pkt_type in the kernel)
+ //
+ // TODO: better documentation. How nice an API do we want to
+ // provide for these esoteric extensions?
+ ExtType Extension = 4
+ // ExtPayloadOffset returns the offset of the packet payload, or
+ // the first protocol header that the kernel does not know how to
+ // parse.
+ ExtPayloadOffset Extension = 52
+ // ExtInterfaceIndex returns the index of the interface on which
+ // the packet was received.
+ ExtInterfaceIndex Extension = 8
+ // ExtNetlinkAttr returns the netlink attribute of type X at
+ // offset A.
+ ExtNetlinkAttr Extension = 12
+ // ExtNetlinkAttrNested returns the nested netlink attribute of
+ // type X at offset A.
+ ExtNetlinkAttrNested Extension = 16
+ // ExtMark returns the packet's mark value.
+ ExtMark Extension = 20
+ // ExtQueue returns the packet's assigned hardware queue.
+ ExtQueue Extension = 24
+ // ExtLinkLayerType returns the packet's hardware address type
+ // (e.g. Ethernet, Infiniband).
+ ExtLinkLayerType Extension = 28
+ // ExtRXHash returns the packet's receive hash.
+ //
+ // TODO: figure out what this rxhash actually is.
+ ExtRXHash Extension = 32
+ // ExtCPUID returns the ID of the CPU processing the current
+ // packet.
+ ExtCPUID Extension = 36
+ // ExtVLANTag returns the packet's VLAN tag.
+ ExtVLANTag Extension = 44
+ // ExtVLANTagPresent returns non-zero if the packet has a VLAN
+ // tag.
+ //
+ // TODO: I think this might be a lie: it reads bit 0x1000 of the
+ // VLAN header, which changed meaning in recent revisions of the
+ // spec - this extension may now return meaningless information.
+ ExtVLANTagPresent Extension = 48
+ // ExtVLANProto returns 0x8100 if the frame has a VLAN header,
+ // 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some
+ // other value if no VLAN information is present.
+ ExtVLANProto Extension = 60
+ // ExtRand returns a uniformly random uint32.
+ ExtRand Extension = 56
+)
+
+// The following gives names to various bit patterns used in opcode construction.
+
+const (
+ opMaskCls uint16 = 0x7
+ // opClsLoad masks
+ opMaskLoadDest = 0x01
+ opMaskLoadWidth = 0x18
+ opMaskLoadMode = 0xe0
+ // opClsALU
+ opMaskOperandSrc = 0x08
+ opMaskOperator = 0xf0
+ // opClsJump
+ opMaskJumpConst = 0x0f
+ opMaskJumpCond = 0xf0
+)
+
+const (
+ // +---------------+-----------------+---+---+---+
+ // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 0 |
+ // +---------------+-----------------+---+---+---+
+ opClsLoadA uint16 = iota
+ // +---------------+-----------------+---+---+---+
+ // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 1 |
+ // +---------------+-----------------+---+---+---+
+ opClsLoadX
+ // +---+---+---+---+---+---+---+---+
+ // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
+ // +---+---+---+---+---+---+---+---+
+ opClsStoreA
+ // +---+---+---+---+---+---+---+---+
+ // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
+ // +---+---+---+---+---+---+---+---+
+ opClsStoreX
+ // +---------------+-----------------+---+---+---+
+ // | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 |
+ // +---------------+-----------------+---+---+---+
+ opClsALU
+ // +-----------------------------+---+---+---+---+
+ // | TestOperator (4b) | 0 | 1 | 0 | 1 |
+ // +-----------------------------+---+---+---+---+
+ opClsJump
+ // +---+-------------------------+---+---+---+---+
+ // | 0 | 0 | 0 | RetSrc (1b) | 0 | 1 | 1 | 0 |
+ // +---+-------------------------+---+---+---+---+
+ opClsReturn
+ // +---+-------------------------+---+---+---+---+
+ // | 0 | 0 | 0 | TXAorTAX (1b) | 0 | 1 | 1 | 1 |
+ // +---+-------------------------+---+---+---+---+
+ opClsMisc
+)
+
+const (
+ opAddrModeImmediate uint16 = iota << 5
+ opAddrModeAbsolute
+ opAddrModeIndirect
+ opAddrModeScratch
+ opAddrModePacketLen // actually an extension, not an addressing mode.
+ opAddrModeMemShift
+)
+
+const (
+ opLoadWidth4 uint16 = iota << 3
+ opLoadWidth2
+ opLoadWidth1
+)
+
+// Operator defined by ALUOp*
+
+const (
+ opALUSrcConstant uint16 = iota << 3
+ opALUSrcX
+)
+
+const (
+ opJumpAlways = iota << 4
+ opJumpEqual
+ opJumpGT
+ opJumpGE
+ opJumpSet
+)
+
+const (
+ opRetSrcConstant uint16 = iota << 4
+ opRetSrcA
+)
+
+const (
+ opMiscTAX = 0x00
+ opMiscTXA = 0x80
+)
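
To see how these bit patterns combine in practice, assembling a 2-byte absolute load with the public API yields opClsLoadA | opLoadWidth2 | opAddrModeAbsolute = 0x28 (a small sketch, not part of the diff, assuming the import path above):

    package main

    import (
        "fmt"

        "golang.org/x/net/bpf"
    )

    func main() {
        raw, _ := bpf.LoadAbsolute{Off: 42, Size: 2}.Assemble()
        // 0x28 = opClsLoadA (0x00) | opLoadWidth2 (0x08) | opAddrModeAbsolute (0x20)
        fmt.Printf("%#x %d\n", raw.Op, raw.K) // 0x28 42
    }
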
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/doc.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/doc.go
new file mode 100644
index 000000000..ae62feb53
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/doc.go
@@ -0,0 +1,82 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+
+Package bpf implements marshaling and unmarshaling of programs for the
+Berkeley Packet Filter virtual machine, and provides a Go implementation
+of the virtual machine.
+
+BPF's main use is to specify a packet filter for network taps, so that
+the kernel doesn't have to expensively copy every packet it sees to
+userspace. However, it's been repurposed to other areas where running
+user code in-kernel is needed. For example, Linux's seccomp uses BPF
+to apply security policies to system calls. For simplicity, this
+documentation refers only to packets, but other uses of BPF have their
+own data payloads.
+
+BPF programs run in a restricted virtual machine. It has almost no
+access to kernel functions, and while conditional branches are
+allowed, they can only jump forwards, to guarantee that there are no
+infinite loops.
+
+The virtual machine
+
+The BPF VM is an accumulator machine. Its main register, called
+register A, is an implicit source and destination in all arithmetic
+and logic operations. The machine also has 16 scratch registers for
+temporary storage, and an indirection register (register X) for
+indirect memory access. All registers are 32 bits wide.
+
+Each run of a BPF program is given one packet, which is placed in the
+VM's read-only "main memory". LoadAbsolute and LoadIndirect
+instructions can fetch up to 32 bits at a time into register A for
+examination.
+
+The goal of a BPF program is to produce and return a verdict (uint32),
+which tells the kernel what to do with the packet. In the context of
+packet filtering, the returned value is the number of bytes of the
+packet to forward to userspace, or 0 to ignore the packet. Other
+contexts like seccomp define their own return values.
+
+In order to simplify programs, attempts to read past the end of the
+packet terminate the program execution with a verdict of 0 (ignore
+packet). This means that the vast majority of BPF programs don't need
+to do any explicit bounds checking.
+
+In addition to the bytes of the packet, some BPF programs have access
+to extensions, which are essentially calls to kernel utility
+functions. Currently, the only extensions supported by this package
+are the Linux packet filter extensions.
+
+Examples
+
+This packet filter selects all ARP packets.
+
+ bpf.Assemble([]bpf.Instruction{
+ // Load "EtherType" field from the ethernet header.
+ bpf.LoadAbsolute{Off: 12, Size: 2},
+ // Skip over the next instruction if EtherType is not ARP.
+ bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
+ // Verdict is "send up to 4k of the packet to userspace."
+ bpf.RetConstant{Val: 4096},
+ // Verdict is "ignore packet."
+ bpf.RetConstant{Val: 0},
+ })
+
+This packet filter captures a random 1% sample of traffic.
+
+ bpf.Assemble([]bpf.Instruction{
+ // Get a 32-bit random number from the Linux kernel.
+ bpf.LoadExtension{Num: bpf.ExtRand},
+ // 1% dice roll?
+ bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 1 << 32 / 100, SkipFalse: 1},
+ // Capture.
+ bpf.RetConstant{Val: 4096},
+ // Ignore.
+ bpf.RetConstant{Val: 0},
+ })
+
+*/
+package bpf // import "golang.org/x/net/bpf"
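
The ARP example above can also be run directly against this package's own VM (described in vm.go below); a minimal sketch, with a made-up Ethernet frame:

    package main

    import (
        "fmt"

        "golang.org/x/net/bpf"
    )

    func main() {
        vm, err := bpf.NewVM([]bpf.Instruction{
            bpf.LoadAbsolute{Off: 12, Size: 2},
            bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
            bpf.RetConstant{Val: 4096},
            bpf.RetConstant{Val: 0},
        })
        if err != nil {
            panic(err)
        }
        // 14-byte Ethernet header with EtherType 0x0806 (ARP); the addresses are made up.
        frame := []byte{
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // destination MAC
            0x00, 0x11, 0x22, 0x33, 0x44, 0x55, // source MAC
            0x08, 0x06, // EtherType
        }
        n, err := vm.Run(frame)
        fmt.Println(n, err) // 4096 <nil>
    }
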
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/instructions.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/instructions.go
new file mode 100644
index 000000000..3b4fd0891
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/instructions.go
@@ -0,0 +1,704 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import "fmt"
+
+// An Instruction is one instruction executed by the BPF virtual
+// machine.
+type Instruction interface {
+ // Assemble assembles the Instruction into a RawInstruction.
+ Assemble() (RawInstruction, error)
+}
+
+// A RawInstruction is a raw BPF virtual machine instruction.
+type RawInstruction struct {
+ // Operation to execute.
+ Op uint16
+ // For conditional jump instructions, the number of instructions
+ // to skip if the condition is true/false.
+ Jt uint8
+ Jf uint8
+ // Constant parameter. The meaning depends on the Op.
+ K uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil }
+
+// Disassemble parses ri into an Instruction and returns it. If ri is
+// not recognized by this package, ri itself is returned.
+func (ri RawInstruction) Disassemble() Instruction {
+ switch ri.Op & opMaskCls {
+ case opClsLoadA, opClsLoadX:
+ reg := Register(ri.Op & opMaskLoadDest)
+ sz := 0
+ switch ri.Op & opMaskLoadWidth {
+ case opLoadWidth4:
+ sz = 4
+ case opLoadWidth2:
+ sz = 2
+ case opLoadWidth1:
+ sz = 1
+ default:
+ return ri
+ }
+ switch ri.Op & opMaskLoadMode {
+ case opAddrModeImmediate:
+ if sz != 4 {
+ return ri
+ }
+ return LoadConstant{Dst: reg, Val: ri.K}
+ case opAddrModeScratch:
+ if sz != 4 || ri.K > 15 {
+ return ri
+ }
+ return LoadScratch{Dst: reg, N: int(ri.K)}
+ case opAddrModeAbsolute:
+ if ri.K > extOffset+0xffffffff {
+ return LoadExtension{Num: Extension(-extOffset + ri.K)}
+ }
+ return LoadAbsolute{Size: sz, Off: ri.K}
+ case opAddrModeIndirect:
+ return LoadIndirect{Size: sz, Off: ri.K}
+ case opAddrModePacketLen:
+ if sz != 4 {
+ return ri
+ }
+ return LoadExtension{Num: ExtLen}
+ case opAddrModeMemShift:
+ return LoadMemShift{Off: ri.K}
+ default:
+ return ri
+ }
+
+ case opClsStoreA:
+ if ri.Op != opClsStoreA || ri.K > 15 {
+ return ri
+ }
+ return StoreScratch{Src: RegA, N: int(ri.K)}
+
+ case opClsStoreX:
+ if ri.Op != opClsStoreX || ri.K > 15 {
+ return ri
+ }
+ return StoreScratch{Src: RegX, N: int(ri.K)}
+
+ case opClsALU:
+ switch op := ALUOp(ri.Op & opMaskOperator); op {
+ case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor:
+ if ri.Op&opMaskOperandSrc != 0 {
+ return ALUOpX{Op: op}
+ }
+ return ALUOpConstant{Op: op, Val: ri.K}
+ case aluOpNeg:
+ return NegateA{}
+ default:
+ return ri
+ }
+
+ case opClsJump:
+ if ri.Op&opMaskJumpConst != opClsJump {
+ return ri
+ }
+ switch ri.Op & opMaskJumpCond {
+ case opJumpAlways:
+ return Jump{Skip: ri.K}
+ case opJumpEqual:
+ if ri.Jt == 0 {
+ return JumpIf{
+ Cond: JumpNotEqual,
+ Val: ri.K,
+ SkipTrue: ri.Jf,
+ SkipFalse: 0,
+ }
+ }
+ return JumpIf{
+ Cond: JumpEqual,
+ Val: ri.K,
+ SkipTrue: ri.Jt,
+ SkipFalse: ri.Jf,
+ }
+ case opJumpGT:
+ if ri.Jt == 0 {
+ return JumpIf{
+ Cond: JumpLessOrEqual,
+ Val: ri.K,
+ SkipTrue: ri.Jf,
+ SkipFalse: 0,
+ }
+ }
+ return JumpIf{
+ Cond: JumpGreaterThan,
+ Val: ri.K,
+ SkipTrue: ri.Jt,
+ SkipFalse: ri.Jf,
+ }
+ case opJumpGE:
+ if ri.Jt == 0 {
+ return JumpIf{
+ Cond: JumpLessThan,
+ Val: ri.K,
+ SkipTrue: ri.Jf,
+ SkipFalse: 0,
+ }
+ }
+ return JumpIf{
+ Cond: JumpGreaterOrEqual,
+ Val: ri.K,
+ SkipTrue: ri.Jt,
+ SkipFalse: ri.Jf,
+ }
+ case opJumpSet:
+ return JumpIf{
+ Cond: JumpBitsSet,
+ Val: ri.K,
+ SkipTrue: ri.Jt,
+ SkipFalse: ri.Jf,
+ }
+ default:
+ return ri
+ }
+
+ case opClsReturn:
+ switch ri.Op {
+ case opClsReturn | opRetSrcA:
+ return RetA{}
+ case opClsReturn | opRetSrcConstant:
+ return RetConstant{Val: ri.K}
+ default:
+ return ri
+ }
+
+ case opClsMisc:
+ switch ri.Op {
+ case opClsMisc | opMiscTAX:
+ return TAX{}
+ case opClsMisc | opMiscTXA:
+ return TXA{}
+ default:
+ return ri
+ }
+
+ default:
+ panic("unreachable") // switch is exhaustive on the bit pattern
+ }
+}
+
+// LoadConstant loads Val into register Dst.
+type LoadConstant struct {
+ Dst Register
+ Val uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadConstant) Assemble() (RawInstruction, error) {
+ return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val)
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadConstant) String() string {
+ switch a.Dst {
+ case RegA:
+ return fmt.Sprintf("ld #%d", a.Val)
+ case RegX:
+ return fmt.Sprintf("ldx #%d", a.Val)
+ default:
+ return fmt.Sprintf("unknown instruction: %#v", a)
+ }
+}
+
+// LoadScratch loads scratch[N] into register Dst.
+type LoadScratch struct {
+ Dst Register
+ N int // 0-15
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadScratch) Assemble() (RawInstruction, error) {
+ if a.N < 0 || a.N > 15 {
+ return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
+ }
+ return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N))
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadScratch) String() string {
+ switch a.Dst {
+ case RegA:
+ return fmt.Sprintf("ld M[%d]", a.N)
+ case RegX:
+ return fmt.Sprintf("ldx M[%d]", a.N)
+ default:
+ return fmt.Sprintf("unknown instruction: %#v", a)
+ }
+}
+
+// LoadAbsolute loads packet[Off:Off+Size] as an integer value into
+// register A.
+type LoadAbsolute struct {
+ Off uint32
+ Size int // 1, 2 or 4
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadAbsolute) Assemble() (RawInstruction, error) {
+ return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off)
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadAbsolute) String() string {
+ switch a.Size {
+ case 1: // byte
+ return fmt.Sprintf("ldb [%d]", a.Off)
+ case 2: // half word
+ return fmt.Sprintf("ldh [%d]", a.Off)
+ case 4: // word
+ if a.Off > extOffset+0xffffffff {
+ return LoadExtension{Num: Extension(a.Off + 0x1000)}.String()
+ }
+ return fmt.Sprintf("ld [%d]", a.Off)
+ default:
+ return fmt.Sprintf("unknown instruction: %#v", a)
+ }
+}
+
+// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value
+// into register A.
+type LoadIndirect struct {
+ Off uint32
+ Size int // 1, 2 or 4
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadIndirect) Assemble() (RawInstruction, error) {
+ return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off)
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadIndirect) String() string {
+ switch a.Size {
+ case 1: // byte
+ return fmt.Sprintf("ldb [x + %d]", a.Off)
+ case 2: // half word
+ return fmt.Sprintf("ldh [x + %d]", a.Off)
+ case 4: // word
+ return fmt.Sprintf("ld [x + %d]", a.Off)
+ default:
+ return fmt.Sprintf("unknown instruction: %#v", a)
+ }
+}
+
+// LoadMemShift multiplies the low 4 bits of the byte at packet[Off]
+// by 4 and stores the result in register X.
+//
+// This instruction is mainly useful to load into X the length of an
+// IPv4 packet header in a single instruction, rather than have to do
+// the arithmetic on the header's first byte by hand.
+type LoadMemShift struct {
+ Off uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadMemShift) Assemble() (RawInstruction, error) {
+ return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off)
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadMemShift) String() string {
+ return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off)
+}
+
+// LoadExtension invokes a linux-specific extension and stores the
+// result in register A.
+type LoadExtension struct {
+ Num Extension
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadExtension) Assemble() (RawInstruction, error) {
+ if a.Num == ExtLen {
+ return assembleLoad(RegA, 4, opAddrModePacketLen, 0)
+ }
+ return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num))
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadExtension) String() string {
+ switch a.Num {
+ case ExtLen:
+ return "ld #len"
+ case ExtProto:
+ return "ld #proto"
+ case ExtType:
+ return "ld #type"
+ case ExtPayloadOffset:
+ return "ld #poff"
+ case ExtInterfaceIndex:
+ return "ld #ifidx"
+ case ExtNetlinkAttr:
+ return "ld #nla"
+ case ExtNetlinkAttrNested:
+ return "ld #nlan"
+ case ExtMark:
+ return "ld #mark"
+ case ExtQueue:
+ return "ld #queue"
+ case ExtLinkLayerType:
+ return "ld #hatype"
+ case ExtRXHash:
+ return "ld #rxhash"
+ case ExtCPUID:
+ return "ld #cpu"
+ case ExtVLANTag:
+ return "ld #vlan_tci"
+ case ExtVLANTagPresent:
+ return "ld #vlan_avail"
+ case ExtVLANProto:
+ return "ld #vlan_tpid"
+ case ExtRand:
+ return "ld #rand"
+ default:
+ return fmt.Sprintf("unknown instruction: %#v", a)
+ }
+}
+
+// StoreScratch stores register Src into scratch[N].
+type StoreScratch struct {
+ Src Register
+ N int // 0-15
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a StoreScratch) Assemble() (RawInstruction, error) {
+ if a.N < 0 || a.N > 15 {
+ return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
+ }
+ var op uint16
+ switch a.Src {
+ case RegA:
+ op = opClsStoreA
+ case RegX:
+ op = opClsStoreX
+ default:
+ return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src)
+ }
+
+ return RawInstruction{
+ Op: op,
+ K: uint32(a.N),
+ }, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a StoreScratch) String() string {
+ switch a.Src {
+ case RegA:
+ return fmt.Sprintf("st M[%d]", a.N)
+ case RegX:
+ return fmt.Sprintf("stx M[%d]", a.N)
+ default:
+ return fmt.Sprintf("unknown instruction: %#v", a)
+ }
+}
+
+// ALUOpConstant executes A = A <Op> Val.
+type ALUOpConstant struct {
+ Op ALUOp
+ Val uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a ALUOpConstant) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsALU | opALUSrcConstant | uint16(a.Op),
+ K: a.Val,
+ }, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a ALUOpConstant) String() string {
+ switch a.Op {
+ case ALUOpAdd:
+ return fmt.Sprintf("add #%d", a.Val)
+ case ALUOpSub:
+ return fmt.Sprintf("sub #%d", a.Val)
+ case ALUOpMul:
+ return fmt.Sprintf("mul #%d", a.Val)
+ case ALUOpDiv:
+ return fmt.Sprintf("div #%d", a.Val)
+ case ALUOpMod:
+ return fmt.Sprintf("mod #%d", a.Val)
+ case ALUOpAnd:
+ return fmt.Sprintf("and #%d", a.Val)
+ case ALUOpOr:
+ return fmt.Sprintf("or #%d", a.Val)
+ case ALUOpXor:
+ return fmt.Sprintf("xor #%d", a.Val)
+ case ALUOpShiftLeft:
+ return fmt.Sprintf("lsh #%d", a.Val)
+ case ALUOpShiftRight:
+ return fmt.Sprintf("rsh #%d", a.Val)
+ default:
+ return fmt.Sprintf("unknown instruction: %#v", a)
+ }
+}
+
+// ALUOpX executes A = A <Op> X
+type ALUOpX struct {
+ Op ALUOp
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a ALUOpX) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsALU | opALUSrcX | uint16(a.Op),
+ }, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a ALUOpX) String() string {
+ switch a.Op {
+ case ALUOpAdd:
+ return "add x"
+ case ALUOpSub:
+ return "sub x"
+ case ALUOpMul:
+ return "mul x"
+ case ALUOpDiv:
+ return "div x"
+ case ALUOpMod:
+ return "mod x"
+ case ALUOpAnd:
+ return "and x"
+ case ALUOpOr:
+ return "or x"
+ case ALUOpXor:
+ return "xor x"
+ case ALUOpShiftLeft:
+ return "lsh x"
+ case ALUOpShiftRight:
+ return "rsh x"
+ default:
+ return fmt.Sprintf("unknown instruction: %#v", a)
+ }
+}
+
+// NegateA executes A = -A.
+type NegateA struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a NegateA) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsALU | uint16(aluOpNeg),
+ }, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a NegateA) String() string {
+ return fmt.Sprintf("neg")
+}
+
+// Jump skips the following Skip instructions in the program.
+type Jump struct {
+ Skip uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a Jump) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsJump | opJumpAlways,
+ K: a.Skip,
+ }, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a Jump) String() string {
+ return fmt.Sprintf("ja %d", a.Skip)
+}
+
+// JumpIf skips the following SkipTrue instructions in the program if A
+// <Cond> Val is true, and the following SkipFalse instructions otherwise.
+type JumpIf struct {
+ Cond JumpTest
+ Val uint32
+ SkipTrue uint8
+ SkipFalse uint8
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a JumpIf) Assemble() (RawInstruction, error) {
+ var (
+ cond uint16
+ flip bool
+ )
+ switch a.Cond {
+ case JumpEqual:
+ cond = opJumpEqual
+ case JumpNotEqual:
+ cond, flip = opJumpEqual, true
+ case JumpGreaterThan:
+ cond = opJumpGT
+ case JumpLessThan:
+ cond, flip = opJumpGE, true
+ case JumpGreaterOrEqual:
+ cond = opJumpGE
+ case JumpLessOrEqual:
+ cond, flip = opJumpGT, true
+ case JumpBitsSet:
+ cond = opJumpSet
+ case JumpBitsNotSet:
+ cond, flip = opJumpSet, true
+ default:
+ return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", a.Cond)
+ }
+ jt, jf := a.SkipTrue, a.SkipFalse
+ if flip {
+ jt, jf = jf, jt
+ }
+ return RawInstruction{
+ Op: opClsJump | cond,
+ Jt: jt,
+ Jf: jf,
+ K: a.Val,
+ }, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a JumpIf) String() string {
+ switch a.Cond {
+ // K == A
+ case JumpEqual:
+ return conditionalJump(a, "jeq", "jneq")
+ // K != A
+ case JumpNotEqual:
+ return fmt.Sprintf("jneq #%d,%d", a.Val, a.SkipTrue)
+ // K > A
+ case JumpGreaterThan:
+ return conditionalJump(a, "jgt", "jle")
+ // K < A
+ case JumpLessThan:
+ return fmt.Sprintf("jlt #%d,%d", a.Val, a.SkipTrue)
+ // K >= A
+ case JumpGreaterOrEqual:
+ return conditionalJump(a, "jge", "jlt")
+ // K <= A
+ case JumpLessOrEqual:
+ return fmt.Sprintf("jle #%d,%d", a.Val, a.SkipTrue)
+ // K & A != 0
+ case JumpBitsSet:
+ if a.SkipFalse > 0 {
+ return fmt.Sprintf("jset #%d,%d,%d", a.Val, a.SkipTrue, a.SkipFalse)
+ }
+ return fmt.Sprintf("jset #%d,%d", a.Val, a.SkipTrue)
+ // K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert skips
+ case JumpBitsNotSet:
+ return JumpIf{Cond: JumpBitsSet, SkipTrue: a.SkipFalse, SkipFalse: a.SkipTrue, Val: a.Val}.String()
+ default:
+ return fmt.Sprintf("unknown instruction: %#v", a)
+ }
+}
+
+func conditionalJump(inst JumpIf, positiveJump, negativeJump string) string {
+ if inst.SkipTrue > 0 {
+ if inst.SkipFalse > 0 {
+ return fmt.Sprintf("%s #%d,%d,%d", positiveJump, inst.Val, inst.SkipTrue, inst.SkipFalse)
+ }
+ return fmt.Sprintf("%s #%d,%d", positiveJump, inst.Val, inst.SkipTrue)
+ }
+ return fmt.Sprintf("%s #%d,%d", negativeJump, inst.Val, inst.SkipFalse)
+}
+
+// RetA exits the BPF program, returning the value of register A.
+type RetA struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a RetA) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsReturn | opRetSrcA,
+ }, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a RetA) String() string {
+ return fmt.Sprintf("ret a")
+}
+
+// RetConstant exits the BPF program, returning a constant value.
+type RetConstant struct {
+ Val uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a RetConstant) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsReturn | opRetSrcConstant,
+ K: a.Val,
+ }, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a RetConstant) String() string {
+ return fmt.Sprintf("ret #%d", a.Val)
+}
+
+// TXA copies the value of register X to register A.
+type TXA struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a TXA) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsMisc | opMiscTXA,
+ }, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a TXA) String() string {
+ return fmt.Sprintf("txa")
+}
+
+// TAX copies the value of register A to register X.
+type TAX struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a TAX) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsMisc | opMiscTAX,
+ }, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a TAX) String() string {
+ return fmt.Sprintf("tax")
+}
+
+func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) {
+ var (
+ cls uint16
+ sz uint16
+ )
+ switch dst {
+ case RegA:
+ cls = opClsLoadA
+ case RegX:
+ cls = opClsLoadX
+ default:
+ return RawInstruction{}, fmt.Errorf("invalid target register %v", dst)
+ }
+ switch loadSize {
+ case 1:
+ sz = opLoadWidth1
+ case 2:
+ sz = opLoadWidth2
+ case 4:
+ sz = opLoadWidth4
+ default:
+ return RawInstruction{}, fmt.Errorf("invalid load byte length %d", sz)
+ }
+ return RawInstruction{
+ Op: cls | sz | mode,
+ K: k,
+ }, nil
+}
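
One detail worth noting in JumpIf.Assemble above: the negated conditions (JumpNotEqual, JumpLessThan, JumpLessOrEqual, JumpBitsNotSet) have no opcode of their own, so they are encoded with the corresponding positive test and the true/false offsets swapped. A small sketch of the resulting raw instruction (not part of the diff):

    package main

    import (
        "fmt"

        "golang.org/x/net/bpf"
    )

    func main() {
        raw, _ := bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 42, SkipTrue: 8}.Assemble()
        // Encoded as the "equal" test (opClsJump | opJumpEqual = 0x15) with Jt and Jf swapped.
        fmt.Printf("%#x %d %d %d\n", raw.Op, raw.Jt, raw.Jf, raw.K) // 0x15 0 8 42
    }
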
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/instructions_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/instructions_test.go
new file mode 100644
index 000000000..dde474aba
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/instructions_test.go
@@ -0,0 +1,525 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import (
+ "fmt"
+ "io/ioutil"
+ "reflect"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+// This is a direct translation of the program in
+// testdata/all_instructions.txt.
+var allInstructions = []Instruction{
+ LoadConstant{Dst: RegA, Val: 42},
+ LoadConstant{Dst: RegX, Val: 42},
+
+ LoadScratch{Dst: RegA, N: 3},
+ LoadScratch{Dst: RegX, N: 3},
+
+ LoadAbsolute{Off: 42, Size: 1},
+ LoadAbsolute{Off: 42, Size: 2},
+ LoadAbsolute{Off: 42, Size: 4},
+
+ LoadIndirect{Off: 42, Size: 1},
+ LoadIndirect{Off: 42, Size: 2},
+ LoadIndirect{Off: 42, Size: 4},
+
+ LoadMemShift{Off: 42},
+
+ LoadExtension{Num: ExtLen},
+ LoadExtension{Num: ExtProto},
+ LoadExtension{Num: ExtType},
+ LoadExtension{Num: ExtRand},
+
+ StoreScratch{Src: RegA, N: 3},
+ StoreScratch{Src: RegX, N: 3},
+
+ ALUOpConstant{Op: ALUOpAdd, Val: 42},
+ ALUOpConstant{Op: ALUOpSub, Val: 42},
+ ALUOpConstant{Op: ALUOpMul, Val: 42},
+ ALUOpConstant{Op: ALUOpDiv, Val: 42},
+ ALUOpConstant{Op: ALUOpOr, Val: 42},
+ ALUOpConstant{Op: ALUOpAnd, Val: 42},
+ ALUOpConstant{Op: ALUOpShiftLeft, Val: 42},
+ ALUOpConstant{Op: ALUOpShiftRight, Val: 42},
+ ALUOpConstant{Op: ALUOpMod, Val: 42},
+ ALUOpConstant{Op: ALUOpXor, Val: 42},
+
+ ALUOpX{Op: ALUOpAdd},
+ ALUOpX{Op: ALUOpSub},
+ ALUOpX{Op: ALUOpMul},
+ ALUOpX{Op: ALUOpDiv},
+ ALUOpX{Op: ALUOpOr},
+ ALUOpX{Op: ALUOpAnd},
+ ALUOpX{Op: ALUOpShiftLeft},
+ ALUOpX{Op: ALUOpShiftRight},
+ ALUOpX{Op: ALUOpMod},
+ ALUOpX{Op: ALUOpXor},
+
+ NegateA{},
+
+ Jump{Skip: 10},
+ JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9},
+ JumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8},
+ JumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7},
+ JumpIf{Cond: JumpLessOrEqual, Val: 42, SkipTrue: 6},
+ JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4, SkipFalse: 5},
+ JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4},
+ JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2, SkipFalse: 3},
+
+ TAX{},
+ TXA{},
+
+ RetA{},
+ RetConstant{Val: 42},
+}
+var allInstructionsExpected = "testdata/all_instructions.bpf"
+
+// Check that we produce the same output as the canonical bpf_asm
+// linux kernel tool.
+func TestInterop(t *testing.T) {
+ out, err := Assemble(allInstructions)
+ if err != nil {
+ t.Fatalf("assembly of allInstructions program failed: %s", err)
+ }
+ t.Logf("Assembled program is %d instructions long", len(out))
+
+ bs, err := ioutil.ReadFile(allInstructionsExpected)
+ if err != nil {
+ t.Fatalf("reading %s: %s", allInstructionsExpected, err)
+ }
+ // First statement is the number of statements, last statement is
+ // empty. We just ignore both and rely on slice length.
+ stmts := strings.Split(string(bs), ",")
+ if len(stmts)-2 != len(out) {
+ t.Fatalf("test program lengths don't match: %s has %d, Go implementation has %d", allInstructionsExpected, len(stmts)-2, len(allInstructions))
+ }
+
+ for i, stmt := range stmts[1 : len(stmts)-2] {
+ nums := strings.Split(stmt, " ")
+ if len(nums) != 4 {
+ t.Fatalf("malformed instruction %d in %s: %s", i+1, allInstructionsExpected, stmt)
+ }
+
+ actual := out[i]
+
+ op, err := strconv.ParseUint(nums[0], 10, 16)
+ if err != nil {
+ t.Fatalf("malformed opcode %s in instruction %d of %s", nums[0], i+1, allInstructionsExpected)
+ }
+ if actual.Op != uint16(op) {
+ t.Errorf("opcode mismatch on instruction %d (%#v): got 0x%02x, want 0x%02x", i+1, allInstructions[i], actual.Op, op)
+ }
+
+ jt, err := strconv.ParseUint(nums[1], 10, 8)
+ if err != nil {
+ t.Fatalf("malformed jt offset %s in instruction %d of %s", nums[1], i+1, allInstructionsExpected)
+ }
+ if actual.Jt != uint8(jt) {
+ t.Errorf("jt mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jt, jt)
+ }
+
+ jf, err := strconv.ParseUint(nums[2], 10, 8)
+ if err != nil {
+ t.Fatalf("malformed jf offset %s in instruction %d of %s", nums[2], i+1, allInstructionsExpected)
+ }
+ if actual.Jf != uint8(jf) {
+ t.Errorf("jf mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jf, jf)
+ }
+
+ k, err := strconv.ParseUint(nums[3], 10, 32)
+ if err != nil {
+ t.Fatalf("malformed constant %s in instruction %d of %s", nums[3], i+1, allInstructionsExpected)
+ }
+ if actual.K != uint32(k) {
+ t.Errorf("constant mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.K, k)
+ }
+ }
+}
+
+// Check that assembly and disassembly match each other.
+func TestAsmDisasm(t *testing.T) {
+ prog1, err := Assemble(allInstructions)
+ if err != nil {
+ t.Fatalf("assembly of allInstructions program failed: %s", err)
+ }
+ t.Logf("Assembled program is %d instructions long", len(prog1))
+
+ got, allDecoded := Disassemble(prog1)
+ if !allDecoded {
+ t.Errorf("Disassemble(Assemble(allInstructions)) produced unrecognized instructions:")
+ for i, inst := range got {
+ if r, ok := inst.(RawInstruction); ok {
+ t.Logf(" insn %d, %#v --> %#v", i+1, allInstructions[i], r)
+ }
+ }
+ }
+
+ if len(allInstructions) != len(got) {
+ t.Fatalf("disassembly changed program size: %d insns before, %d insns after", len(allInstructions), len(got))
+ }
+ if !reflect.DeepEqual(allInstructions, got) {
+ t.Errorf("program mutated by disassembly:")
+ for i := range got {
+ if !reflect.DeepEqual(allInstructions[i], got[i]) {
+ t.Logf(" insn %d, s: %#v, p1: %#v, got: %#v", i+1, allInstructions[i], prog1[i], got[i])
+ }
+ }
+ }
+}
+
+type InvalidInstruction struct{}
+
+func (a InvalidInstruction) Assemble() (RawInstruction, error) {
+ return RawInstruction{}, fmt.Errorf("Invalid Instruction")
+}
+
+func (a InvalidInstruction) String() string {
+ return fmt.Sprintf("unknown instruction: %#v", a)
+}
+
+func TestString(t *testing.T) {
+ testCases := []struct {
+ instruction Instruction
+ assembler string
+ }{
+ {
+ instruction: LoadConstant{Dst: RegA, Val: 42},
+ assembler: "ld #42",
+ },
+ {
+ instruction: LoadConstant{Dst: RegX, Val: 42},
+ assembler: "ldx #42",
+ },
+ {
+ instruction: LoadConstant{Dst: 0xffff, Val: 42},
+ assembler: "unknown instruction: bpf.LoadConstant{Dst:0xffff, Val:0x2a}",
+ },
+ {
+ instruction: LoadScratch{Dst: RegA, N: 3},
+ assembler: "ld M[3]",
+ },
+ {
+ instruction: LoadScratch{Dst: RegX, N: 3},
+ assembler: "ldx M[3]",
+ },
+ {
+ instruction: LoadScratch{Dst: 0xffff, N: 3},
+ assembler: "unknown instruction: bpf.LoadScratch{Dst:0xffff, N:3}",
+ },
+ {
+ instruction: LoadAbsolute{Off: 42, Size: 1},
+ assembler: "ldb [42]",
+ },
+ {
+ instruction: LoadAbsolute{Off: 42, Size: 2},
+ assembler: "ldh [42]",
+ },
+ {
+ instruction: LoadAbsolute{Off: 42, Size: 4},
+ assembler: "ld [42]",
+ },
+ {
+ instruction: LoadAbsolute{Off: 42, Size: -1},
+ assembler: "unknown instruction: bpf.LoadAbsolute{Off:0x2a, Size:-1}",
+ },
+ {
+ instruction: LoadIndirect{Off: 42, Size: 1},
+ assembler: "ldb [x + 42]",
+ },
+ {
+ instruction: LoadIndirect{Off: 42, Size: 2},
+ assembler: "ldh [x + 42]",
+ },
+ {
+ instruction: LoadIndirect{Off: 42, Size: 4},
+ assembler: "ld [x + 42]",
+ },
+ {
+ instruction: LoadIndirect{Off: 42, Size: -1},
+ assembler: "unknown instruction: bpf.LoadIndirect{Off:0x2a, Size:-1}",
+ },
+ {
+ instruction: LoadMemShift{Off: 42},
+ assembler: "ldx 4*([42]&0xf)",
+ },
+ {
+ instruction: LoadExtension{Num: ExtLen},
+ assembler: "ld #len",
+ },
+ {
+ instruction: LoadExtension{Num: ExtProto},
+ assembler: "ld #proto",
+ },
+ {
+ instruction: LoadExtension{Num: ExtType},
+ assembler: "ld #type",
+ },
+ {
+ instruction: LoadExtension{Num: ExtPayloadOffset},
+ assembler: "ld #poff",
+ },
+ {
+ instruction: LoadExtension{Num: ExtInterfaceIndex},
+ assembler: "ld #ifidx",
+ },
+ {
+ instruction: LoadExtension{Num: ExtNetlinkAttr},
+ assembler: "ld #nla",
+ },
+ {
+ instruction: LoadExtension{Num: ExtNetlinkAttrNested},
+ assembler: "ld #nlan",
+ },
+ {
+ instruction: LoadExtension{Num: ExtMark},
+ assembler: "ld #mark",
+ },
+ {
+ instruction: LoadExtension{Num: ExtQueue},
+ assembler: "ld #queue",
+ },
+ {
+ instruction: LoadExtension{Num: ExtLinkLayerType},
+ assembler: "ld #hatype",
+ },
+ {
+ instruction: LoadExtension{Num: ExtRXHash},
+ assembler: "ld #rxhash",
+ },
+ {
+ instruction: LoadExtension{Num: ExtCPUID},
+ assembler: "ld #cpu",
+ },
+ {
+ instruction: LoadExtension{Num: ExtVLANTag},
+ assembler: "ld #vlan_tci",
+ },
+ {
+ instruction: LoadExtension{Num: ExtVLANTagPresent},
+ assembler: "ld #vlan_avail",
+ },
+ {
+ instruction: LoadExtension{Num: ExtVLANProto},
+ assembler: "ld #vlan_tpid",
+ },
+ {
+ instruction: LoadExtension{Num: ExtRand},
+ assembler: "ld #rand",
+ },
+ {
+ instruction: LoadAbsolute{Off: 0xfffff038, Size: 4},
+ assembler: "ld #rand",
+ },
+ {
+ instruction: LoadExtension{Num: 0xfff},
+ assembler: "unknown instruction: bpf.LoadExtension{Num:4095}",
+ },
+ {
+ instruction: StoreScratch{Src: RegA, N: 3},
+ assembler: "st M[3]",
+ },
+ {
+ instruction: StoreScratch{Src: RegX, N: 3},
+ assembler: "stx M[3]",
+ },
+ {
+ instruction: StoreScratch{Src: 0xffff, N: 3},
+ assembler: "unknown instruction: bpf.StoreScratch{Src:0xffff, N:3}",
+ },
+ {
+ instruction: ALUOpConstant{Op: ALUOpAdd, Val: 42},
+ assembler: "add #42",
+ },
+ {
+ instruction: ALUOpConstant{Op: ALUOpSub, Val: 42},
+ assembler: "sub #42",
+ },
+ {
+ instruction: ALUOpConstant{Op: ALUOpMul, Val: 42},
+ assembler: "mul #42",
+ },
+ {
+ instruction: ALUOpConstant{Op: ALUOpDiv, Val: 42},
+ assembler: "div #42",
+ },
+ {
+ instruction: ALUOpConstant{Op: ALUOpOr, Val: 42},
+ assembler: "or #42",
+ },
+ {
+ instruction: ALUOpConstant{Op: ALUOpAnd, Val: 42},
+ assembler: "and #42",
+ },
+ {
+ instruction: ALUOpConstant{Op: ALUOpShiftLeft, Val: 42},
+ assembler: "lsh #42",
+ },
+ {
+ instruction: ALUOpConstant{Op: ALUOpShiftRight, Val: 42},
+ assembler: "rsh #42",
+ },
+ {
+ instruction: ALUOpConstant{Op: ALUOpMod, Val: 42},
+ assembler: "mod #42",
+ },
+ {
+ instruction: ALUOpConstant{Op: ALUOpXor, Val: 42},
+ assembler: "xor #42",
+ },
+ {
+ instruction: ALUOpConstant{Op: 0xffff, Val: 42},
+ assembler: "unknown instruction: bpf.ALUOpConstant{Op:0xffff, Val:0x2a}",
+ },
+ {
+ instruction: ALUOpX{Op: ALUOpAdd},
+ assembler: "add x",
+ },
+ {
+ instruction: ALUOpX{Op: ALUOpSub},
+ assembler: "sub x",
+ },
+ {
+ instruction: ALUOpX{Op: ALUOpMul},
+ assembler: "mul x",
+ },
+ {
+ instruction: ALUOpX{Op: ALUOpDiv},
+ assembler: "div x",
+ },
+ {
+ instruction: ALUOpX{Op: ALUOpOr},
+ assembler: "or x",
+ },
+ {
+ instruction: ALUOpX{Op: ALUOpAnd},
+ assembler: "and x",
+ },
+ {
+ instruction: ALUOpX{Op: ALUOpShiftLeft},
+ assembler: "lsh x",
+ },
+ {
+ instruction: ALUOpX{Op: ALUOpShiftRight},
+ assembler: "rsh x",
+ },
+ {
+ instruction: ALUOpX{Op: ALUOpMod},
+ assembler: "mod x",
+ },
+ {
+ instruction: ALUOpX{Op: ALUOpXor},
+ assembler: "xor x",
+ },
+ {
+ instruction: ALUOpX{Op: 0xffff},
+ assembler: "unknown instruction: bpf.ALUOpX{Op:0xffff}",
+ },
+ {
+ instruction: NegateA{},
+ assembler: "neg",
+ },
+ {
+ instruction: Jump{Skip: 10},
+ assembler: "ja 10",
+ },
+ {
+ instruction: JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9},
+ assembler: "jeq #42,8,9",
+ },
+ {
+ instruction: JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8},
+ assembler: "jeq #42,8",
+ },
+ {
+ instruction: JumpIf{Cond: JumpEqual, Val: 42, SkipFalse: 8},
+ assembler: "jneq #42,8",
+ },
+ {
+ instruction: JumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8},
+ assembler: "jneq #42,8",
+ },
+ {
+ instruction: JumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7},
+ assembler: "jlt #42,7",
+ },
+ {
+ instruction: JumpIf{Cond: JumpLessOrEqual, Val: 42, SkipTrue: 6},
+ assembler: "jle #42,6",
+ },
+ {
+ instruction: JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4, SkipFalse: 5},
+ assembler: "jgt #42,4,5",
+ },
+ {
+ instruction: JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4},
+ assembler: "jgt #42,4",
+ },
+ {
+ instruction: JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4},
+ assembler: "jge #42,3,4",
+ },
+ {
+ instruction: JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3},
+ assembler: "jge #42,3",
+ },
+ {
+ instruction: JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2, SkipFalse: 3},
+ assembler: "jset #42,2,3",
+ },
+ {
+ instruction: JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2},
+ assembler: "jset #42,2",
+ },
+ {
+ instruction: JumpIf{Cond: JumpBitsNotSet, Val: 42, SkipTrue: 2, SkipFalse: 3},
+ assembler: "jset #42,3,2",
+ },
+ {
+ instruction: JumpIf{Cond: JumpBitsNotSet, Val: 42, SkipTrue: 2},
+ assembler: "jset #42,0,2",
+ },
+ {
+ instruction: JumpIf{Cond: 0xffff, Val: 42, SkipTrue: 1, SkipFalse: 2},
+ assembler: "unknown instruction: bpf.JumpIf{Cond:0xffff, Val:0x2a, SkipTrue:0x1, SkipFalse:0x2}",
+ },
+ {
+ instruction: TAX{},
+ assembler: "tax",
+ },
+ {
+ instruction: TXA{},
+ assembler: "txa",
+ },
+ {
+ instruction: RetA{},
+ assembler: "ret a",
+ },
+ {
+ instruction: RetConstant{Val: 42},
+ assembler: "ret #42",
+ },
+ // Invalid instruction
+ {
+ instruction: InvalidInstruction{},
+ assembler: "unknown instruction: bpf.InvalidInstruction{}",
+ },
+ }
+
+ for _, testCase := range testCases {
+ if input, ok := testCase.instruction.(fmt.Stringer); ok {
+ got := input.String()
+ if got != testCase.assembler {
+ t.Errorf("String did not return expected assembler notation, expected: %s, got: %s", testCase.assembler, got)
+ }
+ } else {
+ t.Errorf("Instruction %#v is not a fmt.Stringer", testCase.instruction)
+ }
+ }
+}
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/setter.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/setter.go
new file mode 100644
index 000000000..43e35f0ac
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/setter.go
@@ -0,0 +1,10 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+// A Setter is a type which can attach a compiled BPF filter to itself.
+type Setter interface {
+ SetBPF(filter []RawInstruction) error
+}
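
A sketch of how Setter is typically consumed: assemble a program once and hand the raw instructions to anything exposing SetBPF. The helper and its package name below are illustrative, not part of this package; concrete Setter implementations live elsewhere in golang.org/x/net.

    package filterutil // hypothetical helper package

    import "golang.org/x/net/bpf"

    // AttachFilter assembles prog and attaches it to any conn that
    // implements bpf.Setter.
    func AttachFilter(conn bpf.Setter, prog []bpf.Instruction) error {
        raw, err := bpf.Assemble(prog)
        if err != nil {
            return err
        }
        return conn.SetBPF(raw)
    }
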
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf
new file mode 100644
index 000000000..f87144064
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf
@@ -0,0 +1 @@
+50,0 0 0 42,1 0 0 42,96 0 0 3,97 0 0 3,48 0 0 42,40 0 0 42,32 0 0 42,80 0 0 42,72 0 0 42,64 0 0 42,177 0 0 42,128 0 0 0,32 0 0 4294963200,32 0 0 4294963204,32 0 0 4294963256,2 0 0 3,3 0 0 3,4 0 0 42,20 0 0 42,36 0 0 42,52 0 0 42,68 0 0 42,84 0 0 42,100 0 0 42,116 0 0 42,148 0 0 42,164 0 0 42,12 0 0 0,28 0 0 0,44 0 0 0,60 0 0 0,76 0 0 0,92 0 0 0,108 0 0 0,124 0 0 0,156 0 0 0,172 0 0 0,132 0 0 0,5 0 0 10,21 8 9 42,21 0 8 42,53 0 7 42,37 0 6 42,37 4 5 42,53 3 4 42,69 2 3 42,7 0 0 0,135 0 0 0,22 0 0 0,6 0 0 0,
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt
new file mode 100644
index 000000000..304550155
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt
@@ -0,0 +1,79 @@
+# This filter is compiled to all_instructions.bpf by the `bpf_asm`
+# tool, which can be found in the linux kernel source tree under
+# tools/net.
+
+# Load immediate
+ld #42
+ldx #42
+
+# Load scratch
+ld M[3]
+ldx M[3]
+
+# Load absolute
+ldb [42]
+ldh [42]
+ld [42]
+
+# Load indirect
+ldb [x + 42]
+ldh [x + 42]
+ld [x + 42]
+
+# Load IPv4 header length
+ldx 4*([42]&0xf)
+
+# Run extension function
+ld #len
+ld #proto
+ld #type
+ld #rand
+
+# Store scratch
+st M[3]
+stx M[3]
+
+# A <op> constant
+add #42
+sub #42
+mul #42
+div #42
+or #42
+and #42
+lsh #42
+rsh #42
+mod #42
+xor #42
+
+# A <op> X
+add x
+sub x
+mul x
+div x
+or x
+and x
+lsh x
+rsh x
+mod x
+xor x
+
+# !A
+neg
+
+# Jumps
+ja end
+jeq #42,prev,end
+jne #42,end
+jlt #42,end
+jle #42,end
+jgt #42,prev,end
+jge #42,prev,end
+jset #42,prev,end
+
+# Register transfers
+tax
+txa
+
+# Returns
+prev: ret a
+end: ret #42
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm.go
new file mode 100644
index 000000000..4c656f1e1
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm.go
@@ -0,0 +1,140 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import (
+ "errors"
+ "fmt"
+)
+
+// A VM is an emulated BPF virtual machine.
+type VM struct {
+ filter []Instruction
+}
+
+// NewVM returns a new VM using the input BPF program.
+func NewVM(filter []Instruction) (*VM, error) {
+ if len(filter) == 0 {
+ return nil, errors.New("one or more Instructions must be specified")
+ }
+
+ for i, ins := range filter {
+ check := len(filter) - (i + 1)
+ switch ins := ins.(type) {
+ // Check for out-of-bounds jumps in instructions
+ case Jump:
+ if check <= int(ins.Skip) {
+ return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip)
+ }
+ case JumpIf:
+ if check <= int(ins.SkipTrue) {
+ return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue)
+ }
+ if check <= int(ins.SkipFalse) {
+ return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse)
+ }
+ // Check for division or modulus by zero
+ case ALUOpConstant:
+ if ins.Val != 0 {
+ break
+ }
+
+ switch ins.Op {
+ case ALUOpDiv, ALUOpMod:
+ return nil, errors.New("cannot divide by zero using ALUOpConstant")
+ }
+ // Check for unknown extensions
+ case LoadExtension:
+ switch ins.Num {
+ case ExtLen:
+ default:
+ return nil, fmt.Errorf("extension %d not implemented", ins.Num)
+ }
+ }
+ }
+
+ // Make sure last instruction is a return instruction
+ switch filter[len(filter)-1].(type) {
+ case RetA, RetConstant:
+ default:
+ return nil, errors.New("BPF program must end with RetA or RetConstant")
+ }
+
+ // Though our VM works using disassembled instructions, we
+ // attempt to assemble the input filter anyway to ensure it is compatible
+ // with an operating system VM.
+ _, err := Assemble(filter)
+
+ return &VM{
+ filter: filter,
+ }, err
+}
+
+// Run runs the VM's BPF program against the input bytes.
+// Run returns the number of bytes accepted by the BPF program, and any errors
+// which occurred while processing the program.
+func (v *VM) Run(in []byte) (int, error) {
+ var (
+ // Registers of the virtual machine
+ regA uint32
+ regX uint32
+ regScratch [16]uint32
+
+ // OK is true if the program should continue processing the next
+ // instruction, or false if not, causing the loop to break
+ ok = true
+ )
+
+ // TODO(mdlayher): implement:
+ // - NegateA:
+ // - would require a change from uint32 registers to int32
+ // registers
+
+ // TODO(mdlayher): add interop tests that check signedness of ALU
+ // operations against kernel implementation, and make sure Go
+ // implementation matches behavior
+
+ for i := 0; i < len(v.filter) && ok; i++ {
+ ins := v.filter[i]
+
+ switch ins := ins.(type) {
+ case ALUOpConstant:
+ regA = aluOpConstant(ins, regA)
+ case ALUOpX:
+ regA, ok = aluOpX(ins, regA, regX)
+ case Jump:
+ i += int(ins.Skip)
+ case JumpIf:
+ jump := jumpIf(ins, regA)
+ i += jump
+ case LoadAbsolute:
+ regA, ok = loadAbsolute(ins, in)
+ case LoadConstant:
+ regA, regX = loadConstant(ins, regA, regX)
+ case LoadExtension:
+ regA = loadExtension(ins, in)
+ case LoadIndirect:
+ regA, ok = loadIndirect(ins, in, regX)
+ case LoadMemShift:
+ regX, ok = loadMemShift(ins, in)
+ case LoadScratch:
+ regA, regX = loadScratch(ins, regScratch, regA, regX)
+ case RetA:
+ return int(regA), nil
+ case RetConstant:
+ return int(ins.Val), nil
+ case StoreScratch:
+ regScratch = storeScratch(ins, regScratch, regA, regX)
+ case TAX:
+ regX = regA
+ case TXA:
+ regA = regX
+ default:
+ return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins)
+ }
+ }
+
+ return 0, nil
+}
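
The validation performed by NewVM above can be seen in isolation with a deliberately broken program; a minimal sketch (not part of the diff):

    package main

    import (
        "fmt"

        "golang.org/x/net/bpf"
    )

    func main() {
        _, err := bpf.NewVM([]bpf.Instruction{
            bpf.Jump{Skip: 5}, // jumps past the end of this two-instruction program
            bpf.RetA{},
        })
        fmt.Println(err) // cannot jump 5 instructions; jumping past program bounds
    }
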
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_aluop_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_aluop_test.go
new file mode 100644
index 000000000..16678244a
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_aluop_test.go
@@ -0,0 +1,512 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "testing"
+
+ "golang.org/x/net/bpf"
+)
+
+func TestVMALUOpAdd(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpAdd,
+ Val: 3,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 8, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 3, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpSub(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.TAX{},
+ bpf.ALUOpX{
+ Op: bpf.ALUOpSub,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 1, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpMul(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpMul,
+ Val: 2,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 6, 2, 3, 4,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 4, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpDiv(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpDiv,
+ Val: 2,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 20, 2, 3, 4,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 2, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpDivByZeroALUOpConstant(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpDiv,
+ Val: 0,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "cannot divide by zero using ALUOpConstant" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMALUOpDivByZeroALUOpX(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ // Load byte 0 into X
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.TAX{},
+ // Load byte 1 into A
+ bpf.LoadAbsolute{
+ Off: 9,
+ Size: 1,
+ },
+ // Attempt to perform 1/0
+ bpf.ALUOpX{
+ Op: bpf.ALUOpDiv,
+ },
+ // Return 4 bytes if program does not terminate
+ bpf.LoadConstant{
+ Val: 12,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 3, 4,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpOr(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 2,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpOr,
+ Val: 0x01,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x10, 0x03, 0x04,
+ 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0xff,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 9, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpAnd(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 2,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpAnd,
+ Val: 0x0019,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xaa, 0x09,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpShiftLeft(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpShiftLeft,
+ Val: 0x01,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 0x02,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x01, 0xaa,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpShiftRight(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpShiftRight,
+ Val: 0x01,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 0x04,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x08, 0xff, 0xff,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpMod(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpMod,
+ Val: 20,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 30, 0, 0,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 2, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpModByZeroALUOpConstant(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpMod,
+ Val: 0,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "cannot divide by zero using ALUOpConstant" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMALUOpModByZeroALUOpX(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ // Load byte 0 into X
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.TAX{},
+ // Load byte 1 into A
+ bpf.LoadAbsolute{
+ Off: 9,
+ Size: 1,
+ },
+ // Attempt to perform 1%0
+ bpf.ALUOpX{
+ Op: bpf.ALUOpMod,
+ },
+ // Return 4 bytes if program does not terminate
+ bpf.LoadConstant{
+ Val: 12,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 3, 4,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpXor(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpXor,
+ Val: 0x0a,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 0x01,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x0b, 0x00, 0x00, 0x00,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpUnknown(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpAdd,
+ Val: 1,
+ },
+ // Verify that an unknown operation is a no-op
+ bpf.ALUOpConstant{
+ Op: 100,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 0x02,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 1,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
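
The two divide-by-zero tests above exercise different failure points: a constant zero divisor is rejected by bpf.NewVM when the program is loaded, while a zero divisor held in register X can only be detected at run time, where the program terminates with a verdict of 0, matching the OS VM. A minimal standalone sketch of both behaviours using only the public bpf API (illustrative; not part of the vendored file, and the expected error string is the one asserted by the tests above):

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	// A constant zero divisor is rejected when the program is loaded.
	_, err := bpf.NewVM([]bpf.Instruction{
		bpf.ALUOpConstant{Op: bpf.ALUOpDiv, Val: 0},
		bpf.RetA{},
	})
	fmt.Println(err) // cannot divide by zero using ALUOpConstant

	// A zero divisor in register X is only detectable at run time, so the
	// program terminates early and returns a verdict of 0.
	vm, err := bpf.NewVM([]bpf.Instruction{
		bpf.LoadAbsolute{Off: 0, Size: 1}, // A = in[0], which is 0 below
		bpf.TAX{},                         // X = A = 0
		bpf.LoadConstant{Dst: bpf.RegA, Val: 1},
		bpf.ALUOpX{Op: bpf.ALUOpDiv}, // A / X with X == 0: terminate
		bpf.RetConstant{Val: 4},      // never reached
	})
	if err != nil {
		panic(err)
	}

	out, err := vm.Run([]byte{0})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 0
}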
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_bpf_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_bpf_test.go
new file mode 100644
index 000000000..77fa8fe4a
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_bpf_test.go
@@ -0,0 +1,192 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "net"
+ "runtime"
+ "testing"
+ "time"
+
+ "golang.org/x/net/bpf"
+ "golang.org/x/net/ipv4"
+)
+
+// A virtualMachine is a BPF virtual machine which can process an
+// input packet against a BPF program and render a verdict.
+type virtualMachine interface {
+ Run(in []byte) (int, error)
+}
+
+// canUseOSVM indicates if the OS BPF VM is available on this platform.
+func canUseOSVM() bool {
+ // OS BPF VM can only be used on platforms where x/net/ipv4 supports
+ // attaching a BPF program to a socket.
+ switch runtime.GOOS {
+ case "linux":
+ return true
+ }
+
+ return false
+}
+
+// All BPF tests against both the Go VM and OS VM are assumed to
+// be used with a UDP socket. As a result, the entire contents
+// of a UDP datagram are sent through the BPF program, but only
+// the body after the UDP header will ever be returned in output.
+
+// testVM sets up a Go BPF VM, and if available, a native OS BPF VM
+// for integration testing.
+func testVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func(), error) {
+ goVM, err := bpf.NewVM(filter)
+ if err != nil {
+ // Some tests expect an error, so this error must be returned
+ // instead of fatally exiting the test
+ return nil, nil, err
+ }
+
+ mvm := &multiVirtualMachine{
+ goVM: goVM,
+
+ t: t,
+ }
+
+ // If available, add the OS VM for tests which verify that both the Go
+ // VM and OS VM have exactly the same output for the same input program
+ // and packet.
+ done := func() {}
+ if canUseOSVM() {
+ osVM, osVMDone := testOSVM(t, filter)
+ done = func() { osVMDone() }
+ mvm.osVM = osVM
+ }
+
+ return mvm, done, nil
+}
+
+// udpHeaderLen is the length of a UDP header.
+const udpHeaderLen = 8
+
+// A multiVirtualMachine is a virtualMachine which can call out to both the Go VM
+// and the native OS VM, if the OS VM is available.
+type multiVirtualMachine struct {
+ goVM virtualMachine
+ osVM virtualMachine
+
+ t *testing.T
+}
+
+func (mvm *multiVirtualMachine) Run(in []byte) (int, error) {
+ if len(in) < udpHeaderLen {
+ mvm.t.Fatalf("input must be at least length of UDP header (%d), got: %d",
+ udpHeaderLen, len(in))
+ }
+
+ // All tests have a UDP header as part of input, because the OS VM
+ // packets always will. For the Go VM, this output is trimmed before
+ // being sent back to tests.
+ goOut, goErr := mvm.goVM.Run(in)
+ if goOut >= udpHeaderLen {
+ goOut -= udpHeaderLen
+ }
+
+ // If Go output is larger than the size of the packet, packet filtering
+ // interop tests must trim the output bytes to the length of the packet.
+ // The BPF VM should not do this on its own, as other uses of it do
+ // not trim the output byte count.
+ trim := len(in) - udpHeaderLen
+ if goOut > trim {
+ goOut = trim
+ }
+
+ // When the OS VM is not available, process using the Go VM alone
+ if mvm.osVM == nil {
+ return goOut, goErr
+ }
+
+ // The OS VM will apply its own UDP header, so remove the pseudo header
+ // that the Go VM needs.
+ osOut, err := mvm.osVM.Run(in[udpHeaderLen:])
+ if err != nil {
+ mvm.t.Fatalf("error while running OS VM: %v", err)
+ }
+
+	// Verify that both VMs return the same number of bytes
+ var mismatch bool
+ if goOut != osOut {
+ mismatch = true
+ mvm.t.Logf("output byte count does not match:\n- go: %v\n- os: %v", goOut, osOut)
+ }
+
+ if mismatch {
+ mvm.t.Fatal("Go BPF and OS BPF packet outputs do not match")
+ }
+
+ return goOut, goErr
+}
+
+// An osVirtualMachine is a virtualMachine which uses the OS's BPF VM for
+// processing BPF programs.
+type osVirtualMachine struct {
+ l net.PacketConn
+ s net.Conn
+}
+
+// testOSVM creates a virtualMachine which uses the OS's BPF VM by injecting
+// packets into a UDP listener with a BPF program attached to it.
+func testOSVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func()) {
+ l, err := net.ListenPacket("udp4", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("failed to open OS VM UDP listener: %v", err)
+ }
+
+ prog, err := bpf.Assemble(filter)
+ if err != nil {
+ t.Fatalf("failed to compile BPF program: %v", err)
+ }
+
+ p := ipv4.NewPacketConn(l)
+ if err = p.SetBPF(prog); err != nil {
+ t.Fatalf("failed to attach BPF program to listener: %v", err)
+ }
+
+ s, err := net.Dial("udp4", l.LocalAddr().String())
+ if err != nil {
+ t.Fatalf("failed to dial connection to listener: %v", err)
+ }
+
+ done := func() {
+ _ = s.Close()
+ _ = l.Close()
+ }
+
+ return &osVirtualMachine{
+ l: l,
+ s: s,
+ }, done
+}
+
+// Run sends the input bytes into the OS's BPF VM and returns its verdict.
+func (vm *osVirtualMachine) Run(in []byte) (int, error) {
+ go func() {
+ _, _ = vm.s.Write(in)
+ }()
+
+ vm.l.SetDeadline(time.Now().Add(50 * time.Millisecond))
+
+ var b [512]byte
+ n, _, err := vm.l.ReadFrom(b[:])
+ if err != nil {
+		// A timeout indicates that BPF filtered out the packet, and thus,
+		// no packet data should be returned.
+ if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
+ return n, nil
+ }
+
+ return n, err
+ }
+
+ return n, nil
+}
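
The harness above always prepends an 8-byte pseudo UDP header to the input and then trims the Go VM's verdict, so the byte counts asserted by the tests refer to payload bytes only. A standalone sketch of that arithmetic, mirroring the trimming done in multiVirtualMachine.Run but using the Go VM directly (the program and input here are illustrative, not part of the vendored file):

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

const udpHeaderLen = 8

func main() {
	vm, err := bpf.NewVM([]bpf.Instruction{
		bpf.RetConstant{Val: 12}, // accept up to 12 bytes of the input
	})
	if err != nil {
		panic(err)
	}

	// 8-byte pseudo UDP header followed by a 4-byte payload.
	in := []byte{
		0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff,
		1, 2, 3, 4,
	}

	out, err := vm.Run(in)
	if err != nil {
		panic(err)
	}

	// Mirror the harness: strip the pseudo header, then clamp to the
	// payload length, since the verdict may exceed the input size.
	if out >= udpHeaderLen {
		out -= udpHeaderLen
	}
	if payload := len(in) - udpHeaderLen; out > payload {
		out = payload
	}
	fmt.Println(out) // 4
}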
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_extension_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_extension_test.go
new file mode 100644
index 000000000..7a48c82f3
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_extension_test.go
@@ -0,0 +1,49 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "testing"
+
+ "golang.org/x/net/bpf"
+)
+
+func TestVMLoadExtensionNotImplemented(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.LoadExtension{
+ Num: 100,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "extension 100 not implemented" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMLoadExtensionExtLen(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadExtension{
+ Num: bpf.ExtLen,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 4, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_instructions.go
new file mode 100644
index 000000000..516f9462b
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_instructions.go
@@ -0,0 +1,174 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 {
+ return aluOpCommon(ins.Op, regA, ins.Val)
+}
+
+func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) {
+ // Guard against division or modulus by zero by terminating
+ // the program, as the OS BPF VM does
+ if regX == 0 {
+ switch ins.Op {
+ case ALUOpDiv, ALUOpMod:
+ return 0, false
+ }
+ }
+
+ return aluOpCommon(ins.Op, regA, regX), true
+}
+
+func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 {
+ switch op {
+ case ALUOpAdd:
+ return regA + value
+ case ALUOpSub:
+ return regA - value
+ case ALUOpMul:
+ return regA * value
+ case ALUOpDiv:
+ // Division by zero not permitted by NewVM and aluOpX checks
+ return regA / value
+ case ALUOpOr:
+ return regA | value
+ case ALUOpAnd:
+ return regA & value
+ case ALUOpShiftLeft:
+ return regA << value
+ case ALUOpShiftRight:
+ return regA >> value
+ case ALUOpMod:
+ // Modulus by zero not permitted by NewVM and aluOpX checks
+ return regA % value
+ case ALUOpXor:
+ return regA ^ value
+ default:
+ return regA
+ }
+}
+
+func jumpIf(ins JumpIf, value uint32) int {
+ var ok bool
+ inV := uint32(ins.Val)
+
+ switch ins.Cond {
+ case JumpEqual:
+ ok = value == inV
+ case JumpNotEqual:
+ ok = value != inV
+ case JumpGreaterThan:
+ ok = value > inV
+ case JumpLessThan:
+ ok = value < inV
+ case JumpGreaterOrEqual:
+ ok = value >= inV
+ case JumpLessOrEqual:
+ ok = value <= inV
+ case JumpBitsSet:
+ ok = (value & inV) != 0
+ case JumpBitsNotSet:
+ ok = (value & inV) == 0
+ }
+
+ if ok {
+ return int(ins.SkipTrue)
+ }
+
+ return int(ins.SkipFalse)
+}
+
+func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) {
+ offset := int(ins.Off)
+ size := int(ins.Size)
+
+ return loadCommon(in, offset, size)
+}
+
+func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) {
+ switch ins.Dst {
+ case RegA:
+ regA = ins.Val
+ case RegX:
+ regX = ins.Val
+ }
+
+ return regA, regX
+}
+
+func loadExtension(ins LoadExtension, in []byte) uint32 {
+ switch ins.Num {
+ case ExtLen:
+ return uint32(len(in))
+ default:
+ panic(fmt.Sprintf("unimplemented extension: %d", ins.Num))
+ }
+}
+
+func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) {
+ offset := int(ins.Off) + int(regX)
+ size := int(ins.Size)
+
+ return loadCommon(in, offset, size)
+}
+
+func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) {
+ offset := int(ins.Off)
+
+ if !inBounds(len(in), offset, 0) {
+ return 0, false
+ }
+
+ // Mask off high 4 bits and multiply low 4 bits by 4
+ return uint32(in[offset]&0x0f) * 4, true
+}
+
+func inBounds(inLen int, offset int, size int) bool {
+ return offset+size <= inLen
+}
+
+func loadCommon(in []byte, offset int, size int) (uint32, bool) {
+ if !inBounds(len(in), offset, size) {
+ return 0, false
+ }
+
+ switch size {
+ case 1:
+ return uint32(in[offset]), true
+ case 2:
+ return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true
+ case 4:
+ return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true
+ default:
+ panic(fmt.Sprintf("invalid load size: %d", size))
+ }
+}
+
+func loadScratch(ins LoadScratch, regScratch [16]uint32, regA uint32, regX uint32) (uint32, uint32) {
+ switch ins.Dst {
+ case RegA:
+ regA = regScratch[ins.N]
+ case RegX:
+ regX = regScratch[ins.N]
+ }
+
+ return regA, regX
+}
+
+func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 {
+ switch ins.Src {
+ case RegA:
+ regScratch[ins.N] = regA
+ case RegX:
+ regScratch[ins.N] = regX
+ }
+
+ return regScratch
+}
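
loadMemShift above implements the classic BPF "4 * ([k] & 0xf)" addressing mode: the low nibble of the byte at the given offset is read as an IPv4 IHL (header length in 32-bit words) and scaled to a byte count, which the LoadMemShift instruction places in register X for later use by LoadIndirect. A small worked example of that calculation (the sample bytes are illustrative, not taken from the vendored tests):

package main

import "fmt"

func main() {
	b := byte(0x45)              // IPv4 version 4 (high nibble), IHL 5 (low nibble)
	hdrLen := uint32(b&0x0f) * 4 // same expression as loadMemShift: 5 words * 4 = 20 bytes
	fmt.Println(hdrLen)          // 20

	b = 0x46                        // IHL 6: one 4-byte options word present
	fmt.Println(uint32(b&0x0f) * 4) // 24
}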
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_jump_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_jump_test.go
new file mode 100644
index 000000000..e0a3a988b
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_jump_test.go
@@ -0,0 +1,380 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "testing"
+
+ "golang.org/x/net/bpf"
+)
+
+func TestVMJumpOne(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.Jump{
+ Skip: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 1,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpOutOfProgram(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.Jump{
+ Skip: 1,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "cannot jump 1 instructions; jumping past program bounds" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMJumpIfTrueOutOfProgram(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ SkipTrue: 2,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "cannot jump 2 instructions in true case; jumping past program bounds" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMJumpIfFalseOutOfProgram(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ SkipFalse: 3,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "cannot jump 3 instructions in false case; jumping past program bounds" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMJumpIfEqual(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 1,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 1,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpIfNotEqual(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpNotEqual,
+ Val: 1,
+ SkipFalse: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 1,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpIfGreaterThan(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 4,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpGreaterThan,
+ Val: 0x00010202,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 12,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 4, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpIfLessThan(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 4,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpLessThan,
+ Val: 0xff010203,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 12,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 4, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpIfGreaterOrEqual(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 4,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpGreaterOrEqual,
+ Val: 0x00010203,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 12,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 4, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpIfLessOrEqual(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 4,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpLessOrEqual,
+ Val: 0xff010203,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 12,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 4, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpIfBitsSet(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 2,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpBitsSet,
+ Val: 0x1122,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 10,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x01, 0x02,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 2, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpIfBitsNotSet(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 2,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpBitsNotSet,
+ Val: 0x1221,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 10,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x01, 0x02,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 2, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_load_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_load_test.go
new file mode 100644
index 000000000..04578b66b
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_load_test.go
@@ -0,0 +1,246 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "net"
+ "testing"
+
+ "golang.org/x/net/bpf"
+ "golang.org/x/net/ipv4"
+)
+
+func TestVMLoadAbsoluteOffsetOutOfBounds(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 100,
+ Size: 2,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMLoadAbsoluteOffsetPlusSizeOutOfBounds(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 2,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMLoadAbsoluteBadInstructionSize(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Size: 5,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "assembling instruction 1: invalid load byte length 0" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMLoadConstantOK(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadConstant{
+ Dst: bpf.RegX,
+ Val: 9,
+ },
+ bpf.TXA{},
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMLoadIndirectOutOfBounds(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadIndirect{
+ Off: 100,
+ Size: 1,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMLoadMemShiftOutOfBounds(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadMemShift{
+ Off: 100,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+const (
+ dhcp4Port = 53
+)
+
+func TestVMLoadMemShiftLoadIndirectNoResult(t *testing.T) {
+ vm, in, done := testDHCPv4(t)
+ defer done()
+
+ // Append mostly empty UDP header with incorrect DHCPv4 port
+ in = append(in, []byte{
+ 0, 0,
+ 0, dhcp4Port + 1,
+ 0, 0,
+ 0, 0,
+ }...)
+
+ out, err := vm.Run(in)
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMLoadMemShiftLoadIndirectOK(t *testing.T) {
+ vm, in, done := testDHCPv4(t)
+ defer done()
+
+ // Append mostly empty UDP header with correct DHCPv4 port
+ in = append(in, []byte{
+ 0, 0,
+ 0, dhcp4Port,
+ 0, 0,
+ 0, 0,
+ }...)
+
+ out, err := vm.Run(in)
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := len(in)-8, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func testDHCPv4(t *testing.T) (virtualMachine, []byte, func()) {
+ // DHCPv4 test data courtesy of David Anderson:
+ // https://github.com/google/netboot/blob/master/dhcp4/conn_linux.go#L59-L70
+ vm, done, err := testVM(t, []bpf.Instruction{
+ // Load IPv4 packet length
+ bpf.LoadMemShift{Off: 8},
+ // Get UDP dport
+ bpf.LoadIndirect{Off: 8 + 2, Size: 2},
+ // Correct dport?
+ bpf.JumpIf{Cond: bpf.JumpEqual, Val: dhcp4Port, SkipFalse: 1},
+ // Accept
+ bpf.RetConstant{Val: 1500},
+ // Ignore
+ bpf.RetConstant{Val: 0},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+
+ // Minimal requirements to make a valid IPv4 header
+ h := &ipv4.Header{
+ Len: ipv4.HeaderLen,
+ Src: net.IPv4(192, 168, 1, 1),
+ Dst: net.IPv4(192, 168, 1, 2),
+ }
+ hb, err := h.Marshal()
+ if err != nil {
+ t.Fatalf("failed to marshal IPv4 header: %v", err)
+ }
+
+ hb = append([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ }, hb...)
+
+ return vm, hb, done
+}
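
In testDHCPv4 above, LoadMemShift{Off: 8} sets X to the IPv4 header length in bytes, and LoadIndirect{Off: 8 + 2, Size: 2} then reads the UDP destination port at X + Off, so the port check keeps working even when the header carries options. A quick walk-through of the offsets for the common case (a sketch assuming a 20-byte, option-free IPv4 header behind the harness's 8-byte pseudo header; not part of the vendored file):

package main

import "fmt"

func main() {
	const (
		pseudoHdrLen = 8  // UDP-style prefix the test harness prepends
		ipv4HdrLen   = 20 // X after LoadMemShift{Off: 8} reads the 0x45 byte
		dstPortOff   = 2  // UDP destination port offset within the UDP header
	)

	// LoadIndirect{Off: 8 + 2, Size: 2} reads two bytes at X + Off:
	// 20 + 10 = 30, where the appended UDP header holds dhcp4Port.
	abs := ipv4HdrLen + (pseudoHdrLen + dstPortOff)
	fmt.Println(abs) // 30
}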
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_ret_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_ret_test.go
new file mode 100644
index 000000000..2d86eae3e
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_ret_test.go
@@ -0,0 +1,115 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "testing"
+
+ "golang.org/x/net/bpf"
+)
+
+func TestVMRetA(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 9,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMRetALargerThanInput(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 2,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 255,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 2, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMRetConstant(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMRetConstantLargerThanInput(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.RetConstant{
+ Val: 16,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 2, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_scratch_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_scratch_test.go
new file mode 100644
index 000000000..e600e3c28
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_scratch_test.go
@@ -0,0 +1,247 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "testing"
+
+ "golang.org/x/net/bpf"
+)
+
+func TestVMStoreScratchInvalidScratchRegisterTooSmall(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.StoreScratch{
+ Src: bpf.RegA,
+ N: -1,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "assembling instruction 1: invalid scratch slot -1" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMStoreScratchInvalidScratchRegisterTooLarge(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.StoreScratch{
+ Src: bpf.RegA,
+ N: 16,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "assembling instruction 1: invalid scratch slot 16" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMStoreScratchUnknownSourceRegister(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.StoreScratch{
+ Src: 100,
+ N: 0,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "assembling instruction 1: invalid source register 100" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMLoadScratchInvalidScratchRegisterTooSmall(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.LoadScratch{
+ Dst: bpf.RegX,
+ N: -1,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "assembling instruction 1: invalid scratch slot -1" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMLoadScratchInvalidScratchRegisterTooLarge(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.LoadScratch{
+ Dst: bpf.RegX,
+ N: 16,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "assembling instruction 1: invalid scratch slot 16" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMLoadScratchUnknownDestinationRegister(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.LoadScratch{
+ Dst: 100,
+ N: 0,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "assembling instruction 1: invalid target register 100" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMStoreScratchLoadScratchOneValue(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ // Load byte 255
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ // Copy to X and store in scratch[0]
+ bpf.TAX{},
+ bpf.StoreScratch{
+ Src: bpf.RegX,
+ N: 0,
+ },
+ // Load byte 1
+ bpf.LoadAbsolute{
+ Off: 9,
+ Size: 1,
+ },
+ // Overwrite 1 with 255 from scratch[0]
+ bpf.LoadScratch{
+ Dst: bpf.RegA,
+ N: 0,
+ },
+ // Return 255
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 255, 1, 2,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 3, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMStoreScratchLoadScratchMultipleValues(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ // Load byte 10
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ // Store in scratch[0]
+ bpf.StoreScratch{
+ Src: bpf.RegA,
+ N: 0,
+ },
+ // Load byte 20
+ bpf.LoadAbsolute{
+ Off: 9,
+ Size: 1,
+ },
+ // Store in scratch[1]
+ bpf.StoreScratch{
+ Src: bpf.RegA,
+ N: 1,
+ },
+ // Load byte 30
+ bpf.LoadAbsolute{
+ Off: 10,
+ Size: 1,
+ },
+ // Store in scratch[2]
+ bpf.StoreScratch{
+ Src: bpf.RegA,
+ N: 2,
+ },
+ // Load byte 1
+ bpf.LoadAbsolute{
+ Off: 11,
+ Size: 1,
+ },
+ // Store in scratch[3]
+ bpf.StoreScratch{
+ Src: bpf.RegA,
+ N: 3,
+ },
+		// Load byte 10 into X
+ bpf.LoadScratch{
+ Dst: bpf.RegX,
+ N: 0,
+ },
+ // Copy X -> A
+ bpf.TXA{},
+ // Verify value is 10
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 10,
+ SkipTrue: 1,
+ },
+ // Fail test if incorrect
+ bpf.RetConstant{
+ Val: 0,
+ },
+		// Load byte 20 into A
+ bpf.LoadScratch{
+ Dst: bpf.RegA,
+ N: 1,
+ },
+ // Verify value is 20
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 20,
+ SkipTrue: 1,
+ },
+ // Fail test if incorrect
+ bpf.RetConstant{
+ Val: 0,
+ },
+		// Load byte 30 into A
+ bpf.LoadScratch{
+ Dst: bpf.RegA,
+ N: 2,
+ },
+ // Verify value is 30
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 30,
+ SkipTrue: 1,
+ },
+ // Fail test if incorrect
+ bpf.RetConstant{
+ Val: 0,
+ },
+ // Return first two bytes on success
+ bpf.RetConstant{
+ Val: 10,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 10, 20, 30, 1,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 2, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_test.go
new file mode 100644
index 000000000..6bd4dd5c3
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/bpf/vm_test.go
@@ -0,0 +1,144 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "fmt"
+ "testing"
+
+ "golang.org/x/net/bpf"
+)
+
+var _ bpf.Instruction = unknown{}
+
+type unknown struct{}
+
+func (unknown) Assemble() (bpf.RawInstruction, error) {
+ return bpf.RawInstruction{}, nil
+}
+
+func TestVMUnknownInstruction(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadConstant{
+ Dst: bpf.RegA,
+ Val: 100,
+ },
+ // Should terminate the program with an error immediately
+ unknown{},
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ defer done()
+
+ _, err = vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00,
+ })
+ if errStr(err) != "unknown Instruction at index 1: bpf_test.unknown" {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+}
+
+func TestVMNoReturnInstruction(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.LoadConstant{
+ Dst: bpf.RegA,
+ Val: 1,
+ },
+ })
+ if errStr(err) != "BPF program must end with RetA or RetConstant" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMNoInputInstructions(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{})
+ if errStr(err) != "one or more Instructions must be specified" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+// ExampleNewVM demonstrates usage of a VM, using an Ethernet frame
+// as input and checking its EtherType to determine if it should be accepted.
+func ExampleNewVM() {
+ // Offset | Length | Comment
+ // -------------------------
+ // 00 | 06 | Ethernet destination MAC address
+ // 06 | 06 | Ethernet source MAC address
+ // 12 | 02 | Ethernet EtherType
+ const (
+ etOff = 12
+ etLen = 2
+
+ etARP = 0x0806
+ )
+
+ // Set up a VM to filter traffic based on if its EtherType
+ // matches the ARP EtherType.
+ vm, err := bpf.NewVM([]bpf.Instruction{
+ // Load EtherType value from Ethernet header
+ bpf.LoadAbsolute{
+ Off: etOff,
+ Size: etLen,
+ },
+ // If EtherType is equal to the ARP EtherType, jump to allow
+ // packet to be accepted
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: etARP,
+ SkipTrue: 1,
+ },
+ // EtherType does not match the ARP EtherType
+ bpf.RetConstant{
+ Val: 0,
+ },
+ // EtherType matches the ARP EtherType, accept up to 1500
+ // bytes of packet
+ bpf.RetConstant{
+ Val: 1500,
+ },
+ })
+ if err != nil {
+ panic(fmt.Sprintf("failed to load BPF program: %v", err))
+ }
+
+ // Create an Ethernet frame with the ARP EtherType for testing
+ frame := []byte{
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55,
+ 0x08, 0x06,
+ // Payload omitted for brevity
+ }
+
+ // Run our VM's BPF program using the Ethernet frame as input
+ out, err := vm.Run(frame)
+ if err != nil {
+ panic(fmt.Sprintf("failed to accept Ethernet frame: %v", err))
+ }
+
+ // BPF VM can return a byte count greater than the number of input
+ // bytes, so trim the output to match the input byte length
+ if out > len(frame) {
+ out = len(frame)
+ }
+
+ fmt.Printf("out: %d bytes", out)
+
+ // Output:
+ // out: 14 bytes
+}
+
+// errStr returns the string representation of an error, or
+// "<nil>" if it is nil.
+func errStr(err error) string {
+ if err == nil {
+ return "<nil>"
+ }
+
+ return err.Error()
+}