Set up development environment with libbpf

h3xduck
2021-11-20 21:07:23 -05:00
parent 8e7fd92dc4
commit 53da2d141d
313 changed files with 563362 additions and 0 deletions


@@ -0,0 +1,70 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define READ_ONCE(x) (*(volatile typeof(x) *)&x)
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)
#define barrier() asm volatile("" ::: "memory")
#if defined(__x86_64__)
# define smp_rmb() barrier()
# define smp_wmb() barrier()
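/* A locked RMW on a dummy stack slot below the 128-byte red zone acts as a full barrier and is cheaper than mfence. */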
# define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
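/* x86 is TSO (total store order), so acquire/release ordering needs only a compiler barrier. */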
# define smp_store_release(p, v) \
do { \
barrier(); \
WRITE_ONCE(*p, v); \
} while (0)
# define smp_load_acquire(p) \
({ \
typeof(*p) ___p = READ_ONCE(*p); \
barrier(); \
___p; \
})
#elif defined(__aarch64__)
# define smp_rmb() asm volatile("dmb ishld" ::: "memory")
# define smp_wmb() asm volatile("dmb ishst" ::: "memory")
# define smp_mb() asm volatile("dmb ish" ::: "memory")
#endif
#ifndef smp_mb
# define smp_mb() __sync_synchronize()
#endif
#ifndef smp_rmb
# define smp_rmb() smp_mb()
#endif
#ifndef smp_wmb
# define smp_wmb() smp_mb()
#endif
#ifndef smp_store_release
# define smp_store_release(p, v) \
do { \
smp_mb(); \
WRITE_ONCE(*p, v); \
} while (0)
#endif
#ifndef smp_load_acquire
# define smp_load_acquire(p) \
({ \
typeof(*p) ___p = READ_ONCE(*p); \
smp_mb(); \
___p; \
})
#endif
#endif /* __LINUX_COMPILER_H */
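
Usage sketch: these macros pair a release-store with an acquire-load, so everything written before smp_store_release() is visible to a reader once smp_load_acquire() observes the store. A minimal single-producer/single-consumer handoff, assuming this vendored header is on the include path (payload, ready, and both function names are illustrative):

#include <linux/compiler.h>

static int payload;
static int ready;

/* Producer: write the data, then release-store the flag. */
static void publish(int v)
{
	payload = v;
	smp_store_release(&ready, 1);
}

/* Consumer: acquire-load the flag; if set, payload is guaranteed visible. */
static int try_consume(int *out)
{
	if (!smp_load_acquire(&ready))
		return 0;
	*out = payload;
	return 1;
}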


@@ -0,0 +1,38 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __LINUX_ERR_H
#define __LINUX_ERR_H
#include <linux/types.h>
#include <asm/errno.h>
#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)
static inline void * ERR_PTR(long error_)
{
return (void *) error_;
}
static inline long PTR_ERR(const void *ptr)
{
return (long) ptr;
}
static inline bool IS_ERR(const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
static inline bool IS_ERR_OR_NULL(const void *ptr)
{
return (!ptr) || IS_ERR_VALUE((unsigned long)ptr);
}
static inline long PTR_ERR_OR_ZERO(const void *ptr)
{
return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
#endif
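
These helpers implement the kernel's pointer-or-errno convention: the top MAX_ERRNO (4095) values of the address space never hold valid pointers, so a negative errno can travel inside a pointer return value. A hedged sketch (lookup_slot and its table are illustrative):

#include <errno.h>
#include <linux/err.h>

static int table[4];

static void *lookup_slot(int id)
{
	if (id < 0 || id >= 4)
		return ERR_PTR(-EINVAL);	/* encode the errno in the pointer */
	return &table[id];
}

static long slot_status(int id)
{
	void *p = lookup_slot(id);

	if (IS_ERR(p))
		return PTR_ERR(p);	/* recover the negative errno */
	return 0;			/* valid pointer */
}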


@@ -0,0 +1,134 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __LINUX_FILTER_H
#define __LINUX_FILTER_H
#include <linux/bpf.h>
#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
((struct bpf_insn) { \
.code = CODE, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
.imm = IMM })
#define BPF_ALU32_IMM(OP, DST, IMM) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
.off = 0, \
.imm = IMM })
#define BPF_ALU64_IMM(OP, DST, IMM) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
.off = 0, \
.imm = IMM })
#define BPF_MOV64_IMM(DST, IMM) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_MOV | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
.off = 0, \
.imm = IMM })
#define BPF_EXIT_INSN() \
((struct bpf_insn) { \
.code = BPF_JMP | BPF_EXIT, \
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
.imm = 0 })
#define BPF_EMIT_CALL(FUNC) \
((struct bpf_insn) { \
.code = BPF_JMP | BPF_CALL, \
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
.imm = ((FUNC) - BPF_FUNC_unspec) })
#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
.imm = 0 })
#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
.imm = 0 })
#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
((struct bpf_insn) { \
.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
.dst_reg = DST, \
.src_reg = 0, \
.off = OFF, \
.imm = IMM })
#define BPF_MOV64_REG(DST, SRC) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_MOV | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = 0, \
.imm = 0 })
#define BPF_MOV32_IMM(DST, IMM) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_MOV | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
.off = 0, \
.imm = IMM })
#define BPF_LD_IMM64_RAW_FULL(DST, SRC, OFF1, OFF2, IMM1, IMM2) \
((struct bpf_insn) { \
.code = BPF_LD | BPF_DW | BPF_IMM, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF1, \
.imm = IMM1 }), \
((struct bpf_insn) { \
.code = 0, \
.dst_reg = 0, \
.src_reg = 0, \
.off = OFF2, \
.imm = IMM2 })
#define BPF_LD_MAP_FD(DST, MAP_FD) \
BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_FD, 0, 0, \
MAP_FD, 0)
#define BPF_LD_MAP_VALUE(DST, MAP_FD, VALUE_OFF) \
BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_VALUE, 0, 0, \
MAP_FD, VALUE_OFF)
#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
.off = OFF, \
.imm = IMM })
#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
.off = OFF, \
.imm = IMM })
#endif
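
Each macro above expands to a struct bpf_insn compound literal, so a raw BPF program is just an array initializer. A minimal sketch of the smallest valid program ("r0 = 0; exit"), suitable for the BPF_PROG_LOAD command of bpf(2) or libbpf; the function name is illustrative:

#include <linux/filter.h>

static int ret0_prog_len(void)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),	/* r0 = 0 (program return value) */
		BPF_EXIT_INSN(),		/* return r0 */
	};

	/* pass prog and this count as insns/insn_cnt of BPF_PROG_LOAD */
	return sizeof(prog) / sizeof(prog[0]);
}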


@@ -0,0 +1,44 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __LINUX_KERNEL_H
#define __LINUX_KERNEL_H
#include <stddef.h>	/* size_t, used by the offsetof() fallback below */
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
#ifndef container_of
#define container_of(ptr, type, member) ({ \
const typeof(((type *)0)->member) * __mptr = (ptr); \
(type *)((char *)__mptr - offsetof(type, member)); })
#endif
#ifndef max
#define max(x, y) ({ \
typeof(x) _max1 = (x); \
typeof(y) _max2 = (y); \
(void) (&_max1 == &_max2); \
_max1 > _max2 ? _max1 : _max2; })
#endif
#ifndef min
#define min(x, y) ({ \
typeof(x) _min1 = (x); \
typeof(y) _min2 = (y); \
(void) (&_min1 == &_min2); \
_min1 < _min2 ? _min1 : _min2; })
#endif
#ifndef roundup
#define roundup(x, y) ( \
{ \
const typeof(y) __y = y; \
(((x) + (__y - 1)) / __y) * __y; \
} \
)
#endif
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#endif
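
container_of() recovers the enclosing structure from a pointer to one of its members, which is what makes intrusive data structures such as list_head work. A small sketch (struct node and key_of are illustrative):

#include <linux/kernel.h>
#include <linux/types.h>	/* struct list_head */

struct node {
	int key;
	struct list_head link;
};

static int key_of(struct list_head *lh)
{
	/* Subtract offsetof(struct node, link) from lh to get back the node. */
	return container_of(lh, struct node, link)->key;
}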


@@ -0,0 +1,91 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __LINUX_LIST_H
#define __LINUX_LIST_H
#include <linux/types.h>	/* struct list_head */
#include <linux/kernel.h>	/* container_of(), used by list_entry() */
#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)
#define POISON_POINTER_DELTA 0
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
static inline void INIT_LIST_HEAD(struct list_head *list)
{
list->next = list;
list->prev = list;
}
static inline void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
next->prev = new;
new->next = next;
new->prev = prev;
prev->next = new;
}
/**
* list_add - add a new entry
* @new: new entry to be added
* @head: list head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
static inline void list_add(struct list_head *new, struct list_head *head)
{
__list_add(new, head, head->next);
}
/*
* Delete a list entry by making the prev/next entries
* point to each other.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
next->prev = prev;
prev->next = next;
}
/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
* Note: list_empty() on entry does not return true after this; the entry is
* in an undefined state.
*/
static inline void __list_del_entry(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
}
static inline void list_del(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;
}
static inline int list_empty(const struct list_head *head)
{
return head->next == head;
}
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
#define list_first_entry(ptr, type, member) \
list_entry((ptr)->next, type, member)
#define list_next_entry(pos, member) \
list_entry((pos)->member.next, typeof(*(pos)), member)
#define list_for_each_entry(pos, head, member) \
for (pos = list_first_entry(head, typeof(*pos), member); \
&pos->member != (head); \
pos = list_next_entry(pos, member))
#endif
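
The list is intrusive: each element embeds a list_head, and list_entry()/container_of() map from the embedded node back to the element. A usage sketch (struct item and sum_items are illustrative):

#include <linux/kernel.h>
#include <linux/list.h>

struct item {
	int val;
	struct list_head node;
};

static int sum_items(void)
{
	LIST_HEAD(items);		/* empty list: head points at itself */
	struct item a = { .val = 1 }, b = { .val = 2 };
	struct item *pos;
	int total = 0;

	list_add(&a.node, &items);	/* stack-like: newest entry first */
	list_add(&b.node, &items);
	list_for_each_entry(pos, &items, node)
		total += pos->val;
	return total;			/* 3 */
}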


@@ -0,0 +1,90 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __LINUX_OVERFLOW_H
#define __LINUX_OVERFLOW_H
#include <linux/types.h>	/* __u64, used by __signed_mul_overflow() */
#define is_signed_type(type) (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#ifdef __GNUC__
#define GCC_VERSION (__GNUC__ * 10000 \
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
#endif
#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
#define check_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
__builtin_mul_overflow(__a, __b, __d); \
})
#else
/*
* If one of a or b is a compile-time constant, this avoids a division.
*/
#define __unsigned_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = __a * __b; \
__builtin_constant_p(__b) ? \
__b > 0 && __a > type_max(typeof(__a)) / __b : \
__a > 0 && __b > type_max(typeof(__b)) / __a; \
})
/*
* Signed multiplication is rather hard. gcc always follows C99, so
* division is truncated towards 0. This means that we can write the
* overflow check like this:
*
* (a > 0 && (b > MAX/a || b < MIN/a)) ||
* (a < -1 && (b > MIN/a || b < MAX/a)) ||
* (a == -1 && b == MIN)
*
* The redundant casts of -1 are to silence an annoying -Wtype-limits
* (included in -Wextra) warning: When the type is u8 or u16, the
* __b_c_e in check_mul_overflow obviously selects
* __unsigned_mul_overflow, but unfortunately gcc still parses this
* code and warns about the limited range of __b.
*/
#define __signed_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
typeof(a) __tmax = type_max(typeof(a)); \
typeof(a) __tmin = type_min(typeof(a)); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = (__u64)__a * (__u64)__b; \
(__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
(__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
(__b == (typeof(__b))-1 && __a == __tmin); \
})
#define check_mul_overflow(a, b, d) \
__builtin_choose_expr(is_signed_type(typeof(a)), \
__signed_mul_overflow(a, b, d), \
__unsigned_mul_overflow(a, b, d))
#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
#endif
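
check_mul_overflow() stores a * b through d and evaluates to nonzero iff the product overflowed, using the compiler builtin when available and the portable division-based checks otherwise. A sketch of the typical guard before sizing an allocation (alloc_array is illustrative):

#include <stdlib.h>
#include <linux/overflow.h>

static void *alloc_array(size_t n, size_t elem_size)
{
	size_t bytes;

	if (check_mul_overflow(n, elem_size, &bytes))
		return NULL;	/* n * elem_size would wrap size_t */
	return malloc(bytes);
}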


@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef _TOOLS_LINUX_RING_BUFFER_H_
#define _TOOLS_LINUX_RING_BUFFER_H_
#include <linux/compiler.h>
#include <linux/perf_event.h>	/* struct perf_event_mmap_page */
static inline __u64 ring_buffer_read_head(struct perf_event_mmap_page *base)
{
return smp_load_acquire(&base->data_head);
}
static inline void ring_buffer_write_tail(struct perf_event_mmap_page *base,
__u64 tail)
{
smp_store_release(&base->data_tail, tail);
}
#endif /* _TOOLS_LINUX_RING_BUFFER_H_ */
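
These two helpers are the user-space half of the perf mmap protocol: the kernel release-stores data_head after writing records, the consumer acquire-loads it before reading, then release-stores data_tail when finished so the kernel may reuse the space. A consumer-loop sketch, assuming base is the mmap'ed metadata page, data points at the data pages, and size is their power-of-two length (all three names are illustrative); real code must also handle records that wrap past the buffer end:

#include <linux/perf_event.h>
#include <linux/ring_buffer.h>

static void consume(struct perf_event_mmap_page *base,
		    unsigned char *data, __u64 size)
{
	__u64 head = ring_buffer_read_head(base);	/* acquire */
	__u64 tail = base->data_tail;

	while (tail < head) {
		struct perf_event_header *hdr =
			(struct perf_event_header *)&data[tail & (size - 1)];

		/* ... process one record of hdr->size bytes ... */
		tail += hdr->size;
	}
	ring_buffer_write_tail(base, tail);		/* release */
}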


@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __LINUX_TYPES_H
#define __LINUX_TYPES_H
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <asm/types.h>
#include <asm/posix_types.h>
#define __bitwise__
#define __bitwise __bitwise__
typedef __u16 __bitwise __le16;
typedef __u16 __bitwise __be16;
typedef __u32 __bitwise __le32;
typedef __u32 __bitwise __be32;
typedef __u64 __bitwise __le64;
typedef __u64 __bitwise __be64;
#ifndef __aligned_u64
# define __aligned_u64 __u64 __attribute__((aligned(8)))
#endif
struct list_head {
struct list_head *next, *prev;
};
#endif