diff --git a/main/404.html b/main/404.html
index b7b7387cc..fe23ea3e0 100644
--- a/main/404.html
+++ b/main/404.html

[Navigation-only changes: a new "bpfman CRDs - Cluster vs Namespace Scoped" entry was added to the site navigation, and the "Deploying the bpfman-operator" entry moved from the Developer Guide to the Getting Started section. The same navigation hunks repeat in every generated page and are omitted below.]
diff --git a/main/blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/index.html b/main/blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/index.html
index 4d9b58ad0..ae63fdc77 100644
--- a/main/blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/index.html
+++ b/main/blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/index.html
@@ -2111,7 +2133,7 @@ How bpfman Agent Finds the
programmatically, but we will step through the process of finding the host
PIDs for the two containers here using cli commands to demonstrate how it
works.

We will use a kind deployment with bpfman for this
-demo. See Deploy Locally via KIND for instructions on how to get this running.
+demo. See Deploy Locally via KIND for instructions on how to get this running.

The container selector in the above yaml file is the following.

containers:
   namespace: bpfman
diff --git a/main/design/clusterVsNamespaceScoped/index.html b/main/design/clusterVsNamespaceScoped/index.html
new file mode 100644
index 000000000..687531c70
--- /dev/null
+++ b/main/design/clusterVsNamespaceScoped/index.html
@@ -0,0 +1,2404 @@

    bpfman CRDs - Cluster vs Namespace Scoped

Status

This design was implemented with bpfman-operator pull request #344.
The feature was first released in the bpfman-operator v0.5.5 release.

    Introduction

For security reasons, cluster admins may want to limit certain applications
to only loading eBPF programs within a given namespace.
Currently, all bpfman Custom Resource Definitions (CRDs) are Cluster scoped.
To provide cluster admins with tighter control over eBPF program loading,
some of the bpfman CRDs also need to be Namespace scoped.

Not all eBPF programs make sense to be namespace scoped.
Some eBPF programs, like kprobe, cannot be constrained to a namespace.
The following programs will have a namespace scoped variant:

• Uprobe
• TC
• TCX
• XDP

There will also be a namespace scoped BpfApplication variant that is limited
to the namespace scoped eBPF programs listed above.

    Current Implementation

Currently, the reconciler code is broken into two layers (for both the
bpfman-operator and the bpfman-agent).
There is the *Program layer, where there is a reconciler for each program
type (Fentry, Fexit, Kprobe, etc.).
At this layer, the program specific code handles creating the program
specific structure.
The *Program layer then calls the Common layer to handle the processing that
is common across all programs.
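As a rough sketch of that layering (illustrative only; simplified from the
actual bpfman-agent code), a *Program reconciler's Reconcile method gathers
its program specific object and then hands off to the common layer:

// Illustrative sketch, not the actual bpfman-agent code: the *Program
// layer fetches its program specific CRD instance, then delegates the
// shared processing to the Common layer.
func (r *FentryProgramReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    fentryProgram := &bpfmaniov1alpha1.FentryProgram{}
    if err := r.Get(ctx, req.NamespacedName, fentryProgram); err != nil {
        // Reconcile fires for deletes as well, so ignore not-found errors.
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }
    r.currentFentryProgram = fentryProgram

    // Common layer: processing shared across all program types.
    _, result, err := r.reconcileCommon(ctx, r, []client.Object{fentryProgram})
    return result, err
}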

The implementation consists of a set of structures, plus an interface that
defines the set of methods each structure needs to support.

    struct

There are a set of structures (one for the BPF Program CRD and one for each
*Program CRD) that define the contents of the CRDs (bpfman-operator/apis/v1alpha).
Each object (BPF Program CRD and *Program CRD) also has a List object.

type BpfProgram struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec BpfProgramSpec `json:"spec"`
    // +optional
    Status BpfProgramStatus `json:"status,omitempty"`
}
type BpfProgramList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []BpfProgram `json:"items"`
}

type FentryProgram struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec FentryProgramSpec `json:"spec"`
    // +optional
    Status FentryProgramStatus `json:"status,omitempty"`
}
type FentryProgramList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []FentryProgram `json:"items"`
}

:

There is a reconciler for each *Program.
For the implementation, there is a common set of data used by each *Program
reconciler, which is contained in the base struct ReconcilerCommon.
Then there is a *Program struct, which includes each *Program's Program
struct and the base struct ReconcilerCommon.
Below are the bpfman-agent structures, but the bpfman-operator follows the
same pattern.

type ReconcilerCommon struct {
    client.Client
    Scheme       *runtime.Scheme
    GrpcConn     *grpc.ClientConn
    BpfmanClient gobpfman.BpfmanClient
    Logger       logr.Logger
    NodeName     string
    progId       *uint32
    finalizer    string
    recType      string
    appOwner     metav1.Object // Set if the owner is an application
}

type FentryProgramReconciler struct {
    ReconcilerCommon
    currentFentryProgram *bpfmaniov1alpha1.FentryProgram
    ourNode              *v1.Node
}

type FexitProgramReconciler struct {
    ReconcilerCommon
    currentFexitProgram *bpfmaniov1alpha1.FexitProgram
    ourNode             *v1.Node
}

:

    interface

The bpfmanReconciler interface defines the set of methods the *Program
structs must implement to use the common reconciler code.
Below are the bpfman-agent structures, but the bpfman-operator uses a
ProgramReconciler, which follows the same pattern.

type bpfmanReconciler interface {
    SetupWithManager(mgr ctrl.Manager) error
    Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error)
    getFinalizer() string
    getOwner() metav1.Object
    getRecType() string
    getProgType() internal.ProgramType
    getName() string
    getExpectedBpfPrograms(ctx context.Context) (*bpfmaniov1alpha1.BpfProgramList, error)
    getLoadRequest(bpfProgram *bpfmaniov1alpha1.BpfProgram, mapOwnerId *uint32) (*gobpfman.LoadRequest, error)
    getNode() *v1.Node
    getBpfProgramCommon() *bpfmaniov1alpha1.BpfProgramCommon
    setCurrentProgram(program client.Object) error
    getNodeSelector() *metav1.LabelSelector
    getBpfGlobalData() map[string][]byte
    getAppProgramId() string
}

There are also several common reconciler functions that implement the shared
processing.

func (r *ReconcilerCommon) reconcileCommon(ctx context.Context,
    rec bpfmanReconciler,
    programs []client.Object) (bool, ctrl.Result, error) {
:
}

func (r *ReconcilerCommon) reconcileBpfProgram(ctx context.Context,
    rec bpfmanReconciler,
    loadedBpfPrograms map[string]*gobpfman.ListResponse_ListResult,
    bpfProgram *bpfmaniov1alpha1.BpfProgram,
    isNodeSelected bool,
    isBeingDeleted bool,
    mapOwnerStatus *MapOwnerParamStatus) (bpfmaniov1alpha1.BpfProgramConditionType, error) {
:
}

func (r *ReconcilerCommon) reconcileBpfProgramSuccessCondition(
    isLoaded bool,
    shouldBeLoaded bool,
    isNodeSelected bool,
    isBeingDeleted bool,
    noContainersOnNode bool,
    mapOwnerStatus *MapOwnerParamStatus) bpfmaniov1alpha1.BpfProgramConditionType {
:
}

So the hierarchy looks something like this:

                     --- FentryProgramReconciler
                     |     func (r *FentryProgramReconciler) getFinalizer() string {}
                     |
bpfmanReconciler   ----- FexitProgramReconciler
  ReconcilerCommon   |     func (r *FexitProgramReconciler) getFinalizer() string {}
                     |
                     --- …

Adding Namespace Scoped CRDs

While the contents of the namespace scoped and cluster scoped CRDs are
mostly the same, Kubernetes requires a different CRD for each scope.

    struct

The set of CRD structures will need to be duplicated for each namespace
scoped CRD (bpfman-operator/apis/v1alpha).
Note that the data is similar; it is just a new object.
The primary change is that the existing ContainerSelector struct will be
replaced with a ContainerNsSelector.
For namespace scoped CRDs, the namespace in the ContainerSelector is
removed; the Namespace field for the object is embedded in the
metav1.ObjectMeta structure.
Not all Program Types will have a namespaced version, only those that can be
constrained by a namespace:

• TC
• TCX
• Uprobe
• XDP

The Application Program will also have a namespaced version, but it will
only allow the Program Types that are namespaced.
type BpfProgram struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec BpfProgramSpec `json:"spec"`
    // +optional
    Status BpfProgramStatus `json:"status,omitempty"`
}
type BpfProgramList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []BpfProgram `json:"items"`
}

type BpfNsProgram struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec BpfProgramSpec `json:"spec"`
    // +optional
    Status BpfProgramStatus `json:"status,omitempty"`
}
type BpfNsProgramList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []BpfNsProgram `json:"items"`
}

type TcProgram struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec TcProgramSpec `json:"spec"`
    // +optional
    Status TcProgramStatus `json:"status,omitempty"`
}
type TcProgramList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []TcProgram `json:"items"`
}

type TcNsProgram struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec TcNsProgramSpec `json:"spec"`
    // +optional
    Status TcProgramStatus `json:"status,omitempty"`
}
type TcNsProgramList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []TcNsProgram `json:"items"`
}

:
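To illustrate the selector change described above, here is a sketch of the
two selector structs; the field names are assumptions for illustration, not
the exact published API:

// Sketch only; field names are assumed.
type ContainerSelector struct {
    // Namespace is required here because a cluster scoped CRD can
    // target containers in any namespace.
    Namespace string `json:"namespace,omitempty"`

    Pods metav1.LabelSelector `json:"pods"`

    ContainerNames *[]string `json:"containernames,omitempty"`
}

// ContainerNsSelector drops the Namespace field; for namespace scoped
// CRDs the namespace comes from the object's own metav1.ObjectMeta.
type ContainerNsSelector struct {
    Pods metav1.LabelSelector `json:"pods"`

    ContainerNames *[]string `json:"containernames,omitempty"`
}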

    interface

The problem is that the bpfmanReconciler interface and the common functions
use the types bpfmanReconciler, BpfProgram and BpfProgramList, which now
need to be either cluster or namespaced objects.

To allow the common code to act on both cluster and namespaced objects, two
new interfaces will be introduced.
The first is BpfProg.
Both BpfProgram and BpfNsProgram need to implement these functions.
type BpfProg interface {
    GetName() string
    GetUID() types.UID
    GetAnnotations() map[string]string
    GetLabels() map[string]string
    GetStatus() *bpfmaniov1alpha1.BpfProgramStatus
    GetClientObject() client.Object
}
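As an example, a minimal sketch of how BpfProgram could satisfy this
interface. GetName, GetUID, GetAnnotations and GetLabels already come from
the embedded metav1.ObjectMeta, so only the bpfman specific accessors need
hand written bodies:

// Sketch: only the accessors not already provided by the embedded
// metav1.ObjectMeta need to be written.
func (bp *BpfProgram) GetStatus() *bpfmaniov1alpha1.BpfProgramStatus {
    return &bp.Status
}

func (bp *BpfProgram) GetClientObject() client.Object {
    return bp
}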

The second interface is BpfProgList.
Both BpfProgramList and BpfNsProgramList will need to implement these
functions.
Because the list objects hold lists of BpfProgram or BpfNsProgram, the base
interface is a generic, where type T can be either BpfProgram or
BpfNsProgram.

type BpfProgList[T any] interface {
    GetItems() []T
}
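Each concrete list type can then satisfy BpfProgList with a trivial
accessor, pinning T to its own item type (sketch):

// Sketch: each list type returns its own Items slice, so
// *BpfProgramList satisfies BpfProgList[BpfProgram] and
// *BpfNsProgramList satisfies BpfProgList[BpfNsProgram].
func (l *BpfProgramList) GetItems() []BpfProgram {
    return l.Items
}

func (l *BpfNsProgramList) GetItems() []BpfNsProgram {
    return l.Items
}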

The reconciler base struct ReconcilerCommon then becomes a generic as well,
and all references to the types bpfmanReconciler, BpfProgram and
BpfProgramList become the types bpfmanReconciler[T, TL], T and TL.
Below are the bpfman-agent structures, but the bpfman-operator follows the
same pattern.

type ReconcilerCommon[T BpfProg, TL BpfProgList[T]] struct {
    : // Data is the same
}

func (r *ReconcilerCommon[T, TL]) reconcileCommon(ctx context.Context,
    rec bpfmanReconciler[T, TL],
    programs []client.Object) (bool, ctrl.Result, error) {
:
}

func (r *ReconcilerCommon[T, TL]) reconcileBpfProgram(ctx context.Context,
    rec bpfmanReconciler[T, TL],
    loadedBpfPrograms map[string]*gobpfman.ListResponse_ListResult,
    bpfProgram *T,
    isNodeSelected bool,
    isBeingDeleted bool,
    mapOwnerStatus *MapOwnerParamStatus) (bpfmaniov1alpha1.BpfProgramConditionType, error) {
:
}

func (r *ReconcilerCommon[T, TL]) reconcileBpfProgramSuccessCondition(
    isLoaded bool,
    shouldBeLoaded bool,
    isNodeSelected bool,
    isBeingDeleted bool,
    noContainersOnNode bool,
    mapOwnerStatus *MapOwnerParamStatus) bpfmaniov1alpha1.BpfProgramConditionType {
:
}

The same applies to the bpfmanReconciler interface.

type bpfmanReconciler[T BpfProg, TL BpfProgList[T]] interface {
    SetupWithManager(mgr ctrl.Manager) error
    Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error)
    getFinalizer() string
    getOwner() metav1.Object
    getRecType() string
    getProgType() internal.ProgramType
    getName() string
    getExpectedBpfPrograms(ctx context.Context) (*TL, error)
    getLoadRequest(bpfProgram *T, mapOwnerId *uint32) (*gobpfman.LoadRequest, error)
    getNode() *v1.Node
    getBpfProgramCommon() *bpfmaniov1alpha1.BpfProgramCommon
    setCurrentProgram(program client.Object) error
    getNodeSelector() *metav1.LabelSelector
    getBpfGlobalData() map[string][]byte
    getAppProgramId() string
}

Issues arose when the ReconcilerCommon functions needed to modify the
BpfProgram or BpfNsProgram data.
For the modifications to be applied, the types need to be pointers:
bpfmanReconciler[*T, *TL], *T and *TL.
However, the compiler would not allow this:

cannot use type BpfProgList[*T] outside a type constraint: interface contains type constraints
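The following self-contained reduction shows one way Go produces exactly
this error; it is a hypothetical reconstruction, not the actual bpfman
definitions. Once an interface embeds a type union, it becomes
constraint-only: legal as a type parameter bound, illegal as an ordinary
type.

package main

// Hypothetical stand-ins for the two list CRD types.
type ClusterList struct{ Items []int }
type NamespacedList struct{ Items []int }

func (l *ClusterList) GetItems() []int    { return l.Items }
func (l *NamespacedList) GetItems() []int { return l.Items }

// The union element makes this a constraint-only interface.
type List[T any] interface {
    *ClusterList | *NamespacedList
    GetItems() []T
}

// Legal: List[T] used as a type parameter constraint.
func count[T any, TL List[T]](l TL) int { return len(l.GetItems()) }

func main() {
    cl := &ClusterList{Items: []int{1, 2, 3}}
    println(count[int, *ClusterList](cl))

    // Illegal: using the interface as an ordinary type fails with
    // "cannot use type List[int] outside a type constraint:
    //  interface contains type constraints".
    // var l List[int]
}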

To work around this, a new layer was added: a struct for the cluster scoped
code and one for the namespaced code.
The hierarchy now looks something like this:

                     +--- ClusterProgramReconciler
                     |     |
                     |     +--- FentryProgramReconciler
                     |     |     func (r *FentryProgramReconciler) getFinalizer() string {}
                     |     |     :
                     |     |
                     |     +--- FexitProgramReconciler
                     |     |     func (r *FexitProgramReconciler) getFinalizer() string {}
                     |     |     :
                     |     :
bpfmanReconciler   --+
  ReconcilerCommon   |
                     +--- NamespaceProgramReconciler
                           |
                           +--- FentryNsProgramReconciler
                           |     func (r *FentryNsProgramReconciler) getFinalizer() string {}
                           |     :
                           |
                           +--- FexitNsProgramReconciler
                           |     func (r *FexitNsProgramReconciler) getFinalizer() string {}
                           |     :
                           :

type ClusterProgramReconciler struct {
    ReconcilerCommon[BpfProgram, BpfProgramList]
}

type NamespaceProgramReconciler struct {
    ReconcilerCommon[BpfNsProgram, BpfNsProgramList]
}

Several functions were added to the bpfmanReconciler interface that are
implemented by these structures.

type bpfmanReconciler[T BpfProg, TL BpfProgList[T]] interface {
    // BPF Cluster or Namespaced Reconciler
    getBpfList(ctx context.Context, opts []client.ListOption) (*TL, error)
    updateBpfStatus(ctx context.Context, bpfProgram *T, condition metav1.Condition) error
    createBpfProgram(
        attachPoint string,
        rec bpfmanReconciler[T, TL],
        annotations map[string]string,
    ) (*T, error)

    // *Program Reconciler
    SetupWithManager(mgr ctrl.Manager) error
    :
}
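For example, each scope layer can supply its own list helper; the following
is an assumed implementation sketch, not the exact bpfman-agent code:

// Sketch: the cluster scoped layer lists BpfPrograms across the
// whole cluster.
func (r *ClusterProgramReconciler) getBpfList(ctx context.Context,
    opts []client.ListOption) (*bpfmaniov1alpha1.BpfProgramList, error) {

    bpfProgramList := &bpfmaniov1alpha1.BpfProgramList{}
    if err := r.List(ctx, bpfProgramList, opts...); err != nil {
        return nil, err
    }
    return bpfProgramList, nil
}

// Sketch: the namespaced layer lists BpfNsPrograms; callers are assumed
// to include client.InNamespace(...) in opts so the result stays within
// the CRD's own namespace.
func (r *NamespaceProgramReconciler) getBpfList(ctx context.Context,
    opts []client.ListOption) (*bpfmaniov1alpha1.BpfNsProgramList, error) {

    bpfProgramList := &bpfmaniov1alpha1.BpfNsProgramList{}
    if err := r.List(ctx, bpfProgramList, opts...); err != nil {
        return nil, err
    }
    return bpfProgramList, nil
}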

And the *Programs use the ClusterProgramReconciler or
NamespaceProgramReconciler structs instead of the ReconcilerCommon struct.

type TcProgramReconciler struct {
    ClusterProgramReconciler
    currentTcProgram *bpfmaniov1alpha1.TcProgram
    interfaces       []string
    ourNode          *v1.Node
}

type TcNsProgramReconciler struct {
    NamespaceProgramReconciler
    currentTcNsProgram *bpfmaniov1alpha1.TcNsProgram
    interfaces         []string
    ourNode            *v1.Node
}

:
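Registering a namespaced reconciler with the controller manager then follows
the usual controller-runtime pattern; a sketch (the real agent watches more
objects than shown here):

// Sketch: the TcNsProgram reconciler watches the namespaced CRD and
// the BpfNsProgram objects generated from it.
func (r *TcNsProgramReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&bpfmaniov1alpha1.TcNsProgram{}).
        Owns(&bpfmaniov1alpha1.BpfNsProgram{}).
        Complete(r)
}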

    Naming

In the existing codebase, all the CRDs are cluster scoped:

• BpfApplicationProgram
• BpfProgram
• FentryProgram
• FexitProgram
• KprobeProgram
• ...

Common practice is for cluster scoped objects to include "Cluster" in the
name and for namespaced objects to carry no identifier.
So the current CRDs SHOULD have been named:

• ClusterBpfApplicationProgram
• ClusterBpfProgram
• ClusterFentryProgram
• ClusterFexitProgram
• ClusterKprobeProgram
• ...

Around the same time this feature was being developed, another feature was
in progress that will break the loading and attaching of eBPF programs in
bpfman into two steps.
As part of that feature, all the CRDs will be completely reworked.
With this in mind, the plan for adding namespace scoped CRDs is to make the
namespaced CRDs carry the identifier.
After the load/attach split work is complete, the CRDs will be renamed to
follow the common convention, in which the cluster scoped CRD names are
prefixed with "Cluster".

The current plan is for the namespace scoped CRDs to use the "NsProgram"
identifier and the cluster scoped CRDs to use the "Program" identifier.
With the new namespace scope feature, below is the list of CRDs supported
by bpfman-operator:
• BpfNsApplicationProgram
• BpfApplicationProgram
• BpfNsProgram
• BpfProgram
• FentryProgram
• FexitProgram
• KprobeProgram
• TcNsProgram
• TcProgram
• TcxNsProgram
• TcxProgram
• TracepointProgram
• UprobeNsProgram
• UprobeProgram
• XdpNsProgram
• XdpProgram
diff --git a/main/developer-guide/develop-operator/index.html b/main/developer-guide/develop-operator/index.html
index 31e905b03..8fdb42505 100644
--- a/main/developer-guide/develop-operator/index.html
+++ b/main/developer-guide/develop-operator/index.html
@@ -2105,7 +2127,7 @@

Building
:
Running Locally in KIND

-Deploying the bpfman-operator goes into more detail on ways to
+Deploying the bpfman-operator goes into more detail on ways to
launch bpfman in a Kubernetes cluster. To run locally in a Kind cluster
with an up to date build simply run:

cd bpfman-operator/
diff --git a/main/getting-started/building-bpfman/index.html b/main/getting-started/building-bpfman/index.html
index 2fe6118e0..a6ea5cc64 100644
--- a/main/getting-started/building-bpfman/index.html
+++ b/main/getting-started/building-bpfman/index.html
@@ -2119,10 +2159,8 @@

Kernel Versions

• Relaxed CAP_BPF Requirement: Prior to Kernel 5.19, all eBPF system calls
  required CAP_BPF. This required userspace programs that wanted to access
  eBPF maps to have the CAP_BPF Linux capability. With the kernel 5.19
  change, CAP_BPF is only required for load and unload requests.
-• TCX: TCX support was added in Kernel 6.6 and is expected to be added to
-  bpfman in an upcoming release. TCX has performance improvements over TC
-  and adds support in the kernel for multiple TCX programs to run on a
-  given TC hook point.
+• TCX: TCX has performance improvements over TC and adds support in the
+  kernel for multiple TCX programs to run on a given TC hook point. TCX
+  support was added in Kernel 6.6.
• bpfman tested on older kernel versions:

@@ -2211,9 +2249,9 @@

Install perl

apt based OS:

sudo apt install perl

-Install docker
+Install docker or podman

To build the bpfman-agent and bpfman-operator using the provided Makefile
-and the make build-images command, docker needs to be installed.
+and the make build-images command, docker or podman needs to be installed.
There are several existing guides:

+Building bpfman-operator
+
+Building and deploying bpfman-operator is covered in its own section.
+See Deploying Example eBPF Programs On Kubernetes and
+Developing the bpfman-operator.

diff --git a/main/getting-started/example-bpf-k8s/index.html b/main/getting-started/example-bpf-k8s/index.html
index 18c01c319..5b11c48bb 100644
--- a/main/getting-started/example-bpf-k8s/index.html
+++ b/main/getting-started/example-bpf-k8s/index.html
@@ -1789,7 +1811,7 @@ Deploying Example eBPF Pr

This section will describe launching eBPF enabled applications on a
Kubernetes cluster. The approach is slightly different when running on a
Kubernetes cluster.

This section assumes there is already a Kubernetes cluster running and
bpfman is running in the cluster.
-See Deploying the bpfman-operator for details on
+See Deploying the bpfman-operator for details on
deploying bpfman on a Kubernetes cluster, but the quickest solution is to
run a Kubernetes KIND Cluster:

cd bpfman/bpfman-operator/
make run-on-kind

@@ -1803,6 +1825,7 @@ Loading eBPF Programs On Kubernetes
• FexitProgram CRD: Fexit Sample yaml
• KprobeProgram CRD: Kprobe Examples yaml
• TcProgram CRD: TcProgram Examples yaml
+• TcxProgram CRD: TcxProgram Examples yaml
• TracepointProgram CRD: Tracepoint Examples yaml
• UprobeProgram CRD: Uprobe Examples yaml
• XdpProgram CRD: XdpProgram Examples yaml
diff --git a/main/developer-guide/operator-quick-start/index.html b/main/getting-started/operator-quick-start/index.html
similarity index 59%
rename from main/developer-guide/operator-quick-start/index.html
rename to main/getting-started/operator-quick-start/index.html
index a2d144b25..bf2290886 100644
--- a/main/developer-guide/operator-quick-start/index.html
+++ b/main/getting-started/operator-quick-start/index.html
@@ -1737,7 +1759,7 @@

Run bpfman From Release Image
for deploying a released version of bpfman from an rpm as a systemd service
and then use Deploying Example eBPF Programs On Local Host for further
information on how to test and interact with bpfman.

-Deploying the bpfman-operator contains
+Deploying the bpfman-operator contains
more details on deploying bpfman in a Kubernetes deployment and
Deploying Example eBPF Programs On Kubernetes contains more details on
interacting with bpfman running in a Kubernetes deployment.

@@ -1790,7 +1812,7 @@ Deploying Release Vers

• There are other example programs in the Releases page.

-Continue in Deploying the bpfman-operator or
+Continue in Deploying the bpfman-operator or
Deploying Example eBPF Programs On Kubernetes if desired.
Keep in mind that prior to v0.4.0, bpfman was released as bpfd.
So follow the release specific documentation.

diff --git a/main/index.html b/main/index.html
index 3f75e3faf..fee8fcac4 100644
--- a/main/index.html
+++ b/main/index.html
@@ -1925,7 +1947,7 @@

What is bpfman?

on setting up your development environment and building bpfman.
• Example eBPF Programs for some examples of eBPF programs written in Go,
  interacting with bpfman.
-• Deploying the bpfman-operator for
+• Deploying the bpfman-operator for
  details on launching bpfman in a Kubernetes cluster.
• Meet the Community for details on community meetings.
diff --git a/main/search/search_index.json b/main/search/search_index.json
index 29bc95bae..9aec078db 100644
--- a/main/search/search_index.json
+++ b/main/search/search_index.json
@@ -1 +1 @@

Introduction

Formerly known as bpfd

    "},{"location":"#bpfman-an-ebpf-manager","title":"bpfman: An eBPF Manager","text":"

    bpfman operates as an eBPF manager, focusing on simplifying the deployment and administration of eBPF programs. Its notable features encompass:

    • System Overview: Provides insights into how eBPF is utilized in your system.
    • eBPF Program Loader: Includes a built-in program loader that supports program cooperation for XDP and TC programs, as well as deployment of eBPF programs from OCI images.
    • eBPF Filesystem Management: Manages the eBPF filesystem, facilitating the deployment of eBPF applications without requiring additional privileges.

    Our program loader and eBPF filesystem manager ensure the secure deployment of eBPF applications. Furthermore, bpfman includes a Kubernetes operator, extending these capabilities to Kubernetes. This allows users to confidently deploy eBPF through custom resource definitions across nodes in a cluster.

    "},{"location":"#quick-start","title":"Quick Start","text":"

    To get up and running with bpfman go straight to the quick start documentation.

    "},{"location":"#why-ebpf","title":"Why eBPF?","text":"

    eBPF is a powerful general-purpose framework that allows running sandboxed programs in the kernel. It can be used for many purposes, including networking, monitoring, tracing and security.

    "},{"location":"#why-ebpf-in-kubernetes","title":"Why eBPF in Kubernetes?","text":"

    Demand is increasing from both Kubernetes developers and users. Examples of eBPF in Kubernetes include:

    • Cilium and Calico CNIs
    • Pixie: Open source observability
    • KubeArmor: Container-aware runtime security enforcement system
    • Blixt: Gateway API L4 conformance implementation
    • NetObserv: Open source operator for network observability
    "},{"location":"#challenges-for-ebpf-in-kubernetes","title":"Challenges for eBPF in Kubernetes","text":"
    • Requires privileged pods:
      • eBPF-enabled apps require at least CAP_BPF permissions and potentially more depending on the type of program that is being attached.
  • Since Linux capabilities are very broad, it is challenging to constrain a pod to the minimum set of privileges required. This can allow such pods to do damage (either unintentionally or intentionally).
    • Handling multiple eBPF programs on the same eBPF hooks:
      • Not all eBPF hooks are designed to support multiple programs.
  • Some software using eBPF assumes exclusive use of an eBPF hook and can unintentionally eject existing programs when being attached. This can result in silent, non-deterministic failures.
    • Debugging problems with deployments is hard:
      • The cluster administrator may not be aware that eBPF programs are being used in a cluster.
      • It is possible for some eBPF programs to interfere with others in unpredictable ways.
      • SSH access or a privileged pod is necessary to determine the state of eBPF programs on each node in the cluster.
    • Lifecycle management of eBPF programs:
      • While there are libraries for the basic loading and unloading of eBPF programs, a lot of code is often needed around them for lifecycle management.
    • Deployment on Kubernetes is not simple:
      • It is an involved process that requires first writing a daemon that loads your eBPF bytecode and deploying it using a DaemonSet.
      • This requires careful design and intricate knowledge of the eBPF program lifecycle to ensure your program stays loaded and that you can easily tolerate pod restarts and upgrades.
      • In eBPF enabled K8s deployments today, the eBPF Program is often embedded into the userspace binary that loads and interacts with it. This means there's no easy way to have fine-grained versioning control of the bpfProgram in relation to it's accompanying userspace counterpart.
    "},{"location":"#what-is-bpfman","title":"What is bpfman?","text":"

    bpfman is a software stack that aims to make it easy to load, unload, modify and monitor eBPF programs whether on a single host, or in a Kubernetes cluster. bpfman includes the following core components:

    • bpfman: A system daemon that supports loading, unloading, modifying and monitoring of eBPF programs exposed over a gRPC API.
• eBPF CRDs: bpfman provides a set of CRDs (XdpProgram, TcProgram, etc.) that provide a way to express intent to load eBPF programs, as well as a bpfman-generated CRD (BpfProgram) used to represent the runtime state of loaded programs.
    • bpfman-agent: The agent runs in a container in the bpfman daemonset and ensures that the requested eBPF programs for a given node are in the desired state.
    • bpfman-operator: An operator, built using Operator SDK, that manages the installation and lifecycle of bpfman-agent and the CRDs in a Kubernetes cluster.

    bpfman is developed in Rust and built on top of Aya, a Rust eBPF library.
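
To make this concrete, here is a minimal sketch of what loading and attaching an XDP program looks like with Aya's userspace API. This is an illustration of the library bpfman builds on, not bpfman's own code; it assumes a recent Aya release (where the top-level type is named Ebpf) and the anyhow crate, and the object file name, program name, and interface are placeholders.

use aya::programs::{Xdp, XdpFlags};
use aya::Ebpf;

fn main() -> Result<(), anyhow::Error> {
    // Load a compiled eBPF object file into the kernel (path is a placeholder).
    let mut ebpf = Ebpf::load_file("pass.o")?;

    // Look up the program by name and cast it to the XDP program type.
    let program: &mut Xdp = ebpf.program_mut("pass").unwrap().try_into()?;
    program.load()?;

    // Attach to an interface; the kernel now runs the program on its packets.
    program.attach("eth0", XdpFlags::default())?;
    Ok(())
}

bpfman layers lifecycle management on top of this kind of low-level loading: pinning, ownership, and coordination between multiple programs.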

    The benefits of this solution include the following:

    • Security:
      • Improved security because only the bpfman daemon, which can be tightly controlled, has the privileges needed to load eBPF programs, while access to the API can be controlled via standard RBAC methods. Within bpfman, only a single thread keeps these capabilities while the other threads (serving RPCs) do not.
      • Gives the administrators control over who can load programs.
      • Allows administrators to define rules for the ordering of networking eBPF programs. (ROADMAP)
    • Visibility/Debuggability:
      • Improved visibility into what eBPF programs are running on a system, which enhances the debuggability for developers, administrators, and customer support.
      • The greatest benefit is achieved when all apps use bpfman, but even if they don't, bpfman can provide visibility into all the eBPF programs loaded on the nodes in a cluster.
    • Multi-program Support:
      • Support for the coexistence of multiple eBPF programs from multiple users.
  • Uses the libxdp multiprog protocol to allow multiple XDP programs on a single interface.
      • This same protocol is also supported for TC programs to provide a common multi-program user experience across both TC and XDP.
    • Productivity:
      • Simplifies the deployment and lifecycle management of eBPF programs in a Kubernetes cluster.
      • Developers can stop worrying about program lifecycle (loading, attaching, pin management, etc.) and use existing eBPF libraries to interact with their program maps using well defined pin points which are managed by bpfman.
      • Developers can still use Cilium/libbpf/Aya/etc libraries for eBPF development, and load/unload with bpfman.
      • Provides eBPF Bytecode Image Specifications that allows fine-grained separate versioning control for userspace and kernelspace programs. This also allows for signing these container images to verify bytecode ownership.

    For more details, please see the following:

    • bpfman Overview for an overview of bpfman.
    • Quick Start for a quick installation of bpfman without having to download or build the code from source. Good for just getting familiar with bpfman and playing around with it.
    • Deploying Example eBPF Programs On Local Host for some examples of running bpfman on local host and using the CLI to install eBPF programs on the host.
    • Deploying Example eBPF Programs On Kubernetes for some examples of deploying eBPF programs through bpfman in a Kubernetes deployment.
    • Setup and Building bpfman for instructions on setting up your development environment and building bpfman.
    • Example eBPF Programs for some examples of eBPF programs written in Go, interacting with bpfman.
    • Deploying the bpfman-operator for details on launching bpfman in a Kubernetes cluster.
• Meet the Community for community meeting details.

    We are a Cloud Native Computing Foundation sandbox project.

    "},{"location":"quick-start/","title":"Quick Start","text":"

    This section describes how to deploy bpfman quickly from pre-built release artifacts. Users can either deploy it locally via provided RPMs or in a kubernetes cluster via the provided container images and install yamls. See Releases for the complete set of bpfman releases.

    "},{"location":"quick-start/#deploy-released-rpm-from-copr-locally","title":"Deploy Released RPM from COPR Locally","text":"

    This section describes how to install an RPM built automatically by the Packit Service. The Packit Service builds RPMs for each release.

    To install an RPM generated by the Packit Service, the following packages need to be installed:

    dnf based OS:

    sudo dnf install -y dnf-plugins-core\n

Additionally, the bpfman COPR repo needs to be enabled:

    sudo dnf copr enable @ebpf-sig/bpfman\n

To see information about the latest released version of bpfman, simply run:

    sudo dnf info bpfman\n\nLast metadata expiration check: 0:03:10 ago on Mon 06 May 2024 10:37:37 AM EDT.\nAvailable Packages\nName         : bpfman\nVersion      : 0.4.2\nRelease      : 1.fc39\nArchitecture : src\nSize         : 41 M\nSource       : None\nRepository   : copr:copr.fedorainfracloud.org:group_ebpf-sig:bpfman\nSummary      : An eBPF program manager\nURL          : https://bpfman.io\nLicense      : Apache-2.0\nDescription  : An eBPF Program Manager.\n\nName         : bpfman\nVersion      : 0.4.2\nRelease      : 1.fc39\nArchitecture : x86_64\nSize         : 9.7 M\nSource       : bpfman-0.4.2-1.fc39.src.rpm\nRepository   : copr:copr.fedorainfracloud.org:group_ebpf-sig:bpfman\nSummary      : An eBPF program manager\nURL          : https://bpfman.io\nLicense      : Apache-2.0 AND Unicode-DFS-2016 AND BSD-3-Clause AND ISC AND MIT AND MPL-2.0\nDescription  : An eBPF Program Manager.\n

Next, install either the latest version with:

    sudo dnf install bpfman \n

    Or install an older version with:

    sudo dnf install bpfman-<RELEASE_VERSION> \n

    bpfman is now installed but not running. To start the bpfman-rpc server process:

    sudo systemctl daemon-reload\nsudo systemctl enable bpfman.socket\nsudo systemctl start bpfman.socket\n

Finally, you can run one of the sample applications:

    sudo bpfman load image --image-url quay.io/bpfd-bytecode/tracepoint:latest tracepoint --tracepoint sched/sched_switch\n\nsudo bpfman list\n Program ID  Name          Type        Load Time                \n 1552        enter_openat  tracepoint  2024-05-06T10:50:57-0400 \n\nsudo bpfman unload 1552\n

    When ready to uninstall, determine the RPM that is currently loaded:

    $ sudo rpm -qa | grep bpfman\nbpfman-0.4.2-1.fc39.x86_64\n

    To stop bpfman and uninstall the RPM:

    sudo systemctl stop bpfman.socket\nsudo systemctl disable bpfman.socket\n\nsudo dnf erase -y bpfman-0.4.2-1.fc39.x86_64\n\nsudo systemctl daemon-reload\n
    "},{"location":"quick-start/#deploy-released-container-images-on-kubernetes","title":"Deploy Released Container Images on Kubernetes","text":"

    The quickest solution for running bpfman in a Kubernetes deployment is to run a local Kubernetes KIND Cluster:

    Note

    OpenShift has tighter security requirements and requires additional settings. When deploying bpfman on OpenShift, use the OperatorHub from the OpenShift console, search for ebpf, and install either the Bpfman Operator by Community or the eBPF Manager Operator by Red Hat. The Bpfman Operator by Community tracks the upstream releases of bpfman. The eBPF Manager Operator by Red Hat is based on bpfman at the time of the corresponding OpenShift release.

    kind create cluster --name=test-bpfman\n

    Next, deploy the bpfman CRDs:

    export BPFMAN_REL=0.5.4\nkubectl apply -f  https://github.com/bpfman/bpfman/releases/download/v${BPFMAN_REL}/bpfman-crds-install.yaml\n

    Next, deploy the bpfman-operator, which will also deploy the bpfman-daemon, which contains bpfman-rpc, bpfman Library and bpfman-agent:

    kubectl apply -f https://github.com/bpfman/bpfman/releases/download/v${BPFMAN_REL}/bpfman-operator-install.yaml\n

    Finally, deploy an example eBPF program:

    kubectl apply -f https://github.com/bpfman/bpfman/releases/download/v${BPFMAN_REL}/go-xdp-counter-install.yaml\n\nkubectl get xdpprograms\nNAME                     BPFFUNCTIONNAME   NODESELECTOR   STATUS\ngo-xdp-counter-example   xdp_stats         {}             ReconcileSuccess\n

    There are other example program install yamls in the artifacts for each Release payload.

    Use the following command to teardown the cluster:

    kind delete cluster -n test-bpfman\n
    "},{"location":"blog/","title":"Bpfman Blog","text":""},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/","title":"A New Logo: Using Generative AI, of course","text":"

Since we renamed the project to bpfman, we are in need of a new logo. Given that the tech buzz around Generative AI is infectious, we decided to explore using generative AI to create our new logo. What we found was that it's a great way to generate ideas, but a human (me) was still needed to create the final design.

    "},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/#the-brief","title":"The Brief","text":"

    I have a love of open source projects with animal mascots, so bpfman should be no different. The \"bee\" is used a lot for eBPF related projects. One such example is Crabby, the crab/bee hybrid, that I created for the Aya project.

    The logo should be cute and playful, but not too childish. As a nod to Podman, we'd like to use the same typeface and split color-scheme as they do, replacing purple with yellow.

One bee is not enough! Since we're an eBPF manager, we need more bees!

    via GIPHY

    And since those bees are bee-ing (sorry) managed, they should be organized. Maybe in a pyramid shape?

    "},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/#the-process","title":"The Process","text":"

    We used Bing Image Creator, which is backed by DALL-E 3.

    Initially we tried to use the following prompt:

    Logo for open source software project called \"bpfman\". \"bpf\" should be yellow and \"man\" should be black or grey. an illustration of some organized bees above the text. cute. playful

    Our AI overlords came up with:

Not bad, but not quite what we were looking for. It's clear that as smart as AI is, it struggles with text, so whatever it produces will need some manual post-processing. There are bees, if you squint a bit, but they're not very organized. Let's refine our prompt a bit:

    Logo for open source software project called \"bpfman\" as one word. The \"bpf\" should be yellow and \"man\" should be black or grey. an illustration of some organized bees above the text. cute. playful.

    That... is worse.

    Let's try again:

    Logo for a project called \"bpfman\". In the text \"bpfman\", \"bpf\" should be yellow and \"man\" should be black or grey. add an illustration of some organized bees above the text. cute and playful style.

    The bottom left one is pretty good! So I shared it with the rest of the maintainers to see what they thought.

At this point the feedback I got was that the bees were too cute! We're a manager, and managers are serious business, so we need serious bees.

    Prompting the AI for the whole logo was far too ambitious, so I decided I would just use the AI to generate the bees and then I would add the text myself.

    I tried a few different prompts, but the one that worked best was:

    3 bees guarding a hive. stern expressions. simple vector style.

    The bottom right was exactly what I had in mind! With a little bit of post-processing, I ended up with this:

    Now it was time to solicit some feedback.

    "},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/#gathering-feedback","title":"Gathering Feedback","text":"

After showing the logo to a few others, we decided that the bees were in fact too stern. At this point we had a few options, like reverting back to our cute bees; however, this section in the Bing Image Creator Terms of Service was pointed out to me:

    Use of Creations. Subject to your compliance with this Agreement, the Microsoft Services Agreement, and our Content Policy, you may use Creations outside of the Online Services for any legal personal, non-commercial purpose.

    This means that we can't use the AI generated images for our logo.

    "},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/#was-it-all-for-nothing","title":"Was it all for nothing?","text":"

    Was it all for nothing? No! We learnt a lot from this process.

    Generative AI is great for generating ideas. Some of the logo compositions produced were great!

    It was also very useful to adjust the prompt based on feedback from team members so we could incorporate their ideas into the design.

    We also learnt that the AI is not great at text, so we should avoid using it for that.

    And finally, we learnt that we can't use the AI generated images for our logo. Well, not with the generator we used anyway.

    "},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/#the-semi-final-design-process","title":"The (Semi) Final Design Process","text":"

    I started from scratch, taking inspiration from the AI generated images. The bees were drawn first and composed around a hive - as our AI overlords suggested. I then added the text, and colours, but it still felt like it was missing something.

    What if we added a force field around the hive? That might be cool! And so, I added a force field around the hive and played around with the colours until I was happy.

    Here's what we ended up with:

We consulted a few more people and got some feedback. The general consensus was that the logo was too busy... However, the reception to the force field was that the favicon I'd mocked up would work better as the logo.

    "},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/#the-final-design","title":"The Final Design","text":"

    Here's the final design:

    Pretty cool, right? Even if I do say so myself.

    Our mascot is a queen bee, because she's the manager of the hive.

The force field is now no longer a force field - it's a pheromone cloud that represents the Queen Mandibular Pheromone (QMP) that the queen bee produces to keep the hive organized.

    "},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/#conclusion","title":"Conclusion","text":"

    I'm really happy with the result! I'm not a designer, so I'm sure there are things that could be improved, but I think it's a good start.

    What do you think? Join us on Slack and let us know!

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/","title":"bpfman's Integration with the AF_XDP Device Plugin and CNI for Kubernetes","text":"

AF_XDP is an address/socket family that is optimized for high-performance packet processing. It takes advantage of XDP (an in-kernel fast path), which essentially runs an eBPF program as early as possible on a network driver's receive path, and redirects the packet to an AF_XDP socket.

    AF_XDP sockets (XSKs) are created in Userspace and have a 1:1 mapping with netdev queues. An XSKMAP is an eBPF map of AF_XDP sockets for a particular netdev. It's a simple key:value map where the key is the netdev's queue-id and the value is the AF_XDP socket that's attached to that queue. The eBPF program (at the XDP hook) will leverage the XSKMAP and the XDP_REDIRECT action to redirect packets to an AF_XDP socket. In the image below the XDP program is redirecting an incoming packet to the XSK attached to Queue 2.

    NOTE: If no XSK is attached to a queue, the XDP program will simply pass the packet to the Kernel Network Stack.

    +---------------------------------------------------+\n|     XSK A      |     XSK B       |      XSK C     |<---+  Userspace\n=========================================================|==========\n|    Queue 0     |     Queue 1     |     Queue 2    |    |  Kernel space\n+---------------------------------------------------+    |\n|                  Netdev eth0                      |    |\n+---------------------------------------------------+    |\n|                            +=============+        |    |\n|                            | key |  xsk  |        |    |\n|  +---------+               +=============+        |    |\n|  |         |               |  0  | xsk A |        |    |\n|  |         |               +-------------+        |    |\n|  |         |               |  1  | xsk B |        |    |\n|  | BPF     |               +-------------+        |    |\n|  | prog    |-- redirect -->|  2  | xsk C |-------------+\n|  | (XDP    |               +-------------+        |\n|  |  HOOK)  |                   xskmap             |\n|  |         |                                      |\n|  +---------+                                      |\n|                                                   |\n+---------------------------------------------------+\n
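
For illustration, the redirect logic described above can be sketched in kernel-side Rust with the aya-ebpf crate (an assumption for this example - the map name and queue count are hypothetical, and the actual program used by the AF_XDP DP may be implemented differently):

#![no_std]
#![no_main]

use aya_ebpf::{
    bindings::xdp_action,
    macros::{map, xdp},
    maps::XskMap,
    programs::XdpContext,
};

// XSKMAP: queue-id -> AF_XDP socket, populated from userspace.
#[map]
static XSKS: XskMap = XskMap::with_max_entries(64, 0);

#[xdp]
pub fn xsk_redirect(ctx: XdpContext) -> u32 {
    // The netdev queue this packet arrived on is the key into the XSKMAP.
    let queue = unsafe { (*ctx.ctx).rx_queue_index };

    // Redirect to the XSK bound to this queue; if none is bound, the
    // XDP_PASS fallback sends the packet to the kernel network stack.
    match XSKS.redirect(queue, xdp_action::XDP_PASS as u64) {
        Ok(action) => action,
        Err(action) => action,
    }
}

#[panic_handler]
fn panic(_: &core::panic::PanicInfo) -> ! {
    loop {}
}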

The AF_XDP Device Plugin and CNI project provides the Kubernetes components to provision, advertise and manage AF_XDP networking devices for Kubernetes pods. These networking devices are typically used as a secondary networking interface for a pod. A key goal of this project is to enable pods to run without any special privileges; without it, pods that wish to use AF_XDP would need to run with elevated privileges in order to manage the eBPF program on the interface, and the infrastructure would have little to no control over what these pods can load. Therefore it's ideal to leverage a central, infrastructure-centric eBPF program management approach. This blog will discuss the eBPF program management journey for the AF_XDP Device Plugin and CNI.

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#what-does-the-af_xdp-device-plugin-and-cni-do","title":"What does the AF_XDP Device Plugin and CNI do?","text":"

    For pods to create and use AF_XDP sockets on their interfaces, they can either:

    1. Create the AF_XDP socket on an interface already plumbed to the Pod (via SR-IOV Device Plugin and the Host CNI) --> But this requires CAP_BPF or CAP_SYS_ADMIN privileges in order to load the BPF program on the netdev.

    OR

2. Use the AF_XDP Device Plugin (DP) and CNI in order to support a Pod without the aforementioned root-like privileges.

      NOTE: Prior to kernel 5.19, all BPF sys calls required CAP_BPF, which are used to access maps shared between the BPF program and the userspace program. In kernel 5.19, a change went in that only requires CAP_BPF for map creation (BPF_MAP_CREATE) and loading programs (BPF_PROG_LOAD).

  In this scenario, the AF_XDP DP will advertise resource pools (of netdevs) to Kubelet. When a Pod requests a resource from these pools, Kubelet will Allocate() one of these devices through the AF_XDP DP. The AF_XDP DP will load the eBPF program (to redirect packets to an AF_XDP socket) on the allocated device.

      The default behaviour of the AF_XDP DP (unless otherwise configured) is to take note of the XSKMAP File Descriptor (FD) for that netdev. It will also mount a Unix Domain Socket (UDS), as a hostpath mount, in the Pod. This UDS will be used by the AF_XDP application to perform a handshake with the AF_XDP DP to retrieve the XSKMAP FD. The application needs the XSKMAP FD to \"attach\" AF_XDP sockets it creates to the netdev queues.

  NOTE: Newer versions of the AF_XDP DP support eBPF map pinning, which eliminates the need to perform this (non-trivial) handshake with AF_XDP pods. It now mounts the pinned XSKMAP into the Pod using a hostpath mount. The downside of this approach is that the AF_XDP DP now needs to manage several eBPF File Systems (BPFFS), one per pod.

      The AF_XDP CNI (like any CNI) has the task of moving the netdev (with the loaded eBPF program) into the Pod namespace. It also does a few other important things:

      • It does not rename the netdev (to allow the DP to avoid IF_INDEX clashes as it manages the AF_XDP resource pools).
      • The CNI is also capable of configuring hardware filters on the NIC.
  • Finally, the CNI also unloads the eBPF program from the netdev and clears any hardware filters when the Pod is terminated.

      NOTE 1: The AF_XDP CNI manages the unloading of the eBPF program due to the AF_XDP DP not being aware of when a pod terminates (it's only invoked by Kubelet during pod creation).

      NOTE 2: Prior to bpfman integration, the CNI was extended to signal the AF_XDP DP on pod termination (via gRPC) in an effort to support eBPF map pinning directly in the AF_XDP DP. The AF_XDP DP was managing BPFFS(es) for map pinning and needed to be signalled to clean them up.

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#bpfman-integration","title":"bpfman Integration","text":"

    Prior to bpfman integration the AF_XDP Device Plugin and CNI managed the eBPF program for redirecting incoming packets to AF_XDP sockets, its associated map (XSKMAP), and/or several BPFFS.

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#integration-benefits","title":"Integration benefits","text":"

    So what are the benefits of bpfman integration for the AF_XDP DP and CNI?

    • Removes code for loading and managing eBPF from the AF_XDP DP and CNI codebase.

• This presented a difficulty, particularly when trying to find/update appropriate base container images to use for the AF_XDP device plugin. Different images supported different versions of eBPF management libraries (e.g., libbpf or libxdp), which forced multiple changes around the loading and attaching of the base eBPF program.

    • Additionally the CNI runs as a binary on the Kubernetes node so we would need to statically compile libbpf/libxdp as part of the CNI.

    • More diverse XDP program support through bpfman's eBPF Bytecode Image Specification. Not only do the AF_XDP eBPF programs no longer need to be stored in the Device Plugin itself, but it's now configurable on a per pool basis.

• No longer required to leverage hostpath volume mounts to mount the AF_XDP maps inside a Pod. Instead, it takes advantage of the bpfman CSI support to ensure that maps are pinned in the context of the Pod itself and not in a BPFFS on the host (then shared to the Pod).

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#af_xdp-device-plugin-ebpf-programmap-management","title":"AF_XDP Device Plugin eBPF program/map management","text":"

    The role of the AF_XDP DP in eBPF program/map management prior to bpfman integration:

    • Loads the default AF_XDP BPF prog onto the netdev at Pod creation and manages info regarding the XSKMAP for that netdev.

    • Mounts a UDS as a hostpath volume in the Pod OR creates a BPFFS per netdev and pins the XSKMAP to it, then mounts this BPFFS as a hostpath volume in the Pod.

    • Shares the XSKMAP file descriptor via UDS (involves a handshake with the Pod).

    The role of the AF_XDP DP in eBPF program/map management after bpfman integration:

    • Uses bpfman's client APIs to load the BPF prog.

• Shares the XSKMAP (that bpfman pinned) with the Pod as a hostpath volume.

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#af_xdp-cni-ebpf-programmap-management","title":"AF_XDP CNI eBPF program/map management","text":"

    The role of the AF_XDP CNI in eBPF program/map management prior to bpfman integration:

    • Unloads the eBPF program when a device is returned to the Host network namespace.

    The role of the AF_XDP CNI in eBPF program/map management after bpfman integration:

    • Uses gRPC to signal to the Device Plugin to request bpfman to unload the eBPF program using the client APIs.
    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#is-there-a-working-example","title":"Is there a working example?","text":"

The bpfman integration with the AF_XDP Device Plugin and CNI was demoed as part of a series of demos that show the migration of a DPDK application to AF_XDP without any application modification. The demo can be watched below:

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#af_xdp-dp-and-cnis-integration-with-bpfman-in-images","title":"AF_XDP DP and CNI's integration with bpfman in images","text":"

    The following sections will present the evolution of the AF_XDP DP and CNI from independent eBPF program management to leveraging bpfman to manage eBPF programs on their behalf.

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#af_xdp-dp-and-cni-managing-ebpf-programs-independently","title":"AF_XDP DP and CNI managing eBPF programs independently","text":"

    The following diagram details how the AF_XDP DP and CNI worked prior to bpfman integration.

1. Set up Subfunctions on the network devices (if they are supported/being used).

2. Create an AF_XDP DP and CNI configuration file to set up the device resource pools and deploy the DP and CNI.

3. When the AF_XDP DP runs, it will discover the netdevs on the host and create the resource pools.

    4. The AF_XDP DP registers the resource pools with Kubelet.

5. When a pod (that requests an AF_XDP resource) is started, Kubelet will send an Allocate() request to the AF_XDP DP. The AF_XDP DP loads the eBPF program on the interface, mounts the UDS in the pod, and sets some environment variables in the pod using the Downward API.

    NOTE: In the case where eBPF map pinning is used rather than the UDS, the AF_XDP DP will create a BPFFS where it pins the XSKMAP and mounts the BPFFS as a hostpath volume in the pod.

6. The AF_XDP DP signals success to the Kubelet so that the device is added to the pod.

7. Kubelet triggers multus, which in turn triggers the AF_XDP CNI. The CNI does the relevant network configuration and moves the netdev into the pod network namespace.

8. The application in the pod starts and initiates a handshake with the AF_XDP DP over the mounted UDS to retrieve the XSKMAP FD.

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#af_xdp-dp-and-cni-integrated-with-bpfman-no-csi","title":"AF_XDP DP and CNI integrated with bpfman (no csi)","text":"

    The following diagram details how the AF_XDP DP and CNI worked after bpfman integration.

    The main difference here is that when the Allocate() request comes in from Kubelet, the AF_XDP DP uses the bpfman client API to load the eBPF program on the relevant netdev. It takes note of where bpfman pins the XSKMAP and mounts this directory as a hostpath volume in the pod.

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#af_xdp-dp-and-cni-integrated-with-bpfman-with-csi","title":"AF_XDP DP and CNI integrated with bpfman (with csi)","text":"

    The following diagram details how the AF_XDP DP and CNI will work with bpfman leveraging the new CSI implementation.

    The pod will include a volume definition as follows:

       volumes:\n   - name: bpf-maps\n     csi:\n       driver: csi.bpfman.dev\n       volumeAttributes:\n         csi.bpfman.dev/thru-annotations: true\n

The idea here is that when the Allocate() request comes in from Kubelet, the AF_XDP DP uses the bpfman client API to load the eBPF program on the relevant netdev. The AF_XDP DP will annotate the pod with the XdpProgram name, map, and mountpath. When the bpfman CSI plugin is triggered by Kubelet, it will retrieve the information it needs from the pod annotations in order to pin the map inside the Pod.

    "},{"location":"blog/2023/11/23/bpfd-becomes-bpfman/","title":"bpfd becomes bpfman","text":"

    Bpfd is now bpfman! We've renamed the project to better reflect the direction we're taking. We're still the same project, just with a new name.

    "},{"location":"blog/2023/11/23/bpfd-becomes-bpfman/#why-the-name-change","title":"Why the name change?","text":"

    We've been using the name bpfd for a while now, but we were not the first to use it. There were projects before us that used the name bpfd, but since most were inactive, originally we didn't see this as an issue.

More recently, though, the folks at Meta have started using the name systemd-bpfd for their proposed addition to systemd.

In addition, we've been thinking about the future of the project, and particularly about security and whether it's wise to keep something with CAP_BPF capabilities running as a daemon - even if we've been very careful. This is similar to the issues faced by docker, which eventually led to the creation of podman.

    This issue led us down the path of redesigning the project to be daemonless. We'll be implementing these changes in the coming months and plan to perform our first release as bpfman in Q1 of 2024.

The 'd' in bpfd stood for daemon, so with our new design and the confusion surrounding the name bpfd, we thought it was time for a change.

    Since we're a BPF manager, we're now bpfman! It's also a nice homage to podman, which we're big fans of.

    "},{"location":"blog/2023/11/23/bpfd-becomes-bpfman/#what-does-this-mean-for-me","title":"What does this mean for me?","text":"

    If you're a developer of bpfman you will need to update your Git remotes to point at our new organization and repository name. Github will redirect these for a while, but we recommend updating your remotes as soon as possible.

    If you're a user of bpfd or the bpfd-operator then version 0.3.1 will be the last release under the bpfd name. We will continue to support you as best we can, but we recommend upgrading to bpfman as soon as our first release is available.

    "},{"location":"blog/2023/11/23/bpfd-becomes-bpfman/#whats-next","title":"What's next?","text":"

    We've hinted at some of the changes we're planning, and of course, our roadmap is always available in Github. It's worth mentioning that we're also planning to expand our release packages to include RPMs and DEBs, making it even easier to install bpfman on your favorite Linux distribution.

    "},{"location":"blog/2023/11/23/bpfd-becomes-bpfman/#thanks","title":"Thanks!","text":"

    We'd like to thank everyone who has contributed to bpfd over the years. We're excited about the future of bpfman and we hope you are too! Please bear with us as we make this transition, and if you have any questions or concerns, please reach out to us on Slack. We're in the '#bpfd' channel, but we'll be changing that to '#bpfman' soon.

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/","title":"Technical Challenges for Attaching eBPF Programs in Containers","text":"

We recently added support for attaching uprobes inside containers. The purpose of this blog is to give a brief overview of the feature, to document the technical challenges encountered, and to describe our solutions for those challenges. In particular: how do we attach an eBPF program inside of a container, and how do we find the host Process ID (PID) on the node for that container?

    The solutions seem relatively straightforward now that they are done, but we found limited information elsewhere, so we thought it would be helpful to document them here.

    The uprobe implementation will be used as the example in this blog, but the concepts can (and will eventually) be applied to other program types.

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#introduction","title":"Introduction","text":"

    A \"uprobe\" (user probe) is a type of eBPF program that can be attached to a specific location in a user-space application. This allows developers and system administrators to dynamically instrument a user-space binary to inspect its behavior, measure performance, or debug issues without modifying the application's source code or binary. When the program execution reaches the location to which the uprobe is attached, the eBPF program associated with the uprobe is executed.

    bpfman support for uprobes has existed for some time. We recently extended this support to allow users to attach uprobes inside of containers both in the general case of a container running on a Linux server and also for containers running in a Kubernetes cluster.

    The following is a bpfman command line example for loading a uprobe inside a container:

    bpfman load image --image-url quay.io/bpfman-bytecode/uprobe:latest uprobe --fn-name \"malloc\" --target \"libc\" --container-pid 102745\n

    The above command instructs bpfman to attach a uprobe to the malloc function in the libc library for the container with PID 102745. The main addition here is the ability to specify a container-pid, which is the PID of the container as it is known to the host server.

    The term \"target\" as used in the above bpfman command (and the CRD below) describes the library or executable that we want to attach the uprobe to. The fn-name (the name of the function within that target) and/or an explicit \"offset\" can be used to identify a specific offset from the beginning of the target. We also use the term \"target\" more generally to describe the intended location of the uprobe.

    For Kubernetes, the CRD has been extended to include a \"container selector\" to describe one or more containers as shown in the following example.

    apiVersion: bpfman.io/v1alpha1\nkind: UprobeProgram\nmetadata:\n  labels:\n    app.kubernetes.io/name: uprobeprogram\n  name: uprobe-example-containers\nspec:\n  # Select all nodes\n  nodeselector: {}\n  bpffunctionname: my_uprobe\n  func_name: malloc\n  # offset: 0 # optional offset w/in function\n  target: libc\n  retprobe: false\n  # pid: 0 # optional pid to execute uprobe for\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/uprobe:latest\n  containers:      <=== New section for specifying containers to attach uprobe to\n    namespace: bpfman\n    pods:\n      matchLabels:\n        name: bpfman-daemon\n    containernames:\n      - bpfman\n      - bpfman-agent\n

    In the Kubernetes case, the container selector (containers) is used to identify one or more containers in which to attach the uprobe. If containers identifies any containers on a given node, the bpfman agent on that node will determine their host PIDs and make the calls to bpfman to attach the uprobes.

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#attaching-uprobes-in-containers","title":"Attaching uprobes in containers","text":"

    A Linux \"mount namespace\" is a feature that isolates the mount points seen by a group of processes. This means that processes in different mount namespaces can have different views of the filesystem. A container typically has its own mount namespace that is isolated both from those of other containers and its parent. Because of this, files that are visible in one container are likely not visible to other containers or even to the parent host (at least not directly). To attach a uprobe to a file in a container, we need to have access to that container's mount namespace so we can see the file to which the uprobe needs to be attached.

From a high level, attaching a uprobe to an executable or library in a container is relatively straightforward. bpfman needs to change to the mount namespace of the container, attach the uprobe to the target in that container, and then return to its own mount namespace so that it can save the needed state and continue processing other requests.

    The main challenges are:

    1. Changing to the mount namespace of the target container.
    2. Returning to the bpfman mount namespace.
    3. setns (at least for the mount namespace) can't be called from a multi-threaded application, and bpfman is currently multithreaded.
    4. How to find the right PID for the target container.
    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#the-mount-namespace","title":"The Mount Namespace","text":"

    To enter the container namespace, bpfman uses the sched::setns function from the Rust nix crate. The setns function requires the file descriptor for the mount namespace of the target container.

    For a given container PID, the namespace file needed by the setns function can be found in the /proc/<PID>/ns/ directory. An example listing for the PID 102745 directory is shown below:

    sudo ls -l /proc/102745/ns/\ntotal 0\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 cgroup -> 'cgroup:[4026531835]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 ipc -> 'ipc:[4026532858]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 mnt -> 'mnt:[4026532856]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:07 net -> 'net:[4026532860]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 pid -> 'pid:[4026532859]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 pid_for_children -> 'pid:[4026532859]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 time -> 'time:[4026531834]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 time_for_children -> 'time:[4026531834]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 user -> 'user:[4026531837]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 uts -> 'uts:[4026532857]'\n

    In this case, the mount namespace file is /proc/102745/ns/mnt.

    NOTE: How to find the PID and the relationship between parent and child PIDs is described in the \"Finding The PID\" section below.

    When running directly on a Linux server, bpfman has access to the host /proc directory and can access the mount namespace file for any PID. However, on Kubernetes, bpfman runs in a container, so it doesn't have access to the namespace files of other containers or the /proc directory of the host by default. Therefore, in the Kubernetes implementation, /proc is mounted in the bpfman container so it has access to the ns directories of other containers.

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#returning-to-the-bpfman-mount-namespace","title":"Returning to the bpfman Mount Namespace","text":"

    After bpfman does a setns to the target container mount namespace, it has access to the target binary in that container. However, it only has access to that container's view of the filesystem, and in most cases, this does not include access to bpfman's filesystem or the host filesystem. As a result, bpfman loses the ability to access its own mount namespace file.

However, before calling setns, bpfman has access to its own mount namespace file. Therefore, to avoid getting stranded in a different mount namespace, bpfman also opens its own mount namespace file prior to calling setns, so it already has the file descriptor that will allow it to call setns to return to its own mount namespace.
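
A minimal sketch of this open-before-you-leave pattern, assuming a recent version of the nix crate (an illustration, not bpfman's exact code):

use std::fs::File;
use nix::sched::{setns, CloneFlags};

// NOTE: this only works from a single-threaded process (see the next
// section), which is why bpfman runs it in a separate helper.
fn attach_in_container_ns(target_pid: u32) -> Result<(), Box<dyn std::error::Error>> {
    // Open our own mount namespace file *before* leaving; once we have
    // entered the container's namespace this path is no longer visible.
    let own_mnt = File::open("/proc/self/ns/mnt")?;
    let target_mnt = File::open(format!("/proc/{target_pid}/ns/mnt"))?;

    // Enter the target container's mount namespace.
    setns(&target_mnt, CloneFlags::CLONE_NEWNS)?;

    // ... the target binary is now visible; attach the uprobe here ...

    // Return home using the descriptor saved earlier.
    setns(&own_mnt, CloneFlags::CLONE_NEWNS)?;
    Ok(())
}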

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#running-setns-from-a-multi-threaded-process","title":"Running setns From a Multi-threaded Process","text":"

    Calling setns to a mount namespace doesn't work from a multi-threaded process.

    To work around this issue, the logic was moved to a standalone single-threaded executable called bpfman-ns that does the job of entering the namespace, attaching the uprobe, and then returning to the bpfman namespace to save the needed info.
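
The daemon can then delegate to the helper with an ordinary process spawn. The sketch below is hypothetical - the subcommand and flag names are invented for illustration and are not bpfman-ns's real CLI:

use std::io::{Error, ErrorKind};
use std::process::Command;

fn attach_uprobe_via_helper(container_pid: u32, target: &str, fn_name: &str) -> std::io::Result<()> {
    // The helper is single-threaded, so it can safely setns into the
    // container's mount namespace, attach the uprobe, and setns back.
    let status = Command::new("bpfman-ns")
        .arg("uprobe")
        .arg("--container-pid").arg(container_pid.to_string())
        .arg("--target").arg(target)
        .arg("--fn-name").arg(fn_name)
        .status()?;
    if !status.success() {
        return Err(Error::new(ErrorKind::Other, "bpfman-ns failed to attach the uprobe"));
    }
    Ok(())
}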

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#finding-the-pid","title":"Finding the PID","text":""},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#finding-a-host-container-pid-on-a-linux-server","title":"Finding a Host Container PID on a Linux Server","text":"

    This section provides an overview of PID namespaces and shows several ways to find the host PID for a container.

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#tldr","title":"tl;dr","text":"

    If you used Podman or Docker to run your container, and you gave the container a unique name, the following commands can be used to find the host PID of a container.

    podman inspect -f '{{.State.Pid}}' <CONTAINER_NAME>\n

    or, similarly,

    docker inspect -f '{{.State.Pid}}'  <CONTAINER_NAME>\n
    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#overview-of-pid-namespaces-and-container-host-pids","title":"Overview of PID namespaces and Container Host PIDs","text":"

Each container has a PID namespace. Each PID namespace (other than the root) is contained within a parent PID namespace. In general, this relationship is hierarchical and PID namespaces can be nested within other PID namespaces. In this section, we will just cover the case of a root PID namespace on a Linux server that has containers with PID namespaces that are direct children of the root. The multi-level case is described in the section on Nested Containers with kind below.

    The PID namespaces can be listed using the lsns -t pid command. Before we start any containers, we just have the one root pid namespace as shown below.

    sudo lsns -t pid\n        NS TYPE NPROCS PID USER COMMAND\n4026531836 pid     325   1 root /usr/lib/systemd/systemd rhgb --switched-root --system --deserialize 30\n

Now let's start a container with the following command in a new shell:

    podman run -it --name=container_1 fedora:latest /bin/bash\n

    NOTE: In this section, we are using podman to run containers. However, all of the same commands can also be used with docker.

    Now back on the host we have:

    sudo lsns -t pid\n        NS TYPE NPROCS    PID USER      COMMAND\n4026531836 pid     337      1 root      /usr/lib/systemd/systemd rhgb --switched-root --system --deserialize 30\n4026532948 pid       1 150342 user_abcd /bin/bash\n

    We can see that the host PID for the container we just started is 150342.

    Now let's start another container in a new shell with the same command (except with a different name), and run the lsns command again on the host.

    podman run -it --name=container_2 fedora:latest /bin/bash\n

    On the host:

    sudo lsns -t pid\n        NS TYPE NPROCS    PID USER      COMMAND\n4026531836 pid     339      1 root      /usr/lib/systemd/systemd rhgb --switched-root --system --deserialize 30\n4026532948 pid       1 150342 user_abcd /bin/bash\n4026533041 pid       1 150545 user_abcd /bin/bash\n

    We now have 3 pid namespaces -- one for root and two for the containers. Since we already know that the first container had PID 150342 we can conclude that the second container has PID 150545. However, what would we do if we didn't already know the PID for one of the containers?

    If the container we were interested in was running a unique command, we could use that to disambiguate. However, in this case, both are running the same /bin/bash command.

    If something unique is running inside of the container, we can use the ps -e -o pidns,pid,args command to get some info.

    For example, run sleep 1111 in container_1, then

    sudo ps -e -o pidns,pid,args | grep 'sleep 1111'\n4026532948  150778 sleep 1111\n4026531836  151002 grep --color=auto sleep 1111\n

    This tells us that the sleep 1111 command is running in PID namespace 4026532948. And,

    sudo lsns -t pid | grep 4026532948\n4026532948 pid       2 150342 user_abcd /bin/bash\n

tells us that the container's host PID is 150342.

    Alternatively, we could run lsns inside of container_1.

    dnf install -y util-linux\nlsns -t pid\n        NS TYPE NPROCS PID USER COMMAND\n4026532948 pid       2   1 root /bin/bash\n

    This tells us a few interesting things.

1. Inside the container, the PID is 1.
    2. We can't see any of the other PID namespaces inside the container.
    3. The container PID namespace is 4026532948.

With the container PID namespace, we can run the lsns -t pid | grep 4026532948 command as we did above to find the container's host PID.

    Finally, the container runtime knows the pid mapping. As mentioned at the beginning of this section, if the unique name of the container is known, the following command can be used to get the host PID.

    podman inspect -f '{{.State.Pid}}' container_1\n150342\n
    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#how-bpfman-agent-finds-the-pid-on-kubernetes","title":"How bpfman Agent Finds the PID on Kubernetes","text":"

    When running on Kubernetes, the \"containers\" field in the UprobeProgram CRD can be used to identify one or more containers using the following information:

    • Namespace
    • Pod Label
    • Container Name

    If the container selector matches any containers on a given node, the bpfman-agent determines the host PID for those containers and then calls bpfman to attach the uprobe in the container with the given PID.

    From what we can tell, there is no way to find the host PID for a container running in a Kubernetes pod from the Kubernetes interface. However, the container runtime does know this mapping.

    The bpfman-agent implementation uses multiple steps to find the set of PIDs on a given node (if any) for the containers that are identified by the container selector.

    1. It uses the Kubernetes interface to get a list of pods on the local node that match the container selector.
2. It uses crictl with the names of the pods found to get the pod IDs.
    3. It uses crictl with the pod ID to find the containers in those pods and then checks whether any match the container selector.
4. Finally, it uses crictl with the container IDs found to get the host PIDs for the containers.

    As an example, the bpfman.io_v1alpha1_uprobe_uprobeprogram_containers.yaml file can be used with the kubectl apply -f command to install uprobes on two of the containers in the bpfman-agent pod. The bpfman code does this programmatically, but we will step through the process of finding the host PIDs for the two containers here using cli commands to demonstrate how it works.

    We will use a kind deployment with bpfman for this demo. See Deploy Locally via KIND for instructions on how to get this running.

    The container selector in the above yaml file is the following.

      containers:\n    namespace: bpfman\n    pods:\n      matchLabels:\n        name: bpfman-daemon\n    containernames:\n      - bpfman\n      - bpfman-agent\n

    bpfman accesses the Kubernetes API and uses crictl from the bpfman-agent container. However, the bpfman-agent container doesn't have a shell by default, so we will run the examples from the bpfman-deployment-control-plane node, which will yield the same results. bpfman-deployment-control-plane is a docker container in our kind cluster, so enter the container.

    docker exec -it c84cae77f800 /bin/bash\n
    Install crictl.

    apt update\napt install wget\nVERSION=\"v1.28.0\"\nwget https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-$VERSION-linux-amd64.tar.gz\ntar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin\nrm -f crictl-$VERSION-linux-amd64.tar.gz\n

    First use kubectl to get the list of pods that match our container selector.

    kubectl get pods -n bpfman -l name=bpfman-daemon\nNAME                  READY   STATUS    RESTARTS   AGE\nbpfman-daemon-cv9fm   3/3     Running   0          6m54s\n

    NOTE: The bpfman code also filters on the local node, but we only have one node in this deployment, so we'll ignore that here.

    Now, use crictl with the name of the pod found to get the pod ID.

    crictl pods --name bpfman-daemon-cv9fm\nPOD ID              CREATED             STATE               NAME                  NAMESPACE           ATTEMPT             RUNTIME\ne359900d3eca5       46 minutes ago      Ready               bpfman-daemon-cv9fm   bpfman              0                   (default)\n

    Now, use the pod ID to get the list of containers in the pod.

    crictl ps --pod e359900d3eca5\nCONTAINER           IMAGE               CREATED             STATE               NAME                    ATTEMPT             POD ID              POD\n5eb3b4e5b45f8       50013f94a28d1       48 minutes ago      Running             node-driver-registrar   0                   e359900d3eca5       bpfman-daemon-cv9fm\n629172270a384       e507ecf33b1f8       48 minutes ago      Running             bpfman-agent            0                   e359900d3eca5       bpfman-daemon-cv9fm\n6d2420b80ddf0       86a517196f329       48 minutes ago      Running             bpfman                  0                   e359900d3eca5       bpfman-daemon-cv9fm\n

    Now use the container IDs for the containers identified in the container selector to get the PIDs of the containers.

    # Get PIDs for bpfman-agent container\ncrictl inspect 629172270a384 | grep pid\n    \"pid\": 2158,\n            \"pid\": 1\n            \"type\": \"pid\"\n\n# Get PIDs for bpfman container\ncrictl inspect 6d2420b80ddf0 | grep pid\n    \"pid\": 2108,\n            \"pid\": 1\n            \"type\": \"pid\"\n

    From the above output, we can tell that the host PID for the bpfman-agent container is 2158, and the host PID for the bpfman container is 2108. So, now bpfman-agent would have the information needed to call bpfman with a request to install a uprobe in the containers.

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#nested-containers-with-kind","title":"Nested Containers with kind","text":"

kind is a tool for running local Kubernetes clusters using Docker container \"nodes\". The kind cluster we used for the previous section had a single node.

    $ kubectl get nodes\nNAME                              STATUS   ROLES           AGE   VERSION\nbpfman-deployment-control-plane   Ready    control-plane   24h   v1.27.3\n

    We can see the container for that node on the base server from Docker as follows.

    docker ps\nCONTAINER ID   IMAGE                  COMMAND                  CREATED        STATUS        PORTS                       NAMES\nc84cae77f800   kindest/node:v1.27.3   \"/usr/local/bin/entr\u2026\"   25 hours ago   Up 25 hours   127.0.0.1:36795->6443/tcp   bpfman-deployment-control-plane\n

    Our cluster has a number of pods as shown below.

    kubectl get pods -A\nNAMESPACE            NAME                                                      READY   STATUS    RESTARTS   AGE\nbpfman               bpfman-daemon-cv9fm                                       3/3     Running   0          24h\nbpfman               bpfman-operator-7f67bc7c57-bpw9v                          2/2     Running   0          24h\nkube-system          coredns-5d78c9869d-7tw9b                                  1/1     Running   0          24h\nkube-system          coredns-5d78c9869d-wxwfn                                  1/1     Running   0          24h\nkube-system          etcd-bpfman-deployment-control-plane                      1/1     Running   0          24h\nkube-system          kindnet-lbzw4                                             1/1     Running   0          24h\nkube-system          kube-apiserver-bpfman-deployment-control-plane            1/1     Running   0          24h\nkube-system          kube-controller-manager-bpfman-deployment-control-plane   1/1     Running   0          24h\nkube-system          kube-proxy-sz8v9                                          1/1     Running   0          24h\nkube-system          kube-scheduler-bpfman-deployment-control-plane            1/1     Running   0          24h\nlocal-path-storage   local-path-provisioner-6bc4bddd6b-22glj                   1/1     Running   0          24h\n

    Using the lsns command in the node's docker container, we can see that it has a number of PID namespaces (1 for each container that is running in the pods in the cluster), and all of these containers are nested inside of the docker \"node\" container shown above.

    lsns -t pid\n        NS TYPE NPROCS   PID USER  COMMAND\n# Note: 12 rows have been deleted below to save space\n4026532861 pid      17     1 root  /sbin/init\n4026532963 pid       1   509 root  kube-scheduler --authentication-kubeconfig=/etc/kubernetes/scheduler.conf --authorization-kubeconfig=/etc/kubernetes/scheduler.conf --bind-addre\n4026532965 pid       1   535 root  kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf --authorization-kubeconfi\n4026532967 pid       1   606 root  kube-apiserver --advertise-address=172.18.0.2 --allow-privileged=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/pki/ca.crt\n4026532969 pid       1   670 root  etcd --advertise-client-urls=https://172.18.0.2:2379 --cert-file=/etc/kubernetes/pki/etcd/server.crt --client-cert-auth=true --data-dir=/var/lib\n4026532972 pid       1  1558 root  local-path-provisioner --debug start --helper-image docker.io/kindest/local-path-helper:v20230510-486859a6 --config /etc/config/config.json\n4026533071 pid       1   957 root  /usr/local/bin/kube-proxy --config=/var/lib/kube-proxy/config.conf --hostname-override=bpfman-deployment-control-plane\n4026533073 pid       1  1047 root  /bin/kindnetd\n4026533229 pid       1  1382 root  /coredns -conf /etc/coredns/Corefile\n4026533312 pid       1  1896 65532 /usr/local/bin/kube-rbac-proxy --secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8174/ --logtostderr=true --v=0\n4026533314 pid       1  1943 65532 /bpfman-operator --health-probe-bind-address=:8175 --metrics-bind-address=127.0.0.1:8174 --leader-elect\n4026533319 pid       1  2108 root  ./bpfman system service --timeout=0 --csi-support\n4026533321 pid       1  2158 root  /bpfman-agent --health-probe-bind-address=:8175 --metrics-bind-address=127.0.0.1:8174\n4026533323 pid       1  2243 root  /csi-node-driver-registrar --v=5 --csi-address=/csi/csi.sock --kubelet-registration-path=/var/lib/kubelet/plugins/csi-bpfman/csi.sock\n
    We can see the bpfman containers we were looking at earlier in the output above. Let's take a deeper look at the bpfman-agent container that has a PID of 2158 on the Kubernetes node container and a PID namespace of 4026533321. If we go back to the base server, we can find the container's PID there.

    sudo lsns -t pid | grep 4026533321\n4026533321 pid       1 222225 root  /bpfman-agent --health-probe-bind-address=:8175 --metrics-bind-address=127.0.0.1:8174\n

    This command tells us that the PID of our bpfman-agent is 222225 on the base server. The information for this PID is contained in /proc/222225. The following command will show the PID mappings for that one container at each level.

    sudo grep NSpid /proc/222225/status\nNSpid:  222225  2158    1\n

    The output above tells us that the PIDs for the bpfman-agent container are 222225 on the base server, 2158 in the Docker \"node\" container, and 1 inside the container itself.

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#moving-forward","title":"Moving Forward","text":"

    As always, there is more work to do. The highest priority goals are to support additional eBPF program types and to use the Container Runtime Interface directly.

    We chose uprobes first because we had a user with a specific need. However, there are use cases for other eBPF program types.

    We used crictl in this first implementation because it already exists, supports multiple container runtimes, handles the corner cases, and is maintained. This allowed us to focus on the bpfman implementation and get the feature done more quickly. However, it would be better to access the container runtime interface directly rather than using an external executable.
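For illustration, the kind of lookup this delegates to crictl can be done by hand. The sketch below is illustrative only - the container name is a placeholder, and this is not necessarily bpfman's exact invocation:

# Illustrative: discover a container's host PID via crictl.
CONTAINER_ID=$(sudo crictl ps --name my-app -q)   # "my-app" is a placeholder
sudo crictl inspect --output go-template --template '{{.info.pid}}' "$CONTAINER_ID"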

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/","title":"bpfman: A Novel Way to Manage eBPF","text":"

In today's cloud ecosystem, there's a demand for low-level system access to enable high-performance observability, security, and networking functionality for applications. Historically these features have been implemented in user space; however, the ability to program such functionality into the kernel itself can provide many benefits, including (but not limited to) performance. Even so, many Linux users still shy away from in-tree or kernel module development due to the slow rate of iteration and the ensuing large management burden. eBPF has emerged as a technology in the Linux kernel looking to change all that.

    eBPF is a simple and efficient way to dynamically load programs into the kernel at runtime, with safety and performance provided by the kernel itself using a Just-In-Time (JIT) compiler and verification process. There are a wide variety of program types one can create with eBPF, which include everything from networking applications to security systems.

    However, eBPF is still a fairly nascent technology and it's not all kittens and rainbows. The process of developing, testing, deploying, and maintaining eBPF programs is not a road well traveled yet, and the story gets even more complicated when you want to deploy your programs in a multi-node system, such as a Kubernetes cluster. It was these kinds of problems that motivated the creation of bpfman, a system daemon for loading and managing eBPF programs in both traditional systems and Kubernetes clusters. In this blog post, we'll discuss the problems bpfman can help solve, and how to deploy and use it.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#current-challenges-with-developing-and-deploying-ebpf-programs","title":"Current Challenges with Developing and Deploying eBPF Programs","text":"

    While some organizations have had success developing, deploying, and maintaining production software which includes eBPF programs, the barrier to entry is still very high.

Following the basic eBPF development workflow - which often involves many hours trying to interpret and fix mind-bending eBPF verifier errors - deploying a program in testing and staging environments often results in a lot of custom program loading and management functionality specific to the application. When moving to production systems in environments like Kubernetes clusters, the operational considerations continue to compound.

Security is another significant challenge, which we will cover in more depth in a follow-on blog. At a high level, applications that use eBPF typically load their own eBPF programs, which requires at least CAP_BPF. Many BPF programs and attach points require additional capabilities, from CAP_SYS_PTRACE and CAP_NET_ADMIN up to and including CAP_SYS_ADMIN. These privileges include capabilities that aren't strictly necessary for eBPF and are too coarsely grained to be useful. Since the processes that load eBPF are usually long-lived and often don't drop privileges, this leaves a wide attack surface.

While it doesn't solve all the ergonomic and maintenance problems associated with adopting eBPF, bpfman does try to address several of these issues -- particularly as they pertain to security and the lifecycle management of eBPF programs. In the coming sections, we will go into more depth about what bpfman does, and how it can help reduce the costs associated with deploying and managing eBPF-powered workloads.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#bpfman-overview","title":"bpfman Overview","text":"

    The bpfman project provides a software stack that makes it easy to manage the full lifecycle of eBPF programs. In particular, it can load, unload, modify, and monitor eBPF programs on a single host, or across a full Kubernetes cluster. The key components of bpfman include the bpfman daemon itself which can run independently on any Linux box, an accompanying Kubernetes Operator designed to bring first-class support to clusters via Custom Resource Definitions (CRDs), and eBPF program packaging.

    These components will be covered in more detail in the following sections.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#bpfman-daemon","title":"bpfman Daemon","text":"

    The bpfman daemon works directly with the operating system to manage eBPF programs. It loads, updates, and unloads eBPF programs, pins maps, and provides visibility into the eBPF programs loaded on a system. Currently, bpfman fully supports XDP, TC, Tracepoint, uProbe, and kProbe eBPF programs. In addition, bpfman can display information about all types of eBPF programs loaded on a system whether they were loaded by bpfman or some other mechanism. bpfman is developed in the Rust programming language and uses Aya, an eBPF library which is also developed in Rust.

When used on an individual server, bpfman runs as a system daemon, and applications communicate with it using a gRPC API. bpfman can also be used via a command line interface, which in turn uses the gRPC API. The following is an example of using bpfman to load and attach an XDP program.

    bpfman load-from-image -g GLOBAL_u8=01 -i quay.io/bpfman-bytecode/xdp_pass:latest xdp -i eth0 -p 100\n

    This architecture is depicted in the following diagram.

    Using bpfman in this manner significantly improves security because the API is secured using mTLS, and only bpfman needs the privileges required to load and manage eBPF programs and maps.

    Writing eBPF code is tough enough as it is. Typically, an eBPF-based application would need to also implement support for the lifecycle management of the required eBPF programs. bpfman does that for you and allows you to focus on developing your application.

    Another key functional advantage that bpfman offers over libbpf or the Cilium ebpf-go library is support for multiple XDP programs. Standard XDP only allows a single XDP program on a given interface, while bpfman supports loading multiple XDP programs on each interface using the multi-prog protocol defined in libxdp. This allows the user to add, delete, update, prioritize, and re-prioritize the multiple programs on each interface. There is also support to configure whether the flow of execution should terminate and return or continue to the next program in the list based on the return value.

    While TC natively supports multiple programs on each attach point, it lacks the controls and flexibility enabled by the multi-prog protocol. bpfman therefore also supports the same XDP multi-prog solution for TC programs which has the added benefit of a consistent user experience for both XDP and TC programs.

eBPF programs are also difficult to debug on a system. The visibility provided by bpfman can be a key tool in understanding which programs are deployed and how they may interact.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#bpfman-kubernetes-support","title":"bpfman Kubernetes Support","text":"

The benefits of bpfman are brought to Kubernetes by the bpfman operator. The bpfman operator is developed in Go using the Operator SDK framework, so it should be familiar to most Kubernetes application developers. The bpfman operator deploys a daemonset, containing both the bpfman and bpfman agent processes, on each node. Rather than making requests directly to bpfman with the gRPC API or CLI as described above, Kubernetes applications use bpfman custom resource definitions (CRDs) to ask bpfman to load and attach eBPF programs. bpfman uses two types of CRDs: Program CRDs for each eBPF program type (referred to as *Program CRDs, where * = Xdp, Tc, etc.), created by the application to express the desired state of an eBPF program on the cluster, and per-node BpfProgram CRDs, created by the bpfman agent to report the current state of the eBPF program on each node.

    Using XDP as an example, the application can request that an XDP program be loaded on multiple nodes using the XdpProgram CRD, which includes the necessary information such as the bytecode image to load, interface to attach it to, and priority. An XdpProgram CRD that would do the same thing as the CLI command shown above on every node in a cluster is shown below.

    apiVersion: bpfman.io/v1alpha1\nkind: XdpProgram\nmetadata:\n  labels:\n    app.kubernetes.io/name: xdpprogram\n  name: xdp-pass-all-nodes\nspec:\n  name: pass\n  # Select all nodes\n  nodeselector: {}\n  interfaceselector:\n    primarynodeinterface: true\n  priority: 0\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/xdp_pass:latest\n  globaldata:\n    GLOBAL_u8:\n      - 0x01\n

The bpfman agent on each node watches for the *Program CRDs and makes calls to the local instance of bpfman as necessary to ensure that the state on the local node reflects the state requested in the *Program CRD. The agent then creates and updates a BpfProgram object for each *Program CRD, reflecting the state of the program on that node and reporting the program's eBPF map information. The following is the BpfProgram CRD on one node for the above XdpProgram CRD.

    kubectl get bpfprograms.bpfman.io xdp-pass-all-nodes-bpfman-deployment-control-plane-eth0 -o yaml\n
    apiVersion: bpfman.io/v1alpha1\nkind: BpfProgram\nmetadata:\n  annotations:\n    bpfman.io.xdpprogramcontroller/interface: eth0\n  creationTimestamp: \"2023-08-29T22:08:12Z\"\n  finalizers:\n  - bpfman.io.xdpprogramcontroller/finalizer\n  generation: 1\n  labels:\n    bpfman.io/ownedByProgram: xdp-pass-all-nodes\n    kubernetes.io/hostname: bpfman-deployment-control-plane\n  name: xdp-pass-all-nodes-bpfman-deployment-control-plane-eth0\n  ownerReferences:\n  - apiVersion: bpfman.io/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: XdpProgram\n    name: xdp-pass-all-nodes\n    uid: 838dc2f8-a348-427e-9dc4-f6a6ea621930\n  resourceVersion: \"2690\"\n  uid: 5a622961-e5b0-44fe-98af-30756b2d0b62\nspec:\n  type: xdp\nstatus:\n  conditions:\n  - lastTransitionTime: \"2023-08-29T22:08:14Z\"\n    message: Successfully loaded bpfProgram\n    reason: bpfmanLoaded\n    status: \"True\"\n    type: Loaded\n

    Finally, the bpfman operator watches for updates to the BpfProgram objects and reports the global state of each eBPF program. If the program was successfully loaded on every selected node, it will report success, otherwise, it will identify the node(s) that had a problem. The following is the XdpProgram CRD as updated by the operator.

    kubectl get xdpprograms.bpfman.io xdp-pass-all-nodes -o yaml\n
    apiVersion: bpfman.io/v1alpha1\nkind: XdpProgram\nmetadata:\n  annotations:\n    kubectl.kubernetes.io/last-applied-configuration: |\n      {\"apiVersion\":\"bpfman.io/v1alpha1\",\"kind\":\"XdpProgram\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/name\":\"xdpprogram\"},\"name\":\"xdp-pass-all-nodes\"},\"spec\":{\"bytecode\":{\"image\":{\"url\":\"quay.io/bpfman-bytecode/xdp_pass:latest\"}},\"globaldata\":{\"GLOBAL_u8\":[1]},\"interfaceselector\":{\"primarynodeinterface\":true},\"nodeselector\":{},\"priority\":0,\"bpffunctionname\":\"pass\"}}\n  creationTimestamp: \"2023-08-29T22:08:12Z\"\n  finalizers:\n  - bpfman.io.operator/finalizer\n  generation: 2\n  labels:\n    app.kubernetes.io/name: xdpprogram\n  name: xdp-pass-all-nodes\n  resourceVersion: \"2685\"\n  uid: 838dc2f8-a348-427e-9dc4-f6a6ea621930\nspec:\n  bytecode:\n    image:\n      imagepullpolicy: IfNotPresent\n      url: quay.io/bpfman-bytecode/xdp_pass:latest\n  globaldata:\n    GLOBAL_u8: 0x01\n  interfaceselector:\n    primarynodeinterface: true\n  mapownerselector: {}\n  nodeselector: {}\n  priority: 0\n  proceedon:\n  - pass\n  - dispatcher_return\n  name: pass\nstatus:\n  conditions:\n  - lastTransitionTime: \"2023-08-29T22:08:12Z\"\n    message: Waiting for Program Object to be reconciled to all nodes\n    reason: ProgramsNotYetLoaded\n    status: \"True\"\n    type: NotYetLoaded\n  - lastTransitionTime: \"2023-08-29T22:08:12Z\"\n    message: bpfProgramReconciliation Succeeded on all nodes\n    reason: ReconcileSuccess\n    status: \"True\"\n    type: ReconcileSuccess\n

More details about this process can be seen here.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#ebpf-program-packaging","title":"eBPF program packaging","text":"

The eBPF Bytecode Image specification was created as part of the bpfman project to define a way to package eBPF bytecode as OCI container images. Its use was illustrated in the CLI and XdpProgram CRD examples above in which the XDP program was loaded from quay.io/bpfman-bytecode/xdp_pass:latest. The initial motivation for this image spec was to facilitate the deployment of eBPF programs in container orchestration systems such as Kubernetes, where it is necessary to provide a portable way to distribute bytecode to all nodes that need it. However, bytecode images have proven useful on standalone Linux systems as well. When coupled with BPF CO-RE (Compile Once – Run Everywhere), portability is further enhanced in that applications can use the same bytecode images across different kernel versions without the need to recompile them for each version. Another benefit of bytecode containers is image signing. There is currently no way to sign and validate raw eBPF bytecode. However, the bytecode containers can be signed and validated by bpfman using sigstore to improve supply chain security.
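As a rough illustration of the signing side, a bytecode image could be signed with sigstore's cosign as follows. This is a sketch only - the image URL is a placeholder, and this is not necessarily the exact flow bpfman validates against:

# Illustrative: sign a bytecode image with a local cosign key pair.
cosign generate-key-pair
cosign sign --key cosign.key quay.io/example/xdp_pass:latest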

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#key-benefits-of-bpfman","title":"Key benefits of bpfman","text":"

    This section reviews some of the key benefits of bpfman. These benefits mostly apply to both standalone and Kubernetes deployments, but we will focus on the benefits for Kubernetes here.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#security","title":"Security","text":"

    Probably the most compelling benefit of using bpfman is enhanced security. When using bpfman, only the bpfman daemon, which can be tightly controlled, needs the privileges required to load eBPF programs, while access to the API can be controlled via standard RBAC methods on a per-application and per-CRD basis. Additionally, the signing and validating of bytecode images enables supply chain security.
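For example, access could be scoped so that a given team may only manage XdpProgram resources. The following is a generic RBAC sketch (the role and group names are made up for illustration):

# Illustrative RBAC sketch: allow one group to manage only XdpProgram CRDs.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: xdpprogram-editor
rules:
- apiGroups: ["bpfman.io"]
  resources: ["xdpprograms"]
  verbs: ["get", "list", "watch", "create", "update", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: xdpprogram-editors
subjects:
- kind: Group
  name: networking-team   # placeholder
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: xdpprogram-editor
  apiGroup: rbac.authorization.k8s.io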

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#visibility-and-debuggability","title":"Visibility and Debuggability","text":"

eBPF programs can interact with each other in unexpected ways. The multi-program support described above helps control these interactions by providing a common mechanism to prioritize and control the flow between the programs. However, there can still be problems, and there may be eBPF programs running on nodes that were loaded by other mechanisms that you don't even know about. bpfman helps here too by reporting all of the eBPF programs running on all of the nodes in a Kubernetes cluster.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#productivity","title":"Productivity","text":"

    As described above, managing the lifecycle of eBPF programs is something that each application currently needs to do on its own. It is even more complicated to manage the lifecycle of eBPF programs across a Kubernetes cluster. bpfman does this for you so you don't have to. eBPF bytecode images help here as well by simplifying the distribution of eBPF bytecode to multiple nodes in a cluster, and also allowing separate fine-grained versioning control for user space and kernel space code.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#demonstration","title":"Demonstration","text":"

    This demonstration is adapted from the instructions documented by Andrew Stoycos here.

    These instructions use kind and bpfman release v0.2.1. It should also be possible to run this demo on other environments such as minikube or an actual cluster.

Another option is to build the code yourself and use make run-on-kind to create the cluster as described in the given links. Then, start with step 5.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#run-the-demo","title":"Run the demo","text":"

    1. Create Kind Cluster

    kind create cluster --name=test-bpfman\n

2. Deploy cert-manager

    kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml\n

3. Deploy bpfman CRDs

    kubectl apply -f  https://github.com/bpfman/bpfman/releases/download/v0.2.1/bpfman-crds-install-v0.2.1.yaml\n

    4. Deploy bpfman-operator

    kubectl apply -f https://github.com/bpfman/bpfman/releases/download/v0.2.1/bpfman-operator-install-v0.2.1.yaml\n

    5. Verify the deployment

    kubectl get pods -A\n
    NAMESPACE            NAME                                              READY   STATUS    RESTARTS   AGE\nbpfman                 bpfman-daemon-nkzpf                                 2/2     Running   0          28s\nbpfman                 bpfman-operator-77d697fdd4-clrf7                    2/2     Running   0          33s\ncert-manager         cert-manager-99bb69456-x8n84                      1/1     Running   0          57s\ncert-manager         cert-manager-cainjector-ffb4747bb-pt4hr           1/1     Running   0          57s\ncert-manager         cert-manager-webhook-545bd5d7d8-z5brw             1/1     Running   0          57s\nkube-system          coredns-565d847f94-gjjft                          1/1     Running   0          61s\nkube-system          coredns-565d847f94-mf2cq                          1/1     Running   0          61s\nkube-system          etcd-test-bpfman-control-plane                      1/1     Running   0          76s\nkube-system          kindnet-lv6f9                                     1/1     Running   0          61s\nkube-system          kube-apiserver-test-bpfman-control-plane            1/1     Running   0          76s\nkube-system          kube-controller-manager-test-bpfman-control-plane   1/1     Running   0          77s\nkube-system          kube-proxy-dtmvb                                  1/1     Running   0          61s\nkube-system          kube-scheduler-test-bpfman-control-plane            1/1     Running   0          78s\nlocal-path-storage   local-path-provisioner-684f458cdd-8gxxv           1/1     Running   0          61s\n

Note that we have the bpfman-operator, bpfman-daemon and cert-manager pods running.

    6. Deploy the XDP counter program and user space application

    kubectl apply -f https://github.com/bpfman/bpfman/releases/download/v0.2.1/go-xdp-counter-install-v0.2.1.yaml\n

    7. Confirm that the programs are loaded

    Userspace program:

    kubectl get pods -n go-xdp-counter\n
    NAME                      READY   STATUS              RESTARTS   AGE\ngo-xdp-counter-ds-9lpgp   0/1     ContainerCreating   0          5s\n

    XDP program:

    kubectl get xdpprograms.bpfman.io -o wide\n
    NAME                     BPFFUNCTIONNAME   NODESELECTOR   PRIORITY   INTERFACESELECTOR               PROCEEDON\ngo-xdp-counter-example   stats             {}             55         {\"primarynodeinterface\":true}   [\"pass\",\"dispatcher_return\"]\n

    8. Confirm that the counter program is counting packets.

    Notes:

    • The counters are updated every 5 seconds, and stats are being collected for the pod's primary node interface, which may not have a lot of traffic. However, running the kubectl command below generates traffic on that interface, so run the command a few times and give it a few seconds in between to confirm whether the counters are incrementing.
    • Replace \"go-xdp-counter-ds-9lpgp\" with the go-xdp-counter pod name for your deployment.
    kubectl logs go-xdp-counter-ds-9lpgp -n go-xdp-counter | tail\n
    2023/09/05 16:58:21 1204 packets received\n2023/09/05 16:58:21 13741238 bytes received\n\n2023/09/05 16:58:24 1220 packets received\n2023/09/05 16:58:24 13744258 bytes received\n\n2023/09/05 16:58:27 1253 packets received\n2023/09/05 16:58:27 13750364 bytes received\n

    9. Deploy the xdp-pass-all-nodes program with priority set to 50 and proceedon set to drop as shown below

    kubectl apply -f - <<EOF\napiVersion: bpfman.io/v1alpha1\nkind: XdpProgram\nmetadata:\n  labels:\n    app.kubernetes.io/name: xdpprogram\n  name: xdp-pass-all-nodes\nspec:\n  name: pass\n  nodeselector: {}\n  interfaceselector:\n    primarynodeinterface: true\n  priority: 50\n  proceedon:\n    - drop\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/xdp_pass:latest\nEOF\n

    10. Verify both XDP programs are loaded.

    kubectl get xdpprograms.bpfman.io -o wide\n
    NAME                     BPFFUNCTIONNAME   NODESELECTOR   PRIORITY   INTERFACESELECTOR               PROCEEDON\ngo-xdp-counter-example   stats             {}             55         {\"primarynodeinterface\":true}   [\"pass\",\"dispatcher_return\"]\nxdp-pass-all-nodes       pass              {}             50         {\"primarynodeinterface\":true}   [\"drop\"]\n

    The priority setting determines the order in which programs attached to the same interface are executed by the dispatcher with a lower number being a higher priority. The go-xdp-counter-example program was loaded at priority 55, so the xdp-pass-all-nodes program will execute before the go-xdp-counter-example program.

    The proceedon setting tells the dispatcher whether to \"proceed\" to execute the next lower priority program attached to the same interface depending on the program's return value. When we set proceedon to drop, execution will proceed only if the program returns XDP_DROP. However, the xdp-pass-all-nodes program only returns XDP_PASS, so execution will terminate after it runs.
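In pseudocode, the dispatcher behavior described in the two notes above looks roughly like the following sketch. This is purely illustrative (the types and names are made up here), not bpfman's actual dispatcher implementation:

// Illustrative sketch of the XDP dispatcher's priority/proceed-on logic.
#[derive(Clone, Copy, PartialEq, Debug)]
enum XdpAction { Pass, Drop }

struct Program {
    priority: u32,              // lower value = higher priority
    proceed_on: Vec<XdpAction>,
}

impl Program {
    // Stand-in for running the eBPF program; always returns Pass here,
    // just like xdp_pass does.
    fn run(&self) -> XdpAction { XdpAction::Pass }
}

fn dispatch(mut programs: Vec<Program>) -> XdpAction {
    programs.sort_by_key(|p| p.priority); // higher priority runs first
    for prog in &programs {
        let action = prog.run();
        // Proceed to the next program only if the return value is in
        // this program's proceed-on list; otherwise stop and return it.
        if !prog.proceed_on.contains(&action) {
            return action;
        }
    }
    XdpAction::Pass
}

fn main() {
    // Mirrors the demo: xdp-pass-all-nodes at priority 50 with
    // proceedon=[drop] returns Pass, so the priority-55 counter never runs.
    let chain = vec![
        Program { priority: 50, proceed_on: vec![XdpAction::Drop] },
        Program { priority: 55, proceed_on: vec![XdpAction::Pass, XdpAction::Drop] },
    ];
    println!("{:?}", dispatch(chain)); // prints: Pass
}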

    Therefore, by loading the xdp-pass-all-nodes program in this way, we should have effectively stopped the go-xdp-counter-example program from running. Let's confirm that.

    11. Verify that packet counts are not being updated anymore

    Run the following command several times

    kubectl logs go-xdp-counter-ds-9lpgp -n go-xdp-counter | tail\n
    2023/09/05 17:10:27 1395 packets received\n2023/09/05 17:10:27 13799730 bytes received\n\n2023/09/05 17:10:30 1395 packets received\n2023/09/05 17:10:30 13799730 bytes received\n\n2023/09/05 17:10:33 1395 packets received\n2023/09/05 17:10:33 13799730 bytes received\n

12. Now, change the priority of the xdp-pass-all-nodes program to 60

    kubectl apply -f - <<EOF\napiVersion: bpfman.io/v1alpha1\nkind: XdpProgram\nmetadata:\n  labels:\n    app.kubernetes.io/name: xdpprogram\n  name: xdp-pass-all-nodes\nspec:\n  name: pass\n  # Select all nodes\n  nodeselector: {}\n  interfaceselector:\n    primarynodeinterface: true\n  priority: 60\n  proceedon:\n    - drop\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/xdp_pass:latest\nEOF\n

    13. Confirm that packets are being counted again

    Run the following command several times

    kubectl logs go-xdp-counter-ds-9lpgp -n go-xdp-counter | tail\n
    2023/09/05 17:12:21 1435 packets received\n2023/09/05 17:12:21 13806214 bytes received\n\n2023/09/05 17:12:24 1505 packets received\n2023/09/05 17:12:24 13815359 bytes received\n\n2023/09/05 17:12:27 1558 packets received\n2023/09/05 17:12:27 13823065 bytes received\n

    We can see that the counters are incrementing again.

    14. Clean everything up

    Delete the programs

kubectl delete xdpprogram xdp-pass-all-nodes\nkubectl delete -f https://github.com/bpfman/bpfman/releases/download/v0.2.1/go-xdp-counter-install-v0.2.1.yaml\n

    And/or, delete the whole kind cluster

    kind delete clusters test-bpfman\n
    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#joining-the-bpfman-community","title":"Joining the bpfman community","text":"

    If you're interested in bpfman and want to get involved, you can connect with the community in multiple ways. If you have some simple questions or need some help feel free to start a discussion. If you find an issue, or you want to request a new feature, please create an issue. If you want something a little more synchronous, the project maintains a #bpfman channel on Kubernetes Slack and we have a weekly community meeting where everyone can join and bring topics to discuss about the project. We hope to see you there!

    "},{"location":"blog/2024/01/15/bpfmans-shift-towards-a-daemonless-design-and-using-sled-a-high-performance-embedded-database/","title":"bpfman's Shift Towards a Daemonless Design and Using Sled: a High Performance Embedded Database","text":"

As part of issue #860, the community has steadily been converting all of the internal state management to go through a sled database instance, which is part of the larger effort to make bpfman completely daemonless.

    This article will go over the reasons behind the change and dive into some of the details of the actual implementation.

    "},{"location":"blog/2024/01/15/bpfmans-shift-towards-a-daemonless-design-and-using-sled-a-high-performance-embedded-database/#why","title":"Why?","text":"

State management in bpfman has always been a headache, not because there's a huge amount of disparate data, but because there are multiple representations of the same data. Additionally, the delicate filesystem interactions and layout previously used to ensure persistence across restarts often led to issues.

    Understanding the existing flow of data in bpfman can help make this a bit clearer:

With this design there was a lot of data wrangling required to convert the tonic-generated Rust bindings for the protocol buffer API into data structures that were useful for bpfman. Specifically, data would arrive via the gRPC server as specified in bpfman.v1.rs, where Rust types are inferred from the protobuf definition. In rpc.rs, data was then converted to an internal set of structures defined in command.rs. Prior to pull request #683 there was an explosion of types, with each bpfman command having its own set of internal structures and enums. Now, most of the data that bpfman needs internally, across all commands, to manage an eBPF program is stored in the ProgramData structure, which we'll take a deeper look at a bit later. Additionally, there is extra complexity for the XDP and TC program types, which rely on an eBPF dispatcher program to provide multi-program support on a single network interface; however, this article will instead focus on the simpler examples.

The tree of data stored by bpfman is quite complex, and this is made even more complicated since bpfman has to be persistent across restarts. To support this, raw data was often flushed to disk in the form of JSON files (all types in command.rs needed to implement serde's Serialize and Deserialize). Specific significance would also be encoded into bpfman's directory structure, i.e., all program-related information was stored in /run/bpfd/programs/<ID>. The extra infrastructure and failure modes introduced by this process were a constant headache, pushing the community to find a better solution.

    "},{"location":"blog/2024/01/15/bpfmans-shift-towards-a-daemonless-design-and-using-sled-a-high-performance-embedded-database/#why-sled","title":"Why Sled?","text":"

Sled is an open source project described on GitHub as \"the champagne of beta embedded databases\". The \"reasons\" for choosing an embedded database from the project website are pretty much spot on:

    Embedded databases are useful in several cases:\n\n- you want to store data on disk, without facing the complexity of files\n- you want to be simple, without operating an external database\n- you want to be fast, without paying network costs\n- using disk storage as a building block in your system\n

As discussed in the previous section, persistence across restarts is one of bpfman's core design constraints, and with sled we almost get it for free! Additionally, due to the pervasive nature of data management in bpfman's core workflow, the data store needed to be kept as simple and lightweight as possible, ruling out heavier production-ready external database systems such as MySQL or Redis.

Now, this mostly covered why embedded databases in general, but why did we choose sled...well, because it's written in :crab: Rust :crab: of course! Apart from the obvious, we took a small dive into the project before rewriting everything by transitioning the OCI bytecode image library to use the db rather than the filesystem. Overall the experience was extremely positive, due to the following:

    • No more dealing directly with the filesystem, the sled instance is flushed to the fs automatically every 500 ms by default and for good measure we manually flush it before shutting down.
    • The API is extremely simple, traditional get and insert operations function as expected.
• Error handling with sled::Error is relatively simple and easy to map explicitly to a BpfmanError
• The db \"tree\" concept makes it easy to have separate key-spaces within the same instance (see the sketch after this list).
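As a quick feel for the API, here is a minimal, self-contained sketch of the operations listed above (illustrative only; the path and keys are made up):

// Minimal sled sketch: open, insert, get, trees, and an explicit flush.
fn main() -> sled::Result<()> {
    let db = sled::open("/tmp/sled-demo")?;

    // Traditional get and insert operations.
    db.insert(b"program_1_name", b"pass")?;
    if let Some(v) = db.get(b"program_1_name")? {
        println!("name = {}", String::from_utf8_lossy(&v));
    }

    // Trees give separate key-spaces within the same instance.
    let tree = db.open_tree("programs")?;
    tree.insert(b"name", b"pass")?;

    // sled flushes periodically on its own; flush manually for good measure.
    db.flush()?;
    Ok(())
}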
    "},{"location":"blog/2024/01/15/bpfmans-shift-towards-a-daemonless-design-and-using-sled-a-high-performance-embedded-database/#transitioning-to-sled","title":"Transitioning to Sled","text":"

Using the new embedded database started with the creation of a sled instance that could be easily shared across all of the modules in bpfman. To do this we utilized a globally available lazy_static variable called ROOT_DB in main.rs:

    #[cfg(not(test))]\nlazy_static! {\n    pub static ref ROOT_DB: Db = Config::default()\n        .path(STDIR_DB)\n        .open()\n        .expect(\"Unable to open root database\");\n}\n\n#[cfg(test)]\nlazy_static! {\n    pub static ref ROOT_DB: Db = Config::default()\n        .temporary(true)\n        .open()\n        .expect(\"Unable to open temporary root database\");\n}\n

This block creates or opens the filesystem-backed database at /var/lib/bpfman/db only when the ROOT_DB variable is first accessed, and also allows for the creation of a temporary db instance when running in unit tests. With this setup, all of the modules within bpfman can now easily access the database instance by simply using it, i.e., use crate::ROOT_DB.
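For example, a module might use the shared handle like this hypothetical sketch (the tree name and key are made up for illustration):

// Illustrative only: a bpfman module writing through the shared ROOT_DB.
use crate::ROOT_DB;

fn save_program_name(id: u32, name: &str) -> sled::Result<()> {
    // Give each program its own tree (key-space), named by its kernel id.
    let tree = ROOT_DB.open_tree(format!("prog_{id}"))?;
    tree.insert("name", name.as_bytes())?;
    Ok(())
}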

Next, the existing bpfman structures needed to be flattened in order to work with the db; the central ProgramData structure can be used to demonstrate how this was completed. Prior to the recent sled conversion, that structure looked like:

    /// ProgramInfo stores information about bpf programs that are loaded and managed\n/// by bpfd.\n#[derive(Debug, Serialize, Deserialize, Clone, Default)]\npub(crate) struct ProgramData {\n    // known at load time, set by user\n    name: String,\n    location: Location,\n    metadata: HashMap<String, String>,\n    global_data: HashMap<String, Vec<u8>>,\n    map_owner_id: Option<u32>,\n\n    // populated after load\n    kernel_info: Option<KernelProgramInfo>,\n    map_pin_path: Option<PathBuf>,\n    maps_used_by: Option<Vec<u32>>,\n\n    // program_bytes is used to temporarily cache the raw program data during\n    // the loading process.  It MUST be cleared following a load so that there\n    // is not a long lived copy of the program data living on the heap.\n    #[serde(skip_serializing, skip_deserializing)]\n    program_bytes: Vec<u8>,\n}\n

This worked well enough, but as mentioned before, the process of flushing the data to disk involved manual serialization to JSON and needed to occur at a specific point in time (following program load), which made disaster recovery almost impossible and could sometimes result in lost or partially reconstructed state.

With sled, the first idea was to completely flatten ALL of bpfman's data into a single key-space, so that program.name would simply turn into db.get(\"program_<ID>_name\"); however, removing all of the core structures would have resulted in a complex diff which would have been hard to review and merge. Therefore a more staged approach was taken: the ProgramData structure was kept around, and now looks like:

    /// ProgramInfo stores information about bpf programs that are loaded and managed\n/// by bpfman.\n#[derive(Debug, Clone)]\npub(crate) struct ProgramData {\n    // Prior to load this will be a temporary Tree with a random ID, following\n    // load it will be replaced with the main program database tree.\n    db_tree: sled::Tree,\n\n    // populated after load, randomly generated prior to load.\n    id: u32,\n\n    // program_bytes is used to temporarily cache the raw program data during\n    // the loading process.  It MUST be cleared following a load so that there\n    // is not a long lived copy of the program data living on the heap.\n    program_bytes: Vec<u8>,\n}\n

All of the fields are now removed in favor of a private reference to the unique sled::Tree instance for this ProgramData, which is named using the unique kernel ID for the program. Each sled::Tree represents a single logical key-space / namespace / bucket, which allows key generation to be kept simple; i.e., db.get(\"program_<ID>_name\") can now be db_tree_prog_0000.get(\"program_name\"). Additionally, getters and setters are now built for each existing field so that access to the db can be controlled and the serialization/deserialization process can be hidden from the caller:

    ...\npub(crate) fn set_name(&mut self, name: &str) -> Result<(), BpfmanError> {\n    self.insert(\"name\", name.as_bytes())\n}\n\npub(crate) fn get_name(&self) -> Result<String, BpfmanError> {\n    self.get(\"name\").map(|v| bytes_to_string(&v))\n}\n...\n

Therefore, ProgramData is now less of a container for program data and more of a wrapper for accessing program data. The getters/setters act as a bridge between standard Rust types and the raw bytes stored in the database, i.e., the sled::IVec type.

Once this was completed for all the relevant fields on all the relevant types (see pull request #874), the data bpfman needed for its managed eBPF programs was automatically synced to disk :partying_face:

    "},{"location":"blog/2024/01/15/bpfmans-shift-towards-a-daemonless-design-and-using-sled-a-high-performance-embedded-database/#tradeoffs","title":"Tradeoffs","text":"

All design changes come with some tradeoffs: for bpfman's conversion to sled, the main negative ended up being the complexity introduced by the sled::IVec type. It is basically just a thread-safe reference-counting pointer to a raw byte slice, and it is the only type raw database operations can be performed with. Previously, when using serde_json, all serialization/deserialization was handled automatically; with sled, the conversion is handled manually, internally. Therefore, instead of a library handling the conversion of a Rust string (std::string::String) to raw bytes (&[u8]), bpfman has to handle it itself, using std::string::String::as_bytes and bpfman::utils::bytes_to_string:

    pub(crate) fn bytes_to_string(bytes: &[u8]) -> String {\n    String::from_utf8(bytes.to_vec()).expect(\"failed to convert &[u8] to string\")\n}\n

For strings, conversion was simple enough, but when working with more complex Rust data types like HashMaps and Vectors this became a bit more of an issue. For Vectors, we simply flatten the structure into a group of key/values with indexes encoded into the key:

        pub(crate) fn set_kernel_map_ids(&mut self, map_ids: Vec<u32>) -> Result<(), BpfmanError> {\n        let map_ids = map_ids.iter().map(|i| i.to_ne_bytes()).collect::<Vec<_>>();\n\n        map_ids.iter().enumerate().try_for_each(|(i, v)| {\n            sled_insert(&self.db_tree, format!(\"kernel_map_ids_{i}\").as_str(), v)\n        })\n    }\n

The sled scan_prefix(<K>) API then allows for easy fetching and rebuilding of the vector:

        pub(crate) fn get_kernel_map_ids(&self) -> Result<Vec<u32>, BpfmanError> {\n        self.db_tree\n            .scan_prefix(\"kernel_map_ids_\".as_bytes())\n            .map(|n| n.map(|(_, v)| bytes_to_u32(v.to_vec())))\n            .map(|n| {\n                n.map_err(|e| {\n                    BpfmanError::DatabaseError(\"Failed to get map ids\".to_string(), e.to_string())\n                })\n            })\n            .collect()\n    }\n
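The bytes_to_u32 helper used above is the numeric counterpart to bytes_to_string. A sketch of what such a helper looks like, assuming it simply inverts the to_ne_bytes encoding from the setter (the actual helper lives in bpfman's utils module):

pub(crate) fn bytes_to_u32(bytes: Vec<u8>) -> u32 {
    u32::from_ne_bytes(
        bytes
            .try_into()
            .expect("failed to convert Vec<u8> to [u8; 4]"),
    )
}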

    For HashMaps, we follow a similar paradigm, except the map key is encoded in the database key:

        pub(crate) fn set_metadata(\n        &mut self,\n        data: HashMap<String, String>,\n    ) -> Result<(), BpfmanError> {\n        data.iter().try_for_each(|(k, v)| {\n            sled_insert(\n                &self.db_tree,\n                format!(\"metadata_{k}\").as_str(),\n                v.as_bytes(),\n            )\n        })\n    }\n\n    pub(crate) fn get_metadata(&self) -> Result<HashMap<String, String>, BpfmanError> {\n    self.db_tree\n        .scan_prefix(\"metadata_\")\n        .map(|n| {\n            n.map(|(k, v)| {\n                (\n                    bytes_to_string(&k)\n                        .strip_prefix(\"metadata_\")\n                        .unwrap()\n                        .to_string(),\n                    bytes_to_string(&v).to_string(),\n                )\n            })\n        })\n        .map(|n| {\n            n.map_err(|e| {\n                BpfmanError::DatabaseError(\"Failed to get metadata\".to_string(), e.to_string())\n            })\n        })\n        .collect()\n    }\n

The same result could be achieved by creating individual database trees for each Vector/HashMap instance; however, our goal was to keep the layout as flat as possible. Although this resulted in some extra complexity within the data layer, the overall benefits still outweighed the extra code once the conversion was complete.

    "},{"location":"blog/2024/01/15/bpfmans-shift-towards-a-daemonless-design-and-using-sled-a-high-performance-embedded-database/#moving-forward-and-getting-involved","title":"Moving forward and Getting Involved","text":"

Once the conversion to sled is fully complete (see issue #860), the project will be able to completely transition to becoming a library without having to worry about data and state management.

If you are interested in in-memory databases, eBPF, Rust, or any of the technologies discussed today, please don't hesitate to reach out on Kubernetes Slack in the #bpfman channel, or join one of the community meetings to get involved.

    "},{"location":"blog/2024/01/04/community-meeting-january-4-2024/","title":"Community Meeting: January 4, 2024","text":""},{"location":"blog/2024/01/04/community-meeting-january-4-2024/#welcome-to-2024","title":"Welcome to 2024!","text":"

    Welcome to the first bpfman Community Meeting of 2024. We are happy to start off a new year and excited for all the changes in store for bpfman in 2024!

Below were some of the discussion points from this week's Community Meeting.

    • bpfman-csi Needs To Become Its Own Binary
    • Kubernetes Support For Attaching uprobes In Containers
    • Building The Community
    "},{"location":"blog/2024/01/04/community-meeting-january-4-2024/#bpfman-csi-needs-to-become-its-own-binary","title":"bpfman-csi Needs To Become Its Own Binary","text":"

Some of the next work items for bpfman revolve around removing the async code from the code base, making bpfman-core a Rust library, and removing all the gRPC logic. Dave (@dave-tucker) is currently investigating this. One area to help out is to take the bpfman-csi thread and make it its own binary. This may require making bpfman a bin and lib crate (which is fine; it just needs a lib.rs and to be very careful about what we're exporting). Andrew (@astoycos) is starting to take a look at this.

    "},{"location":"blog/2024/01/04/community-meeting-january-4-2024/#kubernetes-support-for-attaching-uprobes-in-containers","title":"Kubernetes Support For Attaching uprobes In Containers","text":"

Base support for attaching uprobes in containers is currently merged. Andre (@anfredette) pushed PR#875 for the integration with Kubernetes. The hard problems are solved, like getting the container PID, but the current PR has some shortcuts to get the functionality working before the holiday break. So PR#875 is not ready for review yet, but Dave (@dave-tucker) and Andre (@anfredette) may do a quick review to verify the design principles.

    "},{"location":"blog/2024/01/04/community-meeting-january-4-2024/#building-the-community","title":"Building The Community","text":"

Short discussion on building the community. In a previous meeting, Dave (@dave-tucker) suggested capturing the meeting minutes in blogs. By placing them in a blog, they become searchable from search engines. Billy (@billy99) re-raised this topic and volunteered to start capturing the content. In future meetings, we may use the transcript feature from Google Meet to capture the content and try generating the blog via ChatGPT.

    "},{"location":"blog/2024/01/04/community-meeting-january-4-2024/#light-hearted-moments-and-casual-conversations","title":"Light-hearted Moments and Casual Conversations","text":"

    Amidst the technical discussions, the community members took a moment to share some light-hearted moments and casual conversations. Topics ranged from the challenges of post-holiday credit card bills to the complexities of managing family schedules during exam week. The discussion touched on the quirks of public school rules and the unique challenges of parenting during exam periods.

    The meeting ended on a friendly note, with plans for further collaboration and individual tasks assigned for the upcoming days. Participants expressed their commitment to pushing updates and improvements, with a promise to reconvene in the near future.

    "},{"location":"blog/2024/01/04/community-meeting-january-4-2024/#attendees","title":"Attendees","text":"
    • Andre Fredette (Red Hat)
    • Andrew Stoycos (Red Hat)
    • Billy McFall (Red Hat)
    • Dave Tucker (Red Hat)
    "},{"location":"blog/2024/01/04/community-meeting-january-4-2024/#bpfman-community-info","title":"bpfman Community Info","text":"

    A friendly reminder that the Community Meetings are every Thursday 10am-11am Eastern US Time and all are welcome!

    Google Meet joining info:

    • Google Meet
    • Or dial: (US) +1 984-221-0859 PIN: 613 588 790#
    • Agenda Document
    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/","title":"Community Meeting: January 11 and 18, 2024","text":""},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#hit-the-ground-running","title":"Hit the Ground Running","text":"

Another set of bpfman Community Meetings for 2024. There is a lot going on with bpfman in Q1 of 2024, with a lot of time being spent making bpfman daemonless. I bailed for a ski trip after the Jan 11 meeting, so the notes didn't get written up; this summary will therefore cover two weeks of meetings.

Below were some of the discussion points from the last two weeks' Community Meetings.

    • Manpage/CLI TAB Completion Questions (Jan 11)
    • Kubernetes Support for Attaching uprobes in Containers (Jan 11)
• Netlify Preview in GitHub Removed (Jan 11)
    • RPM Builds and Socket Activation (Jan 18)
    • KubeCon EU Discussion (Jan 18)
    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#january-11-2024","title":"January 11, 2024","text":""},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#manpagecli-tab-completion-questions-jan-11","title":"Manpage/CLI TAB Completion Questions (Jan 11)","text":"

The bpfman CLI now has TAB completion and man pages. However, a couple of nits need to be cleaned up (Issue#913), and Billy (@billy99) wanted to clarify a few issues encountered. The current implementation for both features uses an environment variable to set the destination directory for the generated files. Other features don't work this way, and there was a discussion on the proper location for the generated files. The decision was to use .output/..

There was another discussion around clap (the Rust CLI crate) and passing variables to clap from the Cargo.toml file. In the CLI code, #[command(author, version, about, long_about = None)] implies pulling the values from the Cargo.toml file, but we aren't setting any of those variables. Also, cargo xtask build-man-page and cargo xtask build-completion pull from the xtask Cargo.toml file. The decision was to set the variables explicitly in code and not pull them from Cargo.toml.
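For reference, setting the variables explicitly in code with clap's derive API looks roughly like this sketch (the values shown are placeholders, not bpfman's real metadata):

use clap::Parser;

// Illustrative only: metadata set explicitly rather than inherited from
// Cargo.toml's package fields.
#[derive(Parser)]
#[command(
    name = "bpfman",
    version = "0.0.0-example",
    about = "An eBPF program manager"
)]
struct Cli {}

fn main() {
    let _cli = Cli::parse();
}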

    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#kubernetes-support-for-attaching-uprobes-in-containers-jan-11","title":"Kubernetes Support for Attaching uprobes in Containers (Jan 11)","text":"

Andre (@anfredette) is working on a feature to enable attaching uprobes in other containers. Currently, bpfman only supports attaching uprobes within the bpfman container. There was a discussion on the proper way to format a query to the KubeAPI server to match on NodeName in a Pod list, and the discussion included a code walk-through. Andrew (@astoycos) found a possible solution (client-go:Issue#410) and Dave (@dave-tucker) suggested kubernetes-api:podspec-v1-core.

    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#netify-preview-in-github-removed-jan-11","title":"netify Preview in Github Removed (Jan 11)","text":"

Lastly, there was a discussion on the Netlify preview being removed from GitHub and a reminder why. Dave (@dave-tucker) explained that with the docs release history now in place, \"current\" is built from a branch and is not easy to preview. So for now, documentation developers need to run mkdocs locally (see generate-documentation).

    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#attendees-jan-11","title":"Attendees (Jan 11)","text":"
    • Andre Fredette (Red Hat)
    • Andrew Stoycos (Red Hat)
    • Billy McFall (Red Hat)
    • Dave Tucker (Red Hat)
    • Shane Utt (Kong)
    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#january-18-2024","title":"January 18, 2024","text":""},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#rpm-builds-and-socket-activation-jan-18","title":"RPM Builds and Socket Activation (Jan 18)","text":"

    RPM Builds for bpfman went in fairly recently and Billy (@billy99) had some questions around their implementation. RPM and Socket Activation were developed and merged around the same time and the RPM builds are not installing socket activation properly. Just verifying that RPMs should be installing the bpfman.socket file. And they should. There were also some questions on how to build RPMs locally. Verified that packit build locally is the way forward.

    Note: Socket activation was added to RPM Builds along with documentation on building and using RPMs in PR#922

    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#kubecon-eu-discussion-jan-18","title":"KubeCon EU Discussion (Jan 18)","text":"

    With KubeCon EU just around the corner (March 19-22, 2024 in Paris), discussion around bpfman talks and who was attending. Dave (@dave-tucker) is probably attending and Shane (@shaneutt) might attend. So if you are planning on attending KubeCon EU and are interested in bpfman or just eBPF, keep an eye out for these guys for some lively discussions!

    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#attendees-jan-18","title":"Attendees (Jan 18)","text":"
    • Billy McFall (Red Hat)
    • Dave Tucker (Red Hat)
    • Shane Utt (Kong)
    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#bpfman-community-info","title":"bpfman Community Info","text":"

    A friendly reminder that the Community Meetings are every Thursday 10am-11am Eastern US Time and all are welcome!

    Google Meet joining info:

    • Google Meet
    • Or dial: (US) +1 984-221-0859 PIN: 613 588 790#
    • Agenda Document
    "},{"location":"design/daemonless/","title":"Daemonless bpfd","text":""},{"location":"design/daemonless/#introduction","title":"Introduction","text":"

The bpfd daemon is a userspace daemon that runs on the host and responds to gRPC API requests over a unix socket to load, unload, and list the eBPF programs on a host.

The rationale behind running as a daemon was that something needs to be listening on the unix socket for API requests, and that we also maintain some in-memory state about the programs that have been loaded. However, since this daemon requires root privileges to load and unload eBPF programs, it is a security risk for it to be long-running - even with the mitigations we have in place to drop privileges and run as a non-root user. This risk is equivalent to that of something like Docker.

This document describes the design of a daemonless bpfd: a bpfd that only runs when required, for example, to load or unload an eBPF program.

    "},{"location":"design/daemonless/#design","title":"Design","text":"

    The daemonless bpfd is a single binary that collects some of the functionality from both bpfd and bpfctl.

    :note: Daemonless, not rootless. Since CAP_BPF (and more) is required to load and unload eBPF programs, we will still need to run as root. But at least we can run as root for a shorter period of time.

    "},{"location":"design/daemonless/#command-bpfd-system-service","title":"Command: bpfd system service","text":"

    This command will run the bpfd gRPC API server - for one or more of the gRPC API services we support.

It will listen on a unix socket (or tcp socket) for API requests - provided via a positional argument, defaulting to unix:///var/run/bpfd.sock. It will shut down after a timeout of inactivity - provided by a --timeout flag defaulting to 5 seconds.

    It will support being run as a systemd service, via socket activation, which will allow it to be started on demand when a request is made to the unix socket. When in this mode it will not create the unix socket itself, but will instead use LISTEN_FDS to determine the file descriptor of the unix socket to use.
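To make this concrete, here is a rough sketch of the pair of systemd units involved. This is illustrative only - the unit contents, file names, and binary path are assumptions, not a shipped configuration:

# bpfd.socket (illustrative sketch)
[Socket]
ListenStream=/var/run/bpfd.sock

[Install]
WantedBy=sockets.target

# bpfd.service (illustrative sketch) - started on demand by the socket
# unit, and told about the listening socket via LISTEN_FDS.
[Service]
ExecStart=/usr/sbin/bpfd system service --timeout=5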

    Usage in local development (or packaged in a container) is still possible by running as follows:

sudo bpfd system service --timeout=0 unix:///var/run/bpfd.sock\n

:note: The bpfd user and group will be deprecated. We will also remove some of the unit-file complexity (i.e., directories) and handle this in bpfd itself.

    "},{"location":"design/daemonless/#command-bpfd-load-file","title":"Command: bpfd load file","text":"

    As the name suggests, this command will load an eBPF program from a file. This was formerly bpfctl load-from-file.

    "},{"location":"design/daemonless/#command-bpfd-load-image","title":"Command: bpfd load image","text":"

    As the name suggests, this command will load an eBPF program from a container image. This was formerly bpfctl load-from-image.

    "},{"location":"design/daemonless/#command-bpfd-unload","title":"Command: bpfd unload","text":"

    This command will unload an eBPF program. This was formerly bpfctl unload.

    "},{"location":"design/daemonless/#command-bpfd-list","title":"Command: bpfd list","text":"

    This command will list the eBPF programs that are currently loaded. This was formerly bpfctl list.

    "},{"location":"design/daemonless/#command-bpfd-pull","title":"Command: bpfd pull","text":"

    This command will pull the bpfd container image from a registry. This was formerly bpfctl pull.

    "},{"location":"design/daemonless/#command-bpfd-images","title":"Command: bpfd images","text":"

    This command will list the bpfd container images that are available. This command didn't exist, but makes sense to add.

    "},{"location":"design/daemonless/#command-bpfd-version","title":"Command: bpfd version","text":"

    This command will print the version of bpfd. This command didn't exist, but makes sense to add.

    "},{"location":"design/daemonless/#state-management","title":"State Management","text":"

    This is perhaps the most significant change from how bpfd currently works.

    Currently bpfd maintains state in-memory about the programs that have been loaded (by bpfd, and the kernel). Some of this state is flushed to disk, so if bpfd is restarted it can reconstruct it.

Flushing to disk and state reconstruction are cumbersome at present, and having to move all state management out of in-memory stores is a forcing function to improve this. We will replace the existing state management with sled, which gives us a familiar API to work with while also being fast, reliable and persistent.

    "},{"location":"design/daemonless/#metrics-and-monitoring","title":"Metrics and Monitoring","text":"

    While adding metrics and monitoring is not a goal of this design, it should nevertheless be a consideration. In order to provide metrics to Prometheus or OpenTelemetry we will require an additional exporter process.

    We can either:

    1. Use the bpfd socket and retrieve metrics via the gRPC API
    2. Place state access + metrics gathering functions in a library, such that they could be used directly by the exporter process without requiring the bpfd socket.

The latter would be more in line with how podman-prometheus-exporter works. The benefit here is that the metrics exporter process can be long-running with fewer privileges - whereas if it were to hit the API over the socket, it would effectively negate the point of being daemonless in the first place, since collection will likely occur more frequently than the timeout on the socket.

    "},{"location":"design/daemonless/#benefits","title":"Benefits","text":"

    The benefits of this design are:

    • No long-running daemon with root privileges
• No need to run as a non-root user; this is important since the number of capabilities required is only getting larger.
    • We only need to ship a single binary.
    • We can use systemd socket activation to start bpfd on demand + timeout after a period of inactivity.
• Forces us to fix state management, since we can never rely on in-memory state.
• bpfd becomes more modular - if we wish to add programs for runtime enforcement, metrics, or any other purpose, then its design is decoupled from that of bpfd. It could be another binary, or a subcommand on the CLI, etc...
    "},{"location":"design/daemonless/#drawbacks","title":"Drawbacks","text":"

    None yet.

    "},{"location":"design/daemonless/#backwards-compatibility","title":"Backwards Compatibility","text":"
    • The bpfctl command will be removed and all functionality folded into bpfd
    • The bpfd command will be renamed to bpfd system service
    "},{"location":"developer-guide/api-spec/","title":"API Specification","text":"

    Packages:

    • bpfman.io/v1alpha1
    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1","title":"bpfman.io/v1alpha1","text":"

    Package v1alpha1 contains API Schema definitions for the bpfman.io API group.

    Resource Types:

    • BpfApplication
    • BpfProgram
    • FentryProgram
    • FexitProgram
    • KprobeProgram
    • TcProgram
    • TracepointProgram
    • UprobeProgram
    • XdpProgram
    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfApplication","title":"BpfApplication","text":"

    BpfApplication is the Schema for the bpfapplications API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string BpfApplication metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec BpfApplicationSpec BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    programs []BpfApplicationProgram

    Programs is a list of bpf programs supported for a specific application. It\u2019s possible that the application can selectively choose which program(s) to run from this list.

    status BpfApplicationStatus"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfProgram","title":"BpfProgram","text":"

    BpfProgram is the Schema for the Bpfprograms API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string BpfProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec BpfProgramSpec type string (Optional)

    Type specifies the bpf program type

    status BpfProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FentryProgram","title":"FentryProgram","text":"

    FentryProgram is the Schema for the FentryPrograms API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string FentryProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec FentryProgramSpec FentryProgramInfo FentryProgramInfo

    (Members of FentryProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    status FentryProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FexitProgram","title":"FexitProgram","text":"

    FexitProgram is the Schema for the FexitPrograms API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string FexitProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec FexitProgramSpec FexitProgramInfo FexitProgramInfo

    (Members of FexitProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    status FexitProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.KprobeProgram","title":"KprobeProgram","text":"

    KprobeProgram is the Schema for the KprobePrograms API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string KprobeProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec KprobeProgramSpec KprobeProgramInfo KprobeProgramInfo

    (Members of KprobeProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    status KprobeProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TcProgram","title":"TcProgram","text":"

    TcProgram is the Schema for the TcProgram API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string TcProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec TcProgramSpec TcProgramInfo TcProgramInfo

    (Members of TcProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    status TcProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TracepointProgram","title":"TracepointProgram","text":"

    TracepointProgram is the Schema for the TracepointPrograms API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string TracepointProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec TracepointProgramSpec TracepointProgramInfo TracepointProgramInfo

    (Members of TracepointProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    status TracepointProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.UprobeProgram","title":"UprobeProgram","text":"

    UprobeProgram is the Schema for the UprobePrograms API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string UprobeProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec UprobeProgramSpec UprobeProgramInfo UprobeProgramInfo

    (Members of UprobeProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    status UprobeProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.XdpProgram","title":"XdpProgram","text":"

    XdpProgram is the Schema for the XdpPrograms API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string XdpProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec XdpProgramSpec XdpProgramInfo XdpProgramInfo

    (Members of XdpProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    status XdpProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfAppCommon","title":"BpfAppCommon","text":"

    (Appears on: BpfApplicationSpec, FentryProgramSpec, FexitProgramSpec, KprobeProgramSpec, TcProgramSpec, TracepointProgramSpec, UprobeProgramSpec, XdpProgramSpec)

    BpfAppCommon defines the common attributes for all BpfApp programs

    Field Description nodeselector Kubernetes meta/v1.LabelSelector

NodeSelector allows the user to specify which nodes to deploy the bpf program to. This field must be specified; to select all nodes, use standard metav1.LabelSelector semantics and make it empty.

    globaldata map[string][]byte (Optional)

    GlobalData allows the user to set global variables when the program is loaded with an array of raw bytes. This is a very low level primitive. The caller is responsible for formatting the byte string appropriately considering such things as size, endianness, alignment and packing of data structures.

    bytecode BytecodeSelector

    Bytecode configures where the bpf program\u2019s bytecode should be loaded from.
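
As an illustrative sketch only (the image URL and global variable name are assumptions, not defaults), these common fields appear in a *Program spec like this. Note that globaldata carries raw bytes, so in YAML each value is base64 encoded:

spec:\n  nodeselector: {}          # empty selector matches all nodes\n  globaldata:\n    GLOBAL_u8: AQ==         # hypothetical variable; base64-encoded raw byte 0x01\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/xdp_pass:latest\n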

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfApplicationProgram","title":"BpfApplicationProgram","text":"

    (Appears on: BpfApplicationSpec)

    BpfApplicationProgram defines the desired state of BpfApplication

    Field Description type EBPFProgType

    Type specifies the bpf program type

    xdp XdpProgramInfo (Optional)

    xdp defines the desired state of the application\u2019s XdpPrograms.

    tc TcProgramInfo (Optional)

    tc defines the desired state of the application\u2019s TcPrograms.

    tcx TcProgramInfo (Optional)

    tcx defines the desired state of the application\u2019s TcPrograms.

    fentry FentryProgramInfo (Optional)

    fentry defines the desired state of the application\u2019s FentryPrograms.

    fexit FexitProgramInfo (Optional)

    fexit defines the desired state of the application\u2019s FexitPrograms.

    kprobe KprobeProgramInfo (Optional)

    kprobe defines the desired state of the application\u2019s KprobePrograms.

    kretprobe KprobeProgramInfo (Optional)

    kretprobe defines the desired state of the application\u2019s KretprobePrograms.

    uprobe UprobeProgramInfo (Optional)

    uprobe defines the desired state of the application\u2019s UprobePrograms.

    uretprobe UprobeProgramInfo (Optional)

    uretprobe defines the desired state of the application\u2019s UretprobePrograms.

    tracepoint TracepointProgramInfo (Optional)

    tracepoint defines the desired state of the application\u2019s TracepointPrograms.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfApplicationSpec","title":"BpfApplicationSpec","text":"

    (Appears on: BpfApplication)

    BpfApplicationSpec defines the desired state of BpfApplication

    Field Description BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    programs []BpfApplicationProgram

    Programs is a list of bpf programs supported for a specific application. It\u2019s possible that the application can selectively choose which program(s) to run from this list.
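
A hedged sketch of a BpfApplication tying these pieces together (the image URL, function names, and attach points are illustrative assumptions):

apiVersion: bpfman.io/v1alpha1\nkind: BpfApplication\nmetadata:\n  name: bpfapplication-sample\nspec:\n  nodeselector: {}\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/app-example:latest   # hypothetical image\n  programs:\n    - type: XDP\n      xdp:\n        bpffunctionname: xdp_pass\n        interfaceselector:\n          primarynodeinterface: true\n        priority: 100\n    - type: Tracepoint\n      tracepoint:\n        bpffunctionname: enter_openat\n        names:\n          - syscalls/sys_enter_openat\n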

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfApplicationStatus","title":"BpfApplicationStatus","text":"

    (Appears on: BpfApplication)

    BpfApplicationStatus defines the observed state of BpfApplication

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfProgramCommon","title":"BpfProgramCommon","text":"

    (Appears on: FentryProgramInfo, FexitProgramInfo, KprobeProgramInfo, TcProgramInfo, TracepointProgramInfo, UprobeProgramInfo, XdpProgramInfo)

    BpfProgramCommon defines the common attributes for all BPF programs

    Field Description bpffunctionname string

    BpfFunctionName is the name of the function that is the entry point for the BPF program

    mapownerselector Kubernetes meta/v1.LabelSelector (Optional)

    MapOwnerSelector is used to select the loaded eBPF program this eBPF program will share a map with. The value is a label applied to the BpfProgram to select. The selector must resolve to exactly one instance of a BpfProgram on a given node or the eBPF program will not load.
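
For example, a hedged snippet (the label key and value are hypothetical, not a documented convention):

spec:\n  mapownerselector:\n    matchLabels:\n      bpfman.io/ownedByProgram: xdp-counter-example   # label assumed to be set on the map-owning BpfProgram\n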

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfProgramConditionType","title":"BpfProgramConditionType (string alias)","text":"

    BpfProgramConditionType is a condition type to indicate the status of a BPF program at the individual node level.

    Value Description

    \"BytecodeSelectorError\"

    BpfProgCondBytecodeSelectorError indicates that an error occurred when trying to process the bytecode selector.

    \"Loaded\"

    BpfProgCondLoaded indicates that the eBPF program was successfully loaded into the kernel on a specific node.

    \"MapOwnerNotFound\"

BpfProgCondMapOwnerNotFound indicates that the eBPF program is sharing a map with another eBPF program and that program does not exist.

    \"MapOwnerNotLoaded\"

BpfProgCondMapOwnerNotLoaded indicates that the eBPF program is sharing a map with another eBPF program and that program is not loaded.

    \"NoContainersOnNode\"

    BpfProgCondNoContainersOnNode indicates that there are no containers on the node that match the container selector.

    \"None\"

    None of the above conditions apply

    \"NotLoaded\"

    BpfProgCondNotLoaded indicates that the eBPF program has not yet been loaded into the kernel on a specific node.

    \"NotSelected\"

    BpfProgCondNotSelected indicates that the eBPF program is not scheduled to be loaded on a specific node.

    \"NotUnLoaded\"

BpfProgCondUnloaded indicates that, in the midst of trying to remove the eBPF program from the kernel on the node, the program has not yet been removed.

    \"Unloaded\"

    BpfProgCondUnloaded indicates that the eBPF program has been unloaded from the kernel on a specific node.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfProgramSpec","title":"BpfProgramSpec","text":"

    (Appears on: BpfProgram)

    BpfProgramSpec defines the desired state of BpfProgram

    Field Description type string (Optional)

    Type specifies the bpf program type

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfProgramStatus","title":"BpfProgramStatus","text":"

    (Appears on: BpfProgram)

    BpfProgramStatus defines the observed state of BpfProgram TODO Make these a fixed set of metav1.Condition.types and metav1.Condition.reasons

    Field Description conditions []Kubernetes meta/v1.Condition

Conditions houses the updates regarding the actual implementation of the bpf program on the node. Known .status.conditions.type are: \u201cAvailable\u201d, \u201cProgressing\u201d, and \u201cDegraded\u201d.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfProgramStatusCommon","title":"BpfProgramStatusCommon","text":"

    (Appears on: BpfApplicationStatus, FentryProgramStatus, FexitProgramStatus, KprobeProgramStatus, TcProgramStatus, TracepointProgramStatus, UprobeProgramStatus, XdpProgramStatus)

    BpfProgramStatusCommon defines the BpfProgram status

    Field Description conditions []Kubernetes meta/v1.Condition

    Conditions houses the global cluster state for the eBPFProgram. The explicit condition types are defined internally.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BytecodeImage","title":"BytecodeImage","text":"

    (Appears on: BytecodeSelector)

    BytecodeImage defines how to specify a bytecode container image.

    Field Description url string

    Valid container image URL used to reference a remote bytecode image.

    imagepullpolicy PullPolicy (Optional)

    PullPolicy describes a policy for if/when to pull a bytecode image. Defaults to IfNotPresent.

    imagepullsecret ImagePullSecretSelector (Optional)

    ImagePullSecret is the name of the secret bpfman should use to get remote image repository secrets.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BytecodeSelector","title":"BytecodeSelector","text":"

    (Appears on: BpfAppCommon)

    BytecodeSelector defines the various ways to reference bpf bytecode objects.

    Field Description image BytecodeImage

    Image used to specify a bytecode container image.

    path string

    Path is used to specify a bytecode object via filepath.
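
A sketch of each form (the URL and filesystem path are examples):

# Load bytecode from a container image:\nbytecode:\n  image:\n    url: quay.io/bpfman-bytecode/xdp_pass:latest\n    imagepullpolicy: IfNotPresent\n\n# Or load bytecode from a file on the node:\nbytecode:\n  path: /var/lib/bpfman/xdp_pass.o   # hypothetical path\n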

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.ContainerSelector","title":"ContainerSelector","text":"

    (Appears on: UprobeProgramInfo)

    ContainerSelector identifies a set of containers. For example, this can be used to identify a set of containers in which to attach uprobes.

    Field Description namespace string (Optional)

    Target namespaces.

    pods Kubernetes meta/v1.LabelSelector

Target pods. This field must be specified; to select all pods, use standard metav1.LabelSelector semantics and make it empty.

    containernames []string (Optional)

    Name(s) of container(s). If none are specified, all containers in the pod are selected.
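
An illustrative ContainerSelector (the namespace, labels, and container name are assumptions):

containers:\n  namespace: production\n  pods:\n    matchLabels:\n      app: my-app\n  containernames:\n    - my-container\n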

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.EBPFProgType","title":"EBPFProgType (string alias)","text":"

    (Appears on: BpfApplicationProgram)

    EBPFProgType defines the supported eBPF program types

    Value Description

    \"Fentry\"

    ProgTypeFentry refers to the Fentry program type.

    \"Fexit\"

    ProgTypeFexit refers to the Fexit program type.

    \"Kprobe\"

    ProgTypeKprobe refers to the Kprobe program type.

    \"Kretprobe\"

ProgTypeKretprobe refers to the Kretprobe program type.

    \"TC\"

    ProgTypeTC refers to the TC program type.

    \"TCX\"

ProgTypeTCX refers to the TCX program type.

    \"Tracepoint\"

    ProgTypeTracepoint refers to the Tracepoint program type.

    \"Uprobe\"

    ProgTypeUprobe refers to the Uprobe program type.

    \"Uretprobe\"

    ProgTypeUretprobe refers to the Uretprobe program type.

    \"XDP\"

    ProgTypeXDP refers to the XDP program type.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FentryProgramInfo","title":"FentryProgramInfo","text":"

    (Appears on: BpfApplicationProgram, FentryProgramSpec)

    FentryProgramInfo defines the Fentry program details

    Field Description BpfProgramCommon BpfProgramCommon

    (Members of BpfProgramCommon are embedded into this type.)

    func_name string

    Function to attach the fentry to.
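
A minimal FentryProgram sketch built from these fields (the kernel function, entry point name, and image URL are illustrative):

apiVersion: bpfman.io/v1alpha1\nkind: FentryProgram\nmetadata:\n  name: fentry-example\nspec:\n  bpffunctionname: test_fentry   # assumed entry point name\n  func_name: do_unlinkat         # kernel function to attach to\n  nodeselector: {}\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/fentry:latest\n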

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FentryProgramSpec","title":"FentryProgramSpec","text":"

    (Appears on: FentryProgram)

    FentryProgramSpec defines the desired state of FentryProgram

    Field Description FentryProgramInfo FentryProgramInfo

    (Members of FentryProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FentryProgramStatus","title":"FentryProgramStatus","text":"

    (Appears on: FentryProgram)

    FentryProgramStatus defines the observed state of FentryProgram

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FexitProgramInfo","title":"FexitProgramInfo","text":"

    (Appears on: BpfApplicationProgram, FexitProgramSpec)

    FexitProgramInfo defines the Fexit program details

    Field Description BpfProgramCommon BpfProgramCommon

    (Members of BpfProgramCommon are embedded into this type.)

    func_name string

    Function to attach the fexit to.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FexitProgramSpec","title":"FexitProgramSpec","text":"

    (Appears on: FexitProgram)

    FexitProgramSpec defines the desired state of FexitProgram

    Field Description FexitProgramInfo FexitProgramInfo

    (Members of FexitProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FexitProgramStatus","title":"FexitProgramStatus","text":"

    (Appears on: FexitProgram)

    FexitProgramStatus defines the observed state of FexitProgram

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.ImagePullSecretSelector","title":"ImagePullSecretSelector","text":"

    (Appears on: BytecodeImage)

    ImagePullSecretSelector defines the name and namespace of an image pull secret.

    Field Description name string

    Name of the secret which contains the credentials to access the image repository.

    namespace string

    Namespace of the secret which contains the credentials to access the image repository.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.InterfaceSelector","title":"InterfaceSelector","text":"

    (Appears on: TcProgramInfo, XdpProgramInfo)

    InterfaceSelector defines interface to attach to.

    Field Description interfaces []string (Optional)

    Interfaces refers to a list of network interfaces to attach the BPF program to.

    primarynodeinterface bool (Optional)

Attach the BPF program to the primary interface on the node. Only \u2018true\u2019 is accepted.
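
The two variants, as a snippet (interface names are examples):

# Attach to named interfaces:\ninterfaceselector:\n  interfaces:\n    - eth0\n    - eth1\n\n# Or attach to each node's primary interface:\ninterfaceselector:\n  primarynodeinterface: true\n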

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.KprobeProgramInfo","title":"KprobeProgramInfo","text":"

    (Appears on: BpfApplicationProgram, KprobeProgramSpec)

    KprobeProgramInfo defines the common fields for KprobeProgram

    Field Description BpfProgramCommon BpfProgramCommon

    (Members of BpfProgramCommon are embedded into this type.)

    func_name string

Function to attach the kprobe to.

    offset uint64 (Optional)

    Offset added to the address of the function for kprobe. Not allowed for kretprobes.

    retprobe bool (Optional)

    Whether the program is a kretprobe. Default is false
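
Putting the Kprobe fields together in a hedged sketch (the kernel function, entry point name, and image URL are assumptions):

apiVersion: bpfman.io/v1alpha1\nkind: KprobeProgram\nmetadata:\n  name: kprobe-example\nspec:\n  bpffunctionname: my_kprobe   # assumed entry point name\n  func_name: try_to_wake_up    # kernel function to probe\n  offset: 0\n  retprobe: false\n  nodeselector: {}\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/kprobe:latest\n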

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.KprobeProgramSpec","title":"KprobeProgramSpec","text":"

    (Appears on: KprobeProgram)

    KprobeProgramSpec defines the desired state of KprobeProgram

    Field Description KprobeProgramInfo KprobeProgramInfo

    (Members of KprobeProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.KprobeProgramStatus","title":"KprobeProgramStatus","text":"

    (Appears on: KprobeProgram)

    KprobeProgramStatus defines the observed state of KprobeProgram

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.ProgramConditionType","title":"ProgramConditionType (string alias)","text":"

    ProgramConditionType is a condition type to indicate the status of a BPF program at the cluster level.

    Value Description

    \"DeleteError\"

    ProgramDeleteError indicates that the BPF program was marked for deletion, but deletion was unsuccessful.

    \"NotYetLoaded\"

    ProgramNotYetLoaded indicates that the program in question has not yet been loaded on all nodes in the cluster.

    \"ReconcileError\"

    ProgramReconcileError indicates that an unforeseen situation has occurred in the controller logic, and the controller will retry.

    \"ReconcileSuccess\"

    BpfmanProgConfigReconcileSuccess indicates that the BPF program has been successfully reconciled.

    TODO: we should consider removing \u201creconciled\u201d type logic from the public API as it\u2019s an implementation detail of our use of controller runtime, but not necessarily relevant to human users or integrations.

    See: https://github.com/bpfman/bpfman/issues/430

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.PullPolicy","title":"PullPolicy (string alias)","text":"

    (Appears on: BytecodeImage)

    PullPolicy describes a policy for if/when to pull a container image

    Value Description

    \"Always\"

PullAlways means that bpfman always attempts to pull the latest bytecode image. The container will fail if the pull fails.

    \"IfNotPresent\"

PullIfNotPresent means that bpfman pulls the image if it isn\u2019t present on disk. The container will fail if the image isn\u2019t present and the pull fails.

    \"Never\"

PullNever means that bpfman never pulls an image, but only uses a local image. The container will fail if the image isn\u2019t present.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TcProceedOnValue","title":"TcProceedOnValue (string alias)","text":"

    (Appears on: TcProgramInfo)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TcProgramInfo","title":"TcProgramInfo","text":"

    (Appears on: BpfApplicationProgram, TcProgramSpec)

    TcProgramInfo defines the tc program details

    Field Description BpfProgramCommon BpfProgramCommon

    (Members of BpfProgramCommon are embedded into this type.)

    interfaceselector InterfaceSelector

    Selector to determine the network interface (or interfaces)

    priority int32

    Priority specifies the priority of the tc program in relation to other programs of the same type with the same attach point. It is a value from 0 to 1000 where lower values have higher precedence.

    direction string

    Direction specifies the direction of traffic the tc program should attach to for a given network device.

    proceedon []TcProceedOnValue (Optional)

    ProceedOn allows the user to call other tc programs in chain on this exit code. Multiple values are supported by repeating the parameter.
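
A hedged TcProgram sketch using the fields above (the interface, direction value, and image URL are illustrative; proceedon is omitted since its allowed values are not enumerated here):

apiVersion: bpfman.io/v1alpha1\nkind: TcProgram\nmetadata:\n  name: tc-pass-example\nspec:\n  bpffunctionname: pass   # assumed entry point name\n  nodeselector: {}\n  interfaceselector:\n    interfaces:\n      - eth0\n  priority: 50            # 0-1000, lower values have higher precedence\n  direction: ingress\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/tc_pass:latest\n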

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TcProgramSpec","title":"TcProgramSpec","text":"

    (Appears on: TcProgram)

    TcProgramSpec defines the desired state of TcProgram

    Field Description TcProgramInfo TcProgramInfo

    (Members of TcProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TcProgramStatus","title":"TcProgramStatus","text":"

    (Appears on: TcProgram)

    TcProgramStatus defines the observed state of TcProgram

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TracepointProgramInfo","title":"TracepointProgramInfo","text":"

    (Appears on: BpfApplicationProgram, TracepointProgramSpec)

    TracepointProgramInfo defines the Tracepoint program details

    Field Description BpfProgramCommon BpfProgramCommon

    (Members of BpfProgramCommon are embedded into this type.)

    names []string

    Names refers to the names of kernel tracepoints to attach the bpf program to.
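
A minimal TracepointProgram sketch (the tracepoint name, entry point, and image URL are assumptions):

apiVersion: bpfman.io/v1alpha1\nkind: TracepointProgram\nmetadata:\n  name: tracepoint-example\nspec:\n  bpffunctionname: enter_openat   # assumed entry point name\n  names:\n    - syscalls/sys_enter_openat\n  nodeselector: {}\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/tracepoint:latest\n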

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TracepointProgramSpec","title":"TracepointProgramSpec","text":"

    (Appears on: TracepointProgram)

    TracepointProgramSpec defines the desired state of TracepointProgram

    Field Description TracepointProgramInfo TracepointProgramInfo

    (Members of TracepointProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TracepointProgramStatus","title":"TracepointProgramStatus","text":"

    (Appears on: TracepointProgram)

    TracepointProgramStatus defines the observed state of TracepointProgram

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.UprobeProgramInfo","title":"UprobeProgramInfo","text":"

    (Appears on: BpfApplicationProgram, UprobeProgramSpec)

    UprobeProgramInfo contains the information about the uprobe program

    Field Description BpfProgramCommon BpfProgramCommon

    (Members of BpfProgramCommon are embedded into this type.)

    func_name string (Optional)

    Function to attach the uprobe to.

    offset uint64 (Optional)

    Offset added to the address of the function for uprobe.

    target string

    Library name or the absolute path to a binary or library.

    retprobe bool (Optional)

    Whether the program is a uretprobe. Default is false

    pid int32 (Optional)

    Only execute uprobe for given process identification number (PID). If PID is not provided, uprobe executes for all PIDs.

    containers ContainerSelector (Optional)

Containers identifies the set of containers in which to attach the uprobe. If Containers is not specified, the uprobe will be attached in the bpfman-agent container. The ContainerSelector is very flexible and even allows the selection of all containers in a cluster. If an attempt is made to attach uprobes to too many containers, it can have a negative impact on the cluster.
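
A hedged UprobeProgram sketch combining these fields (the target, function, selector values, and image URL are illustrative):

apiVersion: bpfman.io/v1alpha1\nkind: UprobeProgram\nmetadata:\n  name: uprobe-example\nspec:\n  bpffunctionname: my_uprobe   # assumed entry point name\n  func_name: malloc            # function in the target to probe\n  target: libc                 # library name or absolute path\n  retprobe: false\n  containers:\n    namespace: demo\n    pods:\n      matchLabels:\n        app: demo-app\n  nodeselector: {}\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/uprobe:latest\n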

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.UprobeProgramSpec","title":"UprobeProgramSpec","text":"

    (Appears on: UprobeProgram)

    UprobeProgramSpec defines the desired state of UprobeProgram

    Field Description UprobeProgramInfo UprobeProgramInfo

    (Members of UprobeProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.UprobeProgramStatus","title":"UprobeProgramStatus","text":"

    (Appears on: UprobeProgram)

    UprobeProgramStatus defines the observed state of UprobeProgram

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.XdpProceedOnValue","title":"XdpProceedOnValue (string alias)","text":"

    (Appears on: XdpProgramInfo)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.XdpProgramInfo","title":"XdpProgramInfo","text":"

    (Appears on: BpfApplicationProgram, XdpProgramSpec)

    XdpProgramInfo defines the common fields for all XdpProgram types

    Field Description BpfProgramCommon BpfProgramCommon

    (Members of BpfProgramCommon are embedded into this type.)

    interfaceselector InterfaceSelector

    Selector to determine the network interface (or interfaces)

    priority int32

    Priority specifies the priority of the bpf program in relation to other programs of the same type with the same attach point. It is a value from 0 to 1000 where lower values have higher precedence.

    proceedon []XdpProceedOnValue"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.XdpProgramSpec","title":"XdpProgramSpec","text":"

    (Appears on: XdpProgram)

    XdpProgramSpec defines the desired state of XdpProgram

    Field Description XdpProgramInfo XdpProgramInfo

    (Members of XdpProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)
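
For reference, a minimal XdpProgram sketch assembled from the fields above (the entry point name and image URL are illustrative assumptions):

apiVersion: bpfman.io/v1alpha1\nkind: XdpProgram\nmetadata:\n  name: xdp-pass-example\nspec:\n  bpffunctionname: pass   # assumed entry point name\n  nodeselector: {}        # empty selector matches all nodes\n  interfaceselector:\n    primarynodeinterface: true\n  priority: 55            # 0-1000, lower values have higher precedence\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/xdp_pass:latest\n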

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.XdpProgramStatus","title":"XdpProgramStatus","text":"

    (Appears on: XdpProgram)

    XdpProgramStatus defines the observed state of XdpProgram

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    Generated with gen-crd-api-reference-docs.

    "},{"location":"developer-guide/configuration/","title":"Configuration","text":""},{"location":"developer-guide/configuration/#bpfman-configuration-file","title":"bpfman Configuration File","text":"

bpfman looks for a configuration file at /etc/bpfman/bpfman.toml. If no file is found, defaults are assumed. There is an example at scripts/bpfman.toml, similar to:

    [interfaces]\n  [interfaces.eth0]\n  xdp_mode = \"drv\" # Valid xdp modes are \"hw\", \"skb\" and \"drv\". Default: \"drv\", but will fall back to \"skb\" on failure.\n\n[signing]\nallow_unsigned = true\nverify_enabled = true\n\n[database]\nmax_retries = 10\nmillisec_delay = 1000\n
    "},{"location":"developer-guide/configuration/#config-section-interfaces","title":"Config Section: [interfaces]","text":"

    This section of the configuration file allows the XDP Mode for a given interface to be set. If not set, the default value of skb will be used. Multiple interfaces can be configured.

    [interfaces]\n  [interfaces.eth0]\n  xdp_mode = \"drv\"\n  [interfaces.eth1]\n  xdp_mode = \"hw\"\n  [interfaces.eth2]\n  xdp_mode = \"skb\"\n

    Valid fields:

    • xdp_mode: XDP Mode for a given interface. Valid values: [\"drv\"|\"hw\"|\"skb\"]
    "},{"location":"developer-guide/configuration/#config-section-signing","title":"Config Section: [signing]","text":"

    This section of the configuration file allows control over whether signatures on OCI packaged eBPF bytecode as container images are verified, and whether they are required to be signed via cosign.

    By default, images are verified, and unsigned images are allowed. See eBPF Bytecode Image Specifications for more details on building and shipping bytecode in a container image.

    Valid fields:

    • allow_unsigned: Flag indicating whether unsigned images are allowed. Valid values: [\"true\"|\"false\"]

    • verify_enabled: Flag indicating whether signatures should be verified. Valid values: [\"true\"|\"false\"]

    "},{"location":"developer-guide/configuration/#config-section-database","title":"Config Section: [database]","text":"

bpfman uses an embedded database to store state and persistent data on disk, which can only be accessed synchronously by a single process at a time. To avoid returning database lock errors and enhance the user experience, bpfman retries opening the database. The number of retries and the time between retries are configurable.

    Valid fields:

    • max_retries: The number of times to retry opening the database on a given request.
    • millisec_delay: Time in milliseconds to wait between retry attempts.
    "},{"location":"developer-guide/configuration/#config-section-registry","title":"Config Section: [registry]","text":"

bpfman uses the latest public container images for the xdp and tc dispatchers by default, but the image locations are user-configurable. For example, it may be desirable in certain enterprise environments to source the xdp and tc dispatcher images from a self-hosted OCI image registry. In this case, the default values for the xdp and tc dispatcher images can be overridden as shown below.

    Valid fields:

    • xdp_dispatcher_image: The locator of the xdp dispatcher image in the format quay.io/bpfman/xdp-dispatcher:latest
    • tc_dispatcher_image: The locator of the tc dispatcher image in the format quay.io/bpfman/tc-dispatcher:latest
    "},{"location":"developer-guide/debugging/","title":"Debugging using VSCode and lldb on a remote machine or VM","text":"
1. Install the CodeLLDB VSCode extension
    2. Add a configuration to .vscode/launch.json like the following (customizing for a given system using the comment in the configuration file):

          {\n        \"name\": \"Remote debug bpfman\",\n        \"type\": \"lldb\",\n        \"request\": \"launch\",\n        \"program\": \"<ABSOLUTE_PATH>/github.com/bpfman/bpfman/target/debug/bpfman\", // Local path to latest debug binary.\n        \"initCommands\": [\n            \"platform select remote-linux\", // Execute `platform list` for a list of available remote platform plugins.\n            \"platform connect connect://<IP_ADDRESS_OF_VM>:8175\", // replace <IP_ADDRESS_OF_VM>\n            \"settings set target.inherit-env false\",\n        ],\n        \"env\": {\n            \"RUST_LOG\": \"debug\"\n        },\n        \"cargo\": {\n            \"args\": [\n                \"build\",\n                \"--bin=bpfman\",\n                \"--package=bpfman\"\n            ],\n            \"filter\": {\n                \"name\": \"bpfman\",\n                \"kind\": \"bin\"\n            }\n        },\n        \"cwd\": \"${workspaceFolder}\",\n    },\n
    3. On the VM or Server install lldb-server:

      dnf based OS:

          sudo dnf install lldb\n

      apt based OS:

          sudo apt install lldb\n
4. Start lldb-server on the VM or Server (make sure to do this in your home directory, ~)

      cd ~\n    sudo lldb-server platform --server --listen 0.0.0.0:8175\n
    5. Add breakpoints as needed via the vscode GUI and then hit F5 to start debugging!

    "},{"location":"developer-guide/develop-operator/","title":"Developing the bpfman-operator","text":"

This section is intended to give developer-level details regarding the layout and design of the bpfman-operator. At its core, the operator was implemented using the operator-sdk framework, which makes those docs another good resource if anything is missed here.

    "},{"location":"developer-guide/develop-operator/#high-level-design-overview","title":"High level design overview","text":"

This repository houses two main processes, the bpfman-agent and the bpfman-operator, along with CRD API definitions for BpfProgram and *Program objects. The following diagram depicts how all these components work together to create a functioning operator.

    "},{"location":"developer-guide/develop-operator/#building-and-deploying","title":"Building and Deploying","text":"

For building and deploying the bpfman-operator, simply see the attached make help output.

    $ make help\n\nUsage:\n  make <target>\n\nGeneral\n  help             Display this help.\n\nLocal Dependencies\n  kustomize        Download kustomize locally if necessary.\n  controller-gen   Download controller-gen locally if necessary.\n  register-gen     Download register-gen locally if necessary.\n  informer-gen     Download informer-gen locally if necessary.\n  lister-gen       Download lister-gen locally if necessary.\n  client-gen       Download client-gen locally if necessary.\n  envtest          Download envtest-setup locally if necessary.\n  opm              Download opm locally if necessary.\n\nDevelopment\n  manifests        Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.\n  generate         Generate ALL auto-generated code.\n  generate-register  Generate register code see all `zz_generated.register.go` files.\n  generate-deepcopy  Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations see all `zz_generated.register.go` files.\n  generate-typed-clients  Generate typed client code\n  generate-typed-listers  Generate typed listers code\n  generate-typed-informers  Generate typed informers code\n  vendors          Refresh vendors directory.\n  fmt              Run go fmt against code.\n  verify           Verify all the autogenerated code\n  lint             Run linter (golangci-lint).\n  test             Run Unit tests.\n  test-integration  Run Integration tests.\n  bundle           Generate bundle manifests and metadata, then validate generated files.\n  build-release-yamls  Generate the crd install bundle for a specific release version.\n\nBuild\n  build            Build bpfman-operator and bpfman-agent binaries.\n  build-images     Build bpfman-agent and bpfman-operator images.\n  build-operator-image  Build bpfman-operator image.\n  build-agent-image  Build bpfman-agent image.\n  push-images      Push bpfman-agent and bpfman-operator images.\n  load-images-kind  Load bpfman-agent, and bpfman-operator images into the running local kind devel cluster.\n  bundle-build     Build the bundle image.\n  bundle-push      Push the bundle image.\n  catalog-update   Generate catalog yaml file.\n  catalog-build    Build a catalog image.\n  catalog-push     Push a catalog image.\n\nCRD Deployment\n  install          Install CRDs into the K8s cluster specified in ~/.kube/config.\n  uninstall        Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.\n\nVanilla K8s Deployment\n  setup-kind       Setup Kind cluster\n  destroy-kind     Destroy Kind cluster\n  deploy           Deploy bpfman-operator to the K8s cluster specified in ~/.kube/config with the csi driver initialized.\n  undeploy         Undeploy bpfman-operator from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.\n  kind-reload-images  Reload locally build images into a kind cluster and restart the ds and deployment so they're picked up.\n  run-on-kind      Kind Deploy runs the bpfman-operator on a local kind cluster using local builds of bpfman, bpfman-agent, and bpfman-operator\n\nOpenshift Deployment\n  deploy-openshift  Deploy bpfman-operator to the Openshift cluster specified in ~/.kube/config.\n  undeploy-openshift  Undeploy bpfman-operator from the Openshift cluster specified in ~/.kube/config. 
Call with ignore-not-found=true to ignore resource not found errors during deletion.\n  catalog-deploy   Deploy a catalog image.\n  catalog-undeploy  Undeploy a catalog image.\n
    "},{"location":"developer-guide/develop-operator/#project-layout","title":"Project Layout","text":"

    The bpfman-operator project layout is guided by the recommendations from both the operator-sdk framework and the standard golang project-layout. The following is a brief description of the main directories under bpfman-operator/ and their contents.

    NOTE: Bolded directories contain auto-generated code

    • apis/v1alpha1/*_types.go: Contains the K8s CRD api definitions (*_types.go) for each version.
• apis/v1alpha1/zz_generated.*.go: Contains the auto-generated register (zz_generated.register.go) and deepcopy (zz_generated.deepcopy.go) methods.
    • bundle/: Contains the OLM bundle manifests and metadata for the operator. More details can be found in the operator-sdk documentation.
    • cmd/: Contains the main entry-points for the bpfman-operator and bpfman-agent processes.
    • config/: Contains the configuration files for launching the bpfman-operator on a cluster.
  • bpfman-deployment/: Contains static deployment yamls for the bpfman-daemon. This includes two containers, one for bpfman and the other for the bpfman-agent. This DaemonSet yaml is NOT deployed statically by kustomize; instead, it's statically copied into the operator image, which is then responsible for deploying and configuring the bpfman-daemon DaemonSet. Lastly, this directory also contains the default config used to configure the bpfman-daemon, along with the cert-manager certificates used to encrypt communication between the bpfman-agent and bpfman.
      • bpfman-operator-deployment/: Contains the static deployment yaml for the bpfman-operator. This is deployed statically by kustomize.
      • crd/: Contains the CRD manifests for all of the bpfman-operator APIs.
        • bases/: Is where the actual CRD definitions are stored. These definitions are auto-generated by controller-gen.
        • patches/: Contains kustomize patch files for each Program Type, which enables a conversion webhook for the CRD and adds a directive for certmanager to inject CA into the CRD.
      • default/: Contains the default deployment configuration for the bpfman-operator.
      • manifests/: Contains the bases for generating OLM manifests.
      • openshift/: Contains the Openshift specific deployment configuration for the bpfman-operator.
  • prometheus/: Contains the prometheus manifests used to deploy Prometheus to a cluster. At the time of writing, the bpfman-operator does NOT expose any metrics to prometheus, but this is a future goal.
      • rbac/: Contains rbac yamls for getting bpfman and the bpfman-operator up and running on Kubernetes.
        • bpfman-agent/: Contains the rbac yamls for the bpfman-agent. They are automatically generated by kubebuilder via build tags in the bpfman-agent controller code.
        • bpfman-operator/: Contains the rbac yamls for the bpfman-operator. They are automatically generated by kubebuilder via build tags in the bpfman-operator controller code.
      • samples/: Contains sample CR definitions that can be deployed by users for each of our supported APIs.
  • scorecard/: Contains the scorecard manifests used to deploy scorecard to a cluster. At the time of writing, the bpfman-operator is NOT running any scorecard tests.
      • test/: Contains the test manifests used to deploy the bpfman-operator to a kind cluster for integration testing.
    • controllers/: Contains the controller implementations for all of the bpfman-operator APIs. Each controller is responsible for reconciling the state of the cluster with the desired state defined by the user. This is where the source of truth for the auto-generated RBAC can be found, keep an eye out for //+kubebuilder:rbac:groups=bpfman.io comment tags.
      • bpfmanagent/: Contains the controller implementations which reconcile user created *Program types to multiple BpfProgram objects.
      • bpfmanoperator/: Contains the controller implementations which reconcile global BpfProgram object state back to the user by ensuring the user created *Program objects are reporting the correct status.
    • hack/: Contains any scripts+static files used by the bpfman-operator to facilitate development.
    • internal/: Contains all private library code and is used by the bpfman-operator and bpfman-agent controllers.
• pkg/: Contains all public library code, which is consumed both externally and internally.
      • client/: Contains the autogenerated clientset, informers and listers for all of the bpfman-operator APIs. These are autogenerated by the k8s.io/code-generator project, and can be consumed by users wishing to programmatically interact with bpfman specific APIs.
      • helpers/: Contains helper functions which can be consumed by users wishing to programmatically interact with bpfman specific APIs.
• test/integration/: Contains integration tests for the bpfman-operator. These tests are run against a kind cluster and are responsible for testing the bpfman-operator in a real cluster environment. It uses the kubernetes-testing-framework project to programmatically spin up all of the required infrastructure for our unit tests.
    • Makefile: Contains all of the make targets used to build, test, and generate code used by the bpfman-operator.
    "},{"location":"developer-guide/develop-operator/#rpc-protobuf-generation","title":"RPC Protobuf Generation","text":"

    Technically part of the bpfman API, the RPC Protobufs are usually not coded until a bpfman feature is integrated into the bpfman-operator and bpfman-agent code. To modify the RPC Protobuf definition, edit proto/bpfman.proto. Then to generate the protobufs from the updated RPC Protobuf definitions:

    cd bpfman/\ncargo xtask build-proto\n

    This will generate:

    • bpfman-api/src/bpfman.v1.rs: Generated Rust Protobuf source code.
    • clients/gobpfman/v1/: Directory that contains the generated Go Client code for interacting with bpfman over RPC from a Go application.

When editing proto/bpfman.proto, follow the best practices described in Proto Best Practices.

    Note

    cargo xtask build-proto also pulls in proto/csi.proto (which is in the same directory as proto/bpfman.proto). proto/csi.proto is taken from container-storage-interface/spec/csi.proto. See container-storage-interface/spec/spec.md for more details.

    "},{"location":"developer-guide/develop-operator/#generated-files","title":"Generated Files","text":"

The operator-sdk framework will generate multiple categories of files (Custom Resource Definitions (CRD), RBAC ClusterRole, Webhook Configuration, typed client, lister and informer code, etc.). If any of the bpfman-operator/apis/v1alpha1/*Program_types.go files are modified, then regenerate these files using:

    cd bpfman-operator/\nmake generate\n

    This command will generate all auto-generated code. There are commands to generate each sub-category if needed. See make help to list all the generate commands.

    "},{"location":"developer-guide/develop-operator/#building","title":"Building","text":"

    To run in Kubernetes, bpfman components need to be containerized. However, building container images can take longer than just building the code. During development, it may be quicker to find and fix build errors by just building the code. To build the code:

    cd bpfman-operator/\nmake build\n

    To build the container images, run the following command:

    cd bpfman-operator/\nmake build-images\n

    If the make build command is skipped above, the code will be built in the build-images command. If the make build command is run, the built code will be leveraged in this step. This command generates the following images:

    docker images\nREPOSITORY                       TAG      IMAGE ID       CREATED          SIZE\nquay.io/bpfman/bpfman            latest   69df038ccea3   43 seconds ago   515MB\nquay.io/bpfman/bpfman-agent      latest   f6af33c5925b   2 minutes ago    464MB\nquay.io/bpfman/bpfman-operator   latest   4fe444b7abf1   2 minutes ago    141MB\n:\n
    "},{"location":"developer-guide/develop-operator/#running-locally-in-kind","title":"Running Locally in KIND","text":"

Deploying the bpfman-operator goes into more detail on ways to launch bpfman in a Kubernetes cluster. To run locally in a Kind cluster with an up-to-date build, simply run:

    cd bpfman-operator/\nmake run-on-kind\n

The make run-on-kind target will run make build-images if the images do not exist or need updating.

To rebuild and load a fresh build, run:

    cd bpfman-operator/\nmake build-images\nmake kind-reload-images\n

This will rebuild the bpfman-operator and bpfman-agent images and load them into the kind cluster.

    By default, the make run-on-kind uses the local images described above. The container images used for bpfman, bpfman-agent, and bpfman-operator can also be manually configured:

    BPFMAN_IMG=<your/image/url> BPFMAN_AGENT_IMG=<your/image/url> BPFMAN_OPERATOR_IMG=<your/image/url> make run-on-kind\n
    "},{"location":"developer-guide/develop-operator/#testing-locally","title":"Testing Locally","text":"

    See Kubernetes Operator Tests.

    "},{"location":"developer-guide/develop-operator/#troubleshooting","title":"Troubleshooting","text":""},{"location":"developer-guide/develop-operator/#metricshealth-port-issues","title":"Metrics/Health port issues","text":"

In some scenarios, the health and metric ports may already be in use by other services on the system. When this happens, the bpfman-agent container fails to deploy. The ports currently default to 8175 and 8174.

    The ports are passed in through the daemonset.yaml for the bpfman-daemon and deployment.yaml and manager_auth_proxy_patch.yaml for the bpfman-operator. The easiest way to change which ports are used is to update these yaml files and rebuild the container images. The container images need to be rebuilt because the bpfman-daemon is deployed from the bpfman-operator and the associated yaml files are copied into the bpfman-operator image.

If rebuilding the container images is not desirable, then the ports can be changed on the fly. For the bpfman-operator, the ports can be updated by editing the bpfman-operator Deployment.

    kubectl edit deployment -n bpfman bpfman-operator\n\napiVersion: apps/v1\nkind: Deployment\n:\nspec:\n  template:\n  :\n  spec:\n    containers:\n    -args:\n      - --secure-listen-address=0.0.0.0:8443\n      - --upstream=http://127.0.0.1:8174/        <-- UPDATE\n      - --logtostderr=true\n      - --v=0\n      name: kube-rbac-proxy\n      :\n    - args:\n      - --health-probe-bind-address=:8175        <-- UPDATE\n      - --metrics-bind-address=127.0.0.1:8174    <-- UPDATE\n      - --leader-elect\n      :\n      livenessProbe:\n          failureThreshold: 3\n          httpGet:\n            path: /healthz\n            port: 8175                           <-- UPDATE\n            scheme: HTTP\n            :\n      name: bpfman-operator\n      readinessProbe:\n          failureThreshold: 3\n          httpGet:\n            path: /readyz\n            port: 8175                           <-- UPDATE\n            scheme: HTTP\n      :\n

    For the bpfman-daemon, the ports could be updated by editing the bpfman-daemon DaemonSet. However, if bpfman-daemon is restarted for any reason by the bpfman-operator, the changes will be lost. So it is recommended to update the ports for the bpfman-daemon via the bpfman bpfman-config ConfigMap.

    kubectl edit configmap -n bpfman bpfman-config\n\napiVersion: v1\ndata:\n  bpfman.agent.healthprobe.addr: :8175                    <-- UPDATE\n  bpfman.agent.image: quay.io/bpfman/bpfman-agent:latest\n  bpfman.agent.log.level: info\n  bpfman.agent.metric.addr: 127.0.0.1:8174                <-- UPDATE\n  bpfman.image: quay.io/bpfman/bpfman:latest\n  bpfman.log.level: debug\nkind: ConfigMap\n:\n
    "},{"location":"developer-guide/documentation/","title":"Documentation","text":"

This section describes how to modify the documentation around bpfman. All of bpfman's documentation is written in Markdown and leverages mkdocs to generate a static site, which is hosted on netlify.

    If this is the first time building using mkdocs, jump to the Development Environment Setup section for help installing the tooling.

    "},{"location":"developer-guide/documentation/#documentation-notes","title":"Documentation Notes","text":"

This section provides some notes on the dos and don'ts of writing documentation.

    "},{"location":"developer-guide/documentation/#website-management","title":"Website Management","text":"

    The headings and layout of the website, as well as other configuration settings, are managed from the mkdocs.yml file in the project root directory.
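
For orientation, the general shape of mkdocs.yml looks something like the following sketch (the theme and nav entries shown are illustrative, not the full site layout):

site_name: bpfman\ntheme:\n  name: material   # assumed theme\nnav:\n  - Home: index.md\n  - Developer Guide:\n      - Documentation: developer-guide/documentation.md\n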

    "},{"location":"developer-guide/documentation/#markdown-style","title":"Markdown Style","text":"

    When writing documentation via a Markdown file, the following format has been followed:

    • Text on a given line should not exceed 100 characters, unless it's example syntax or a link that should be broken up.
    • Each new sentence should start on a new line. That way, if text needs to be inserted, whole paragraphs don't need to be adjusted.
    • Links to other markdown files are relative to the file the link is placed in.
    "},{"location":"developer-guide/documentation/#governance-files","title":"Governance Files","text":"

There is a set of well-known governance files that are typically placed in the root directory of most projects, like README.md, MAINTAINERS.md, CONTRIBUTING.md, etc. mkdocs expects all files used in the static website to be located under a common directory, docs/ for bpfman. To reference the governance files from the static website, a directory (docs/governance/) was created with a file for each governance file that only contains --8<-- and the file name. This indicates to mkdocs to pull the additional file from the project root directory.

    For example: docs/governance/MEETINGS.md

    Note

    This works for the website generation, but if a Markdown file is viewed through Github (not the website), the link is broken. So these files should only be linked from docs/index.md and mkdocs.yml.

    "},{"location":"developer-guide/documentation/#docsdeveloper-guideapi-specmd","title":"docs/developer-guide/api-spec.md","text":"

    The file docs/developer-guide/api-spec.md documents the CRDs used in a Kubernetes deployment. The contents are auto-generated when PRs are pushed to Github.

    The contents can be generated locally by running the command ./scripts/api-docs/generate.sh apidocs.html from the root bpfman directory.

    "},{"location":"developer-guide/documentation/#generate-documentation","title":"Generate Documentation","text":"

If you would like to test locally, build and preview the generated documentation from the bpfman root directory using mkdocs:

    cd bpfman/\nmkdocs build\n

    Note

    If mkdocs build gives you an error, make sure you have the mkdocs packages listed below installed.

    To preview from a build on a local machine, start the mkdocs dev-server with the command below, then open up http://127.0.0.1:8000/ in your browser, and you'll see the default home page being displayed:

    mkdocs serve\n

    To preview from a build on a remote machine, start the mkdocs dev-server with the command below, then open up http://<ServerIP>:8000/ in your browser, and you'll see the default home page being displayed:

    mkdocs serve -a 0.0.0.0:8000\n
    "},{"location":"developer-guide/documentation/#development-environment-setup","title":"Development Environment Setup","text":"

    The recommended installation method is using pip.

    pip install -r requirements.txt \n

Once installed, ensure that mkdocs is in your PATH:

    mkdocs -V\nmkdocs, version 1.4.3 from /home/$USER/.local/lib/python3.11/site-packages/mkdocs (Python 3.11)\n

    Note

    If you have an older version of mkdocs installed, you may need to use the --upgrade option (e.g., pip install --upgrade mkdocs) to get it to work.

    "},{"location":"developer-guide/documentation/#document-images","title":"Document Images","text":"

The source of the images used in the example documentation can be found in bpfman Upstream Images. Request access if required.

    "},{"location":"developer-guide/image-build/","title":"bpfman Container Images","text":"

    Container images for bpfman are automatically built and pushed to quay.io/ under the :latest tag whenever code is merged into the main branch of the bpfman and bpfman-operator repositories.

    • quay.io/bpfman: This repository contains images needed to run bpfman. It contains the xdp-dispatcher and tc-dispatcher eBPF container images, which are used by bpfman to allow multiple XDP or TC programs to be loaded on a given interface. It also includes the container images which are used to deploy bpfman in a Kubernetes deployment:
      • bpfman: Packages all the bpfman binaries, including bpfman CLI, bpfman-ns and bpfman-rpc.
      • bpfman-agent: Agent that listens to KubeAPI Server and makes calls to bpfman to load or unload eBPF programs based on user intent.
      • bpfman-operator: Operator for deploying bpfman.
      • tc-dispatcher: eBPF container image containing the TC Dispatcher, which is used by bpfman to manage and allow multiple TC based programs to be loaded on a given TC hook point.
  • xdp-dispatcher: eBPF container image containing the XDP Dispatcher, which is used by bpfman to manage and allow multiple XDP based programs to be loaded on a given XDP hook point.
      • csi-node-driver-registrar: CSI Driver used by bpfman.
      • bpfman-operator-bundle: Image containing all the CRDs (Custom-Resource-Definitions) used by bpfman-agent to define Kubernetes objects used to manage eBPF programs.
    • quay.io/bpfman-bytecode: This repository contains eBPF container images for all of the generated bytecode from examples/ and integration-test/.
    • quay.io/bpfman-userspace: This repository contains userspace container images for all of the example programs in examples/.
    "},{"location":"developer-guide/image-build/#multiple-architecture-support","title":"Multiple Architecture Support","text":"

    All bpfman related container images that are automatically built and pushed to quay.io/ contain a manifest file and images built for the following architectures:

    • x86_64
    • arm64
    • ppc64le
    • s390x
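
    To verify which architectures a given image manifest includes, the manifest can be inspected. A quick sketch, assuming docker is installed and the image tag exists:

    docker manifest inspect quay.io/bpfman/bpfman:latest | grep architecture\n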
    "},{"location":"developer-guide/image-build/#locally-build-bpfman-operator-and-bpfman-agent-container-images","title":"Locally Build bpfman-operator and bpfman-agent Container Images","text":"

    When testing or developing in bpfman-operator, it may be necessary to run with updated changes to the bpfman-operator or bpfman-agent container images. The local Makefile will build and load both images based on the current changes:

    cd bpfman-operator/\n\nmake build-images\nmake run-on-kind\n
    "},{"location":"developer-guide/image-build/#locally-build-bpfman-container-image","title":"Locally Build bpfman Container Image","text":"

    When testing or developing in bpfman-operator, it may be necessary to run with updated changes to bpfman. By default, bpfman-agent uses quay.io/bpfman/bpfman:latest. To build the bpfman binaries in a container image, run:

    cd bpfman/\n\ndocker build -f ./Containerfile.bpfman.local . -t quay.io/$QUAY_USER/bpfman:test\n

    Any registry, image name, and tag can be used; the above is just an example. Next, build and deploy the bpfman-operator and bpfman-agent with the locally built bpfman container image.

    cd bpfman-operator/\n\nBPFMAN_IMG=quay.io/$QUAY_USER/bpfman:test make build-images\nBPFMAN_IMG=quay.io/$QUAY_USER/bpfman:test make run-on-kind\n

    To be used, the image must be accessible to the Kind cluster. So either the image needs to be pushed to a registry and made public (via the repo GUI after the push) before executing the make run-on-kind command shown above:

    docker push quay.io/$QUAY_USER/bpfman:test\n

    OR it can be loaded into the Kind cluster after the cluster is running:

    kind load docker-image quay.io/$QUAY_USER/bpfman:test --name bpfman-deployment\n

    Now the image should be running in the Kind cluster:

    kubectl get pods -A\n NAMESPACE   NAME                               READY   STATUS    RESTARTS   AGE\n bpfman      bpfman-daemon-87fqg                3/3     Running   0          16m\n bpfman      bpfman-operator-7f67bc7c57-bc6lk   2/2     Running   0          16m\n :\n\nkubectl describe pod -n bpfman bpfman-daemon-87fqg\n Name:             bpfman-daemon-87fqg\n Namespace:        bpfman\n :\n Containers:\n  bpfman:\n    Container ID:  containerd://1777d1810f3648f43df775e9d9af79406eaffc5694aa712da04c3f4e578093b3\n    Image:         quay.io/$QUAY_USER/bpfman:test\n    Image ID:      quay.io/$QUAY_USER/bpfman@sha256:f2c94b7acff6b463fc55232a1896816283521dd1ba5560b0d0779af99f811cd0\n:\n
    "},{"location":"developer-guide/image-build/#locally-build-tc-or-xdp-dispatcher-container-image","title":"Locally Build TC or XDP Dispatcher Container Image","text":"

    The TC and XDP Dispatcher images are automatically built and pushed to quay.io/ under the :latest tag whenever code is merged into the main branch of github.com/bpfman/bpfman. If a dispatcher container image needs to be built locally, use the following steps.

    Build the object files:

    cargo xtask build-ebpf --libbpf-dir ~/src/libbpf/\n\n$ ls .output/tc_dispatcher.bpf/\nbpf_arm64_bpfel.o  bpf_powerpc_bpfel.o  bpf_s390_bpfeb.o  bpf_x86_bpfel.o\n\n$ ls .output/xdp_dispatcher_v2.bpf/\nbpf_arm64_bpfel.o  bpf_powerpc_bpfel.o  bpf_s390_bpfeb.o  bpf_x86_bpfel.o\n

    Then build the bytecode image files:

    bpfman image build -f Containerfile.bytecode -t quay.io/$QUAY_USER/tc-dispatcher:test -b .output/tc_dispatcher.bpf/bpf_x86_bpfel.o\nbpfman image build -f Containerfile.bytecode -t quay.io/$QUAY_USER/xdp-dispatcher:test -b .output/xdp_dispatcher_v2.bpf/bpf_x86_bpfel.o\n

    If a multi-arch image is needed, use:

    bpfman image build -f Containerfile.bytecode.multi.arch -t quay.io/$QUAY_USER/tc-dispatcher:test -c .output/tc_dispatcher.bpf/\nbpfman image build -f Containerfile.bytecode.multi.arch -t quay.io/$QUAY_USER/xdp-dispatcher:test -c .output/xdp_dispatcher_v2.bpf/\n

    Note

    To build images for multiple architectures on a local system, docker (or podman) may need additional configuration settings to allow for caching of non-native images. See https://docs.docker.com/build/building/multi-platform/ for more details.
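
    For example, on many systems the QEMU emulators needed for non-native builds can be registered with the following (a hedged example based on the Docker multi-platform documentation linked above):

    docker run --privileged --rm tonistiigi/binfmt --install all\n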

    "},{"location":"developer-guide/image-build/#locally-build-example-container-images","title":"Locally Build Example Container Images","text":"

    The example images are automatically built and pushed to quay.io/ under the :latest tag whenever code is merged into the main branch of github.com/bpfman/bpfman. For each example, there is a bytecode and a userspace image. For official bpfman images, bytecode images are pushed to quay.io/bpfman-bytecode and userspace images are pushed to quay.io/bpfman-userspace. For example:

    • quay.io/bpfman-bytecode/go-kprobe-counter
    • quay.io/bpfman-bytecode/go-tc-counter
    • quay.io/bpfman-bytecode/go-tracepoint-counter
    • ...

    • quay.io/bpfman-userspace/go-kprobe-counter

    • quay.io/bpfman-userspace/go-tc-counter
    • quay.io/bpfman-userspace/go-tracepoint-counter
    • ...

    The Makefile in the examples directory has commands to build both sets of images. Image names and tags can be controlled using environment variables. If private images are being generated, both bytecode and userspace images will probably be pushed to the same account, so they will need to be distinguished by either fully qualified image names (using IMAGE_TC_BC, IMAGE_TC_US, IMAGE_XDP_BC, IMAGE_XDP_US, etc.) or unique tags for each (TAG_BC, TAG_US). See make help in the examples directory and the samples below.
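
    For example, to build the XDP bytecode and userspace images into a single private account using fully qualified image names, something like the following could be used (the image names here are hypothetical; see make help for the authoritative variable list):

    cd bpfman/examples/\nmake build-bc-xdp IMAGE_XDP_BC=quay.io/$QUAY_USER/go-xdp-counter-bc:test\nmake build-us-xdp IMAGE_XDP_US=quay.io/$QUAY_USER/go-xdp-counter-us:test\n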

    "},{"location":"developer-guide/image-build/#example-bytecode-container-images","title":"Example Bytecode Container Images","text":"

    If an example bytecode container image needs to be built locally, use the following to build the bytecode container image (optionally passing USER_BC and TAG_BC for the image):

    # Build images for all eBPF program types\n$ make build-bc-images USER_BC=$QUAY_USER TAG_BC=test-bc\n:\n => pushing quay.io/$QUAY_USER/go-kprobe-counter:test-bc with docker\n:\n => pushing quay.io/$QUAY_USER/go-tc-counter:test-bc with docker\n:\n => pushing quay.io/$QUAY_USER/go-tracepoint-counter:test-bc with docker\n:\n\n-- OR --\n\n# Build image for a single eBPF program type, XDP in this example\n$ make build-bc-xdp USER_BC=$QUAY_USER TAG_BC=test-bc\n:\n => pushing quay.io/$QUAY_USER/go-xdp-counter:test-bc with docker\n

    If a multi-arch image is needed, use (appending PLATFORM):

    $ make build-bc-xdp USER_BC=$QUAY_USER TAG_BC=test-bc PLATFORM=linux/amd64,linux/arm64,linux/ppc64le,linux/s390x\n:\n => pushing quay.io/$QUAY_USER/go-xdp-counter:test-bc with docker\n

    Note

    To build images for multiple architectures on a local system, docker (or podman) may need additional configuration settings to allow for caching of non-native images. See https://docs.docker.com/build/building/multi-platform/ for more details.

    "},{"location":"developer-guide/image-build/#example-userspace-container-images","title":"Example Userspace Container Images","text":"

    If an example userspace container image needs to be built locally, use the following to build the userspace container images (optionally passing USER_US and TAG_US for the image):

    cd bpfman/examples/\n\n# Build all images\n$ make build-us-images USER_US=$QUAY_USER TAG_US=test-us\n:\n => pushing quay.io/$QUAY_USER/go-kprobe-counter:test-us with docker\n:\n => pushing quay.io/$QUAY_USER/go-tc-counter:test-us with docker\n:\n => pushing quay.io/$QUAY_USER/go-tracepoint-counter:test-us with docker\n:\n\n-- OR --\n\n# Build a single image\n$ make build-us-xdp USER_US=$QUAY_USER TAG_US=test-us\n:\n => pushing quay.io/$QUAY_USER/go-xdp-counter:test-us with docker\n

    If a multi-arch image is needed, use (appending PLATFORM):

    $ make build-us-xdp USER_US=$QUAY_USER TAG_US=test-us PLATFORM=linux/amd64,linux/arm64,linux/ppc64le,linux/s390x\n:\n => pushing quay.io/$QUAY_USER/go-xdp-counter:test-us with docker\n

    Note

    To build images for multiple architectures on a local system, docker (or podman) may need additional configuration settings to allow for caching of non-native images. See https://docs.docker.com/build/building/multi-platform/ for more details.

    "},{"location":"developer-guide/image-build/#adding-additional-container-images","title":"Adding Additional Container Images","text":"

    When adding a new container image to one of the bpfman repositories, whether it be via the examples or integration tests, several steps need to be performed.

    • One of the maintainers of the bpfman quay.io repositories must:
      • Add the image to the quay.io repository.
      • Make the new image public.
      • On the image, provide Write access to the bpfman+github_actions robot account.
    • Add the new image to the bpfman/.github/workflows/image-build.yml so the image is built and pushed on each PR merge.
    • For examples, update the examples/Makefile to build the new images.
    "},{"location":"developer-guide/image-build/#signing-container-images","title":"Signing Container Images","text":"

    Signing eBPF container images is encouraged and can be easily done using cosign. Below is a summary of the steps needed to sign an image.

    First, install cosign:

    go install github.com/sigstore/cosign/v2/cmd/cosign@latest\n

    Then sign the image. The cosign command will generate a URL. Follow the sigstore URL and log in with GitHub, Google, or Microsoft. That will generate a verification code that will complete the cosign command.

    cosign sign -y quay.io/$QUAY_USER/test-image@sha256:55fe3cfe46409939876be27f7ed4d2948842918145f6cda167d0c31fdea2046f\nGenerating ephemeral keys...\nRetrieving signed certificate...\n:\nhttps://oauth2.sigstore.dev/auth/auth?access_type=online&client_id=sigstore&code_challenge=EwHYBahRxlbli-oEXxS9DoEzEWcyuS_f1lLBhntCVFI&code_challenge_method=S256&nonce=2kR9mJbP0eUxFBAQI9Nhs6LyS4l&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&response_type=code&scope=openid+email&state=2kR9mIqOn6IgmAw46BxVrnEEi0M\nEnter verification code: wq3g58qhw6y25wwibcz2kgzfx\n\nSuccessfully verified SCT...\ntlog entry created with index: 120018072\nPushing signature to: quay.io/$QUAY_USER/test-image\n
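
    Once signed, the signature can be checked with cosign verify. The following is a sketch only; the identity and issuer values are placeholders that depend on the account used to sign:

    cosign verify --certificate-identity=name@example.com --certificate-oidc-issuer=https://github.com/login/oauth quay.io/$QUAY_USER/test-image@sha256:55fe3cfe46409939876be27f7ed4d2948842918145f6cda167d0c31fdea2046f\n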
    "},{"location":"developer-guide/image-build/#containerfiles","title":"Containerfiles","text":"

    There are multiple Containerfiles in the bpfman repositories. Below is a summary of the files and their purpose.

    "},{"location":"developer-guide/image-build/#userspace-containerfiles","title":"Userspace Containerfiles","text":"
    • bpfman/Containerfile.bpfman.local: This file is used to create a userspace container image with bpfman binaries (bpfman CLI, bpfman-rpc and bpfman-ns). It can be used to run local bpfman code in a Kubernetes cluster with the bpfman-operator and bpfman-agent.
    • bpfman/Containerfile.bpfman.multi.arch: This file is used to create a userspace container image with bpfman binaries (bpfman CLI, bpfman-rpc and bpfman-ns), but for multiple architectures. It is used by the bpfman/.github/workflows/image-build.yaml file to build bpfman multi-arch images on every github Pull Request merge. The resulting images are stored in quay.io.
    • bpfman/Containerfile.bpfman.openshift: This file is used to create a userspace container image with bpfman binaries (bpfman CLI, bpfman-rpc and bpfman-ns). It is used by internal OpenShift build processes.
    • bpfman/examples/go-*-counter/container-deployment/Containerfile.go-*-counter: Where '*' is one of the bpfman supported program types (tc, tcx, tracepoint, etc.). These files are used to create the userspace container images associated with the examples.
    • bpfman-operator/Containerfile.bpfman-agent: This file is used to create a userspace container image with bpfman-agent.
    • bpfman-operator/Containerfile.bpfman-agent.openshift: This file is used to create a userspace container image with bpfman-agent. It is used by internal OpenShift build processes.
    • bpfman-operator/Containerfile.bpfman-operator: This file is used to create a userspace container image with bpfman-operator.
    • bpfman-operator/Containerfile.bpfman-operator.openshift: This file is used to create a userspace container image with bpfman-operator. It is used by internal OpenShift build processes.
    • bpfman-operator/Containerfile.bundle: This file is used to create a container image with all the Kubernetes object definitions (ConfigMaps, Custom Resource Definitions (CRDs), Roles, Role Bindings, Service, Service Accounts, etc) bpfman needs to be deployed in a Kubernetes cluster.
    "},{"location":"developer-guide/image-build/#bytecode-containerfiles","title":"Bytecode Containerfiles","text":"
    • bpfman/Containerfile.bytecode: This file is used to create a container image with eBPF bytecode packaged inside. The Containerfile applies labels to the container image describing the bytecode for consumers of the image. See eBPF Bytecode Image Specifications for more details.
    • bpfman/Containerfile.bytecode.multi.arch: This file is used to create a container image with eBPF bytecode packaged inside, but packages eBPF bytecode for multiple architectures. The Containerfile applies labels to the container image describing the bytecode for consumers of the image. See eBPF Bytecode Image Specifications for more details.
    "},{"location":"developer-guide/k8s-selinux-distros/","title":"Running the Examples as Non-Root on SELinux Distributions","text":"

    Developer instances of Kubernetes such as kind often set SELinux to permissive mode, ensuring the security subsystem does not interfere with the local cluster operations. However, in production distributions such as OpenShift, EKS, GKE, and AWS, where security is paramount, SELinux and other security subsystems are often enabled by default. This, among other things, presents unique challenges when determining how to deploy unprivileged applications with bpfman.

    In order to deploy the provided examples on SELinux distributions, users must first install the security-profiles-operator. This allows bpfman to deploy custom SELinux policies that grant container users access to bpf maps (i.e., map_read and map_write actions).

    It can easily be installed via operatorhub.io from here.

    Once the security-profiles-operator and bpfman are installed, simply deploy the desired examples:

    cd examples/\nmake deploy-tc-selinux\nmake deploy-xdp-selinux\n:\nmake undeploy-tc-selinux\nmake undeploy-xdp-selinux\n
    "},{"location":"developer-guide/linux-capabilities/","title":"Linux Capabilities","text":"

    Linux divides the privileges traditionally associated with superuser into distinct units, known as capabilities, which can be independently enabled and disabled. Capabilities are a per-thread attribute. See capabilities man-page.

    When bpfman is run as a systemd service, the set of Linux capabilities is restricted to only the required set via the bpfman.service file, using the AmbientCapabilities and CapabilityBoundingSet fields (see bpfman.service). All spawned threads are stripped of all capabilities, removing all sudo privileges (see drop_linux_capabilities() usage), leaving the main thread with only the needed set of capabilities.

    "},{"location":"developer-guide/linux-capabilities/#current-bpfman-linux-capabilities","title":"Current bpfman Linux Capabilities","text":"

    Below are the current set of Linux capabilities required by bpfman to operate:

    • CAP_BPF:
      • Required to load BPF programs and create BPF maps.
    • CAP_DAC_READ_SEARCH:
      • Required by Tracepoint programs, needed by aya to check the tracefs mount point. For example, trying to read \"/sys/kernel/tracing\" and \"/sys/kernel/debug/tracing\".
    • CAP_NET_ADMIN:
      • Required for TC programs to attach/detach to/from a qdisc.
    • CAP_SETPCAP:
      • Required to allow bpfman to drop Linux Capabilities on spawned threads.
    • CAP_SYS_ADMIN:
      • Kprobe (Kprobe and Uprobe) and Tracepoint programs are considered perfmon programs and require CAP_PERFMON and CAP_SYS_ADMIN to load.
      • TC and XDP programs are considered admin programs and require CAP_NET_ADMIN and CAP_SYS_ADMIN to load.
    • CAP_SYS_RESOURCE:
      • Required by bpfman to call setrlimit() on RLIMIT_MEMLOCK.
    "},{"location":"developer-guide/linux-capabilities/#debugging-linux-capabilities","title":"Debugging Linux Capabilities","text":"

    As new features are added, the set of Linux capabilities required by bpfman may change over time. The following describes the steps to determine the set of capabilities required by bpfman. If there are any Permission denied (os error 13) type errors when starting or running bpfman as a systemd service, adjusting the Linux capabilities is a good place to start.

    "},{"location":"developer-guide/linux-capabilities/#determine-required-capabilities","title":"Determine Required Capabilities","text":"

    The first step is to turn all capabilities on and see if that fixes the problem. This can be done without recompiling the code by editing bpfman.service. Comment out the finite list of granted capabilities and set the value to ~, which indicates all capabilities.

    sudo vi /usr/lib/systemd/system/bpfman.service\n:\n[Service]\n:\nAmbientCapabilities=~\nCapabilityBoundingSet=~\n#AmbientCapabilities=CAP_BPF CAP_DAC_OVERRIDE CAP_DAC_READ_SEARCH CAP_NET_ADMIN CAP_PERFMON CAP_SETPCAP CAP_SYS_ADMIN CAP_SYS_RESOURCE\n#CapabilityBoundingSet=CAP_BPF CAP_DAC_OVERRIDE CAP_DAC_READ_SEARCH CAP_NET_ADMIN CAP_PERFMON CAP_SETPCAP CAP_SYS_ADMIN CAP_SYS_RESOURCE\n

    Reload the service file and start/restart bpfman and watch the bpfman logs and see if the problem is resolved:

    sudo systemctl daemon-reload\nsudo systemctl start bpfman\n

    If so, then the next step is to watch the set of capabilities being requested by bpfman. Run the bcc capable tool to watch capabilities being requested in real time, and restart bpfman:

    $ sudo /usr/share/bcc/tools/capable\nTIME      UID    PID    COMM             CAP  NAME                 AUDIT\n:\n16:36:00  979    75553  tokio-runtime-w  8    CAP_SETPCAP          1\n16:36:00  979    75553  tokio-runtime-w  8    CAP_SETPCAP          1\n16:36:00  979    75553  tokio-runtime-w  8    CAP_SETPCAP          1\n16:36:00  0      616    systemd-journal  19   CAP_SYS_PTRACE       1\n16:36:00  0      616    systemd-journal  19   CAP_SYS_PTRACE       1\n16:36:00  979    75550  bpfman             24   CAP_SYS_RESOURCE     1\n16:36:00  979    75550  bpfman             1    CAP_DAC_OVERRIDE     1\n16:36:00  979    75550  bpfman             21   CAP_SYS_ADMIN        1\n16:36:00  979    75550  bpfman             21   CAP_SYS_ADMIN        1\n16:36:00  0      75555  modprobe         16   CAP_SYS_MODULE       1\n16:36:00  0      628    systemd-udevd    2    CAP_DAC_READ_SEARCH  1\n16:36:00  0      75556  bpf_preload      24   CAP_SYS_RESOURCE     1\n16:36:00  0      75556  bpf_preload      39   CAP_BPF              1\n16:36:00  0      75556  bpf_preload      39   CAP_BPF              1\n16:36:00  0      75556  bpf_preload      39   CAP_BPF              1\n16:36:00  0      75556  bpf_preload      38   CAP_PERFMON          1\n16:36:00  0      75556  bpf_preload      38   CAP_PERFMON          1\n16:36:00  0      75556  bpf_preload      38   CAP_PERFMON          1\n:\n

    Compare the output to list in bpfman.service and determine the delta.

    "},{"location":"developer-guide/linux-capabilities/#determine-capabilities-per-thread","title":"Determine Capabilities Per Thread","text":"

    For additional debugging, it may be helpful to know the granted capabilities on a per thread basis. As mentioned above, all spawned threads are stripped of all Linux capabilities, so if a thread is requesting a capability, that functionality should be moved off the spawned thread and onto the main thread.

    First, determine the bpfman process id, then determine the set of threads:

    $ ps -ef | grep bpfman\n:\nbpfman       75550       1  0 16:36 ?        00:00:00 /usr/sbin/bpfman\n:\n\n$ ps -T -p 75550\n    PID    SPID TTY          TIME CMD\n  75550   75550 ?        00:00:00 bpfman\n  75550   75551 ?        00:00:00 tokio-runtime-w\n  75550   75552 ?        00:00:00 tokio-runtime-w\n  75550   75553 ?        00:00:00 tokio-runtime-w\n  75550   75554 ?        00:00:00 tokio-runtime-w\n

    Then dump the capabilities of each thread:

    $ grep Cap /proc/75550/status\nCapInh: 000000c001201106\nCapPrm: 000000c001201106\nCapEff: 000000c001201106\nCapBnd: 000000c001201106\nCapAmb: 000000c001201106\n\n$ grep Cap /proc/75551/status\nCapInh: 0000000000000000\nCapPrm: 0000000000000000\nCapEff: 0000000000000000\nCapBnd: 0000000000000000\nCapAmb: 0000000000000000\n\n$ grep Cap /proc/75552/status\nCapInh: 0000000000000000\nCapPrm: 0000000000000000\nCapEff: 0000000000000000\nCapBnd: 0000000000000000\nCapAmb: 0000000000000000\n\n:\n\n$ capsh --decode=000000c001201106\n0x000000c001201106=cap_dac_override,cap_dac_read_search,cap_setpcap,cap_net_admin,cap_sys_admin,cap_sys_resource,cap_perfmon,cap_bpf\n
    "},{"location":"developer-guide/linux-capabilities/#removing-cap_bpf-from-bpfman-clients","title":"Removing CAP_BPF from bpfman Clients","text":"

    One of the advantages of using bpfman is that it is doing all the loading and unloading of eBPF programs, so it requires CAP_BPF, but clients of bpfman are just making gRPC calls to bpfman, so they do not need to be privileged or require CAP_BPF. It must be noted that this is only true for kernels 5.19 or higher. Prior to kernel 5.19, all eBPF syscalls required CAP_BPF, including those used to access maps shared between the BPF program and the userspace program. In kernel 5.19, a change went in that only requires CAP_BPF for map creation (BPF_MAP_CREATE) and loading programs (BPF_PROG_LOAD). See bpf: refine kernel.unprivileged_bpf_disabled behaviour.
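
    To check the kernel version on a given host, for example:

    uname -r\n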

    "},{"location":"developer-guide/logging/","title":"Logging","text":"

    This section describes how to enable logging in different bpfman deployments.

    "},{"location":"developer-guide/logging/#local-privileged-bpfman-process","title":"Local Privileged Bpfman Process","text":"

    bpfman uses the env_logger crate to log messages to the terminal. By default, only error messages are logged, but that can be overridden by setting the RUST_LOG environment variable. Valid values:

    • error
    • warn
    • info
    • debug
    • trace

    Example:

    $ sudo RUST_LOG=info /usr/local/bin/bpfman\n[2022-08-08T20:29:31Z INFO  bpfman::server] Loading static programs from /etc/bpfman/programs.d\n[2022-08-08T20:29:31Z INFO  bpfman::server::bpf] Map veth12fa8e3 to 13\n[2022-08-08T20:29:31Z INFO  bpfman::server] Listening on [::1]:50051\n[2022-08-08T20:29:31Z INFO  bpfman::server::bpf] Program added: 1 programs attached to veth12fa8e3\n[2022-08-08T20:29:31Z INFO  bpfman::server] Loaded static program pass with UUID d9fd88df-d039-4e64-9f63-19f3e08915ce\n
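
    env_logger also accepts per-module filter directives, so logging can be scoped if a global level is too noisy. A hedged example (the module name here is illustrative):

    sudo RUST_LOG=bpfman=debug /usr/local/bin/bpfman\n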
    "},{"location":"developer-guide/logging/#systemd-service","title":"Systemd Service","text":"

    If bpfman is running as a systemd service, then bpfman will log to journald. As with env_logger, by default, info and higher messages are logged, but that can be overridden by setting the RUST_LOG environment variable.

    Example:

    sudo vi /usr/lib/systemd/system/bpfman.service\n[Unit]\nDescription=Run bpfman as a service\nDefaultDependencies=no\nAfter=network.target\n\n[Service]\nEnvironment=\"RUST_LOG=Info\"    <==== Set Log Level Here\nExecStart=/usr/sbin/bpfman system service\nAmbientCapabilities=CAP_BPF CAP_DAC_READ_SEARCH CAP_NET_ADMIN CAP_PERFMON CAP_SYS_ADMIN CAP_SYS_RESOURCE\nCapabilityBoundingSet=CAP_BPF CAP_DAC_READ_SEARCH CAP_NET_ADMIN CAP_PERFMON CAP_SYS_ADMIN CAP_SYS_RESOURCE\n

    Start the service:

    sudo systemctl daemon-reload\nsudo systemctl start bpfman.service\n

    Check the logs:

    $ sudo journalctl -f -u bpfman\nAug 08 16:25:04 ebpf03 systemd[1]: Started bpfman.service - Run bpfman as a service.\nAug 08 16:25:04 ebpf03 bpfman[180118]: Loading static programs from /etc/bpfman/programs.d\nAug 08 16:25:04 ebpf03 bpfman[180118]: Map veth12fa8e3 to 13\nAug 08 16:25:04 ebpf03 bpfman[180118]: Listening on [::1]:50051\nAug 08 16:25:04 ebpf03 bpfman[180118]: Program added: 1 programs attached to veth12fa8e3\nAug 08 16:25:04 ebpf03 bpfman[180118]: Loaded static program pass with UUID a3ffa14a-786d-48ad-b0cd-a4802f0f10b6\n

    Stop the service:

    sudo systemctl stop bpfman.service\n
    "},{"location":"developer-guide/logging/#kubernetes-deployment","title":"Kubernetes Deployment","text":"

    When bpfman is run in a Kubernetes deployment, there is the bpfman Daemonset that runs on every node and the bpfman Operator that runs on the control plane:

    kubectl get pods -A\nNAMESPACE            NAME                                                    READY   STATUS    RESTARTS   AGE\nbpfman                 bpfman-daemon-dgqzw                                       2/2     Running   0          3d22h\nbpfman                 bpfman-daemon-gqsgd                                       2/2     Running   0          3d22h\nbpfman                 bpfman-daemon-zx9xr                                       2/2     Running   0          3d22h\nbpfman                 bpfman-operator-7fbf4888c4-z8w76                          2/2     Running   0          3d22h\n:\n
    "},{"location":"developer-guide/logging/#bpfman-daemonset","title":"bpfman Daemonset","text":"

    bpfman and bpfman-agent are running in the bpfman daemonset.

    "},{"location":"developer-guide/logging/#view-logs","title":"View Logs","text":"

    To view the bpfman logs:

    kubectl logs -n bpfman bpfman-daemon-dgqzw -c bpfman\n[2023-05-05T14:41:26Z INFO  bpfman] Has CAP_BPF: false\n[2023-05-05T14:41:26Z INFO  bpfman] Has CAP_SYS_ADMIN: true\n:\n

    To view the bpfman-agent logs:

    kubectl logs -n bpfman bpfman-daemon-dgqzw -c bpfman-agent\n{\"level\":\"info\",\"ts\":\"2023-12-20T20:15:34Z\",\"logger\":\"controller-runtime.metrics\",\"msg\":\"Metrics server is starting to listen\",\"addr\":\":8174\"}\n{\"level\":\"info\",\"ts\":\"2023-12-20T20:15:34Z\",\"logger\":\"setup\",\"msg\":\"Waiting for active connection to bpfman\"}\n{\"level\":\"info\",\"ts\":\"2023-12-20T20:15:34Z\",\"logger\":\"setup\",\"msg\":\"starting Bpfman-Agent\"}\n:\n
    "},{"location":"developer-guide/logging/#change-log-level","title":"Change Log Level","text":"

    To change the log level of the agent or daemon, edit the bpfman-config ConfigMap. The bpfman-operator will detect the change and restart the bpfman daemonset with the updated values.

    kubectl edit configmaps -n bpfman bpfman-config\napiVersion: v1\ndata:\n  bpfman.agent.image: quay.io/bpfman/bpfman-agent:latest\n  bpfman.image: quay.io/bpfman/bpfman:latest\n  bpfman.log.level: info                     <==== Set bpfman Log Level Here\n  bpfman.agent.log.level: info               <==== Set bpfman agent Log Level Here\nkind: ConfigMap\nmetadata:\n  creationTimestamp: \"2023-05-05T14:41:19Z\"\n  name: bpfman-config\n  namespace: bpfman\n  resourceVersion: \"700803\"\n  uid: 0cc04af4-032c-4712-b824-748b321d319b\n
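
    Alternatively, the same change can be scripted rather than made in an editor. A sketch of one way to do it with kubectl patch:

    kubectl patch configmap -n bpfman bpfman-config --type merge -p '{\"data\":{\"bpfman.log.level\":\"debug\"}}'\n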

    Valid values for the daemon (bpfman.log.level) are:

    • error
    • warn
    • info
    • debug
    • trace

    trace can be very verbose. More information regarding Rust's env_logger can be found here.

    Valid values for the agent (bpfman.agent.log.level) are:

    • info
    • debug
    • trace
    "},{"location":"developer-guide/logging/#bpfman-operator","title":"bpfman Operator","text":"

    The bpfman Operator is running as a Deployment with a ReplicaSet of one. It runs with the containers bpfman-operator and kube-rbac-proxy.

    "},{"location":"developer-guide/logging/#view-logs_1","title":"View Logs","text":"

    To view the bpfman-operator logs:

    kubectl logs -n bpfman bpfman-operator-7fbf4888c4-z8w76 -c bpfman-operator\n{\"level\":\"info\",\"ts\":\"2023-05-09T18:37:11Z\",\"logger\":\"controller-runtime.metrics\",\"msg\":\"Metrics server is starting to listen\",\"addr\":\"127.0.0.1:8174\"}\n{\"level\":\"info\",\"ts\":\"2023-05-09T18:37:11Z\",\"logger\":\"setup\",\"msg\":\"starting manager\"}\n{\"level\":\"info\",\"ts\":\"2023-05-09T18:37:11Z\",\"msg\":\"Starting server\",\"kind\":\"health probe\",\"addr\":\"[::]:8175\"}\n{\"level\":\"info\",\"ts\":\"2023-05-09T18:37:11Z\",\"msg\":\"Starting server\",\"path\":\"/metrics\",\"kind\":\"metrics\",\"addr\":\"127.0.0.1:8174\"}\nI0509 18:37:11.262885       1 leaderelection.go:248] attempting to acquire leader lease bpfman/8730d955.bpfman.io...\nI0509 18:37:11.268918       1 leaderelection.go:258] successfully acquired lease bpfman/8730d955.bpfman.io\n{\"level\":\"info\",\"ts\":\"2023-05-09T18:37:11Z\",\"msg\":\"Starting EventSource\",\"controller\":\"configmap\",\"controllerGroup\":\"\",\"controllerKind\":\"ConfigMap\",\"source\":\"kind source: *v1.ConfigMap\"}\n:\n

    To view the kube-rbac-proxy logs:

    kubectl logs -n bpfman bpfman-operator-7fbf4888c4-z8w76 -c kube-rbac-proxy\nI0509 18:37:11.063386       1 main.go:186] Valid token audiences: \nI0509 18:37:11.063485       1 main.go:316] Generating self signed cert as no cert is provided\nI0509 18:37:11.955256       1 main.go:366] Starting TCP socket on 0.0.0.0:8443\nI0509 18:37:11.955849       1 main.go:373] Listening securely on 0.0.0.0:8443\n
    "},{"location":"developer-guide/logging/#change-log-level_1","title":"Change Log Level","text":"

    To change the log level, edit the bpfman-operator Deployment. The change will be detected, and the bpfman-operator pod will be restarted with the updated log level.

    kubectl edit deployment -n bpfman bpfman-operator\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  annotations:\n    deployment.kubernetes.io/revision: \"1\"\n    kubectl.kubernetes.io/last-applied-configuration: |\n      {\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/component\":\"manager\",\"app.kubernetes.io/create>\n  creationTimestamp: \"2023-05-09T18:37:08Z\"\n  generation: 1\n:\nspec:\n:\n  template:\n    metadata:\n:\n    spec:\n      containers:\n      - args:\n:\n      - args:\n        - --health-probe-bind-address=:8175\n        - --metrics-bind-address=127.0.0.1:8174\n        - --leader-elect\n        command:\n        - /bpfman-operator\n        env:\n        - name: GO_LOG\n          value: info                   <==== Set Log Level Here\n        image: quay.io/bpfman/bpfman-operator:latest\n        imagePullPolicy: IfNotPresent\n:\n
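
    As a shortcut, the environment variable can also be updated without opening an editor. A sketch using kubectl set env (the container name is taken from the Deployment described above):

    kubectl set env deployment/bpfman-operator -n bpfman -c bpfman-operator GO_LOG=debug\n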

    Valid values are:

    • error
    • info
    • debug
    • trace
    "},{"location":"developer-guide/observability/","title":"Observability","text":""},{"location":"developer-guide/observability/#ebpf-metrics-exporter","title":"eBPF Metrics Exporter","text":"

    The eBPF Metrics Exporter (bpf-metrics-exporter) exports metrics from the kernel's BPF subsystem to OpenTelemetry.

    Note

    An initial set of metrics has been added as a proof of concept. The metrics can be enriched with other metrics from the system as use cases are identified. For example, a possible improvement could be to correlate process IDs -> containers -> k8s pods.

    "},{"location":"developer-guide/observability/#metrics","title":"Metrics","text":"

    The following metrics are currently exported; this list will continue to expand:

    "},{"location":"developer-guide/observability/#gauges","title":"Gauges","text":"
    • bpf_program_info: Information on each loaded BPF Program
      • Labels:
        • id: The ID of the BPF program
        • name: The name of the BPF program
        • type: The type of the BPF program as a readable string
        • tag: The tag of the BPF program
        • gpl_compatible: Whether the BPF program is GPL compatible
        • map_ids: List of associated maps, if any
        • load_time: The time the BPF program was loaded
    • bpf_map_info: Information on each loaded BPF Map
      • Labels:
        • id: The ID of the BPF map
        • name: The name of the BPF map
        • type: The type of the BPF map as a u32 which corresponds to the following kernel enumeration
        • key_size: The key size in bytes for the BPF map
        • value_size: The value size for the BPF map
        • max_entries: The maximum number of entries for the BPF map.
        • flags: Loadtime specific flags for the BPF map
    • bpf_link_info: Information on each loaded BPF Link
      • Labels:
        • id: The ID of the bpf Link
        • prog_id: The Program ID of the BPF program which is using the Link.
        • type: The BPF Link type as a u32 which corresponds to the following kernel enumeration
    • bpf_program_load_time: The standard UTC time the program was loaded in seconds
      • Labels:
        • id: The ID of the BPF program
        • name: The name of the BPF program
        • type: The type of the BPF program as a readable string
    "},{"location":"developer-guide/observability/#counters","title":"Counters","text":"
    • bpf_program_size_jitted_bytes: The size in bytes of the program's JIT-compiled machine code.
      • Labels:
        • id: The ID of the BPF program
        • name: The name of the BPF program
        • type: The type of the BPF program as a readable string
    • bpf_program_size_translated_bytes: The size of the BPF program in bytes.
      • Labels:
        • id: The ID of the BPF program
        • name: The name of the BPF program
        • type: The type of the BPF program as a readable string
    • bpf_program_mem_bytes: The amount of memory used by the BPF program in bytes.
      • Labels:
        • id: The ID of the BPF program
        • name: The name of the BPF program
        • type: The type of the BPF program as a readable string
    • bpf_program_verified_instructions: The number of instructions in the BPF program.
      • Labels:
        • id: The ID of the BPF program
        • name: The name of the BPF program
        • type: The type of the BPF program as a readable string
    • bpf_map_key_size: The size of the BPF map key
      • Labels:
        • id: The ID of the BPF map
        • name: The name of the BPF map
        • type: The type of the BPF map as a u32 which corresponds to the following kernel enumeration
    • bpf_map_value_size: The size of the BPF map value
      • Labels:
        • id: The ID of the BPF map
        • name: The name of the BPF map
        • type: The type of the BPF map as a u32 which corresponds to the following kernel enumeration
    • bpf_map_max_entries: The maximum number of entries allowed for the BPF map
      • Labels:
        • id: The ID of the BPF map
        • name: The name of the BPF map
        • type: The type of the BPF map as a u32 which corresponds to the following kernel enumeration

    Note

    All counters will need to have the suffix _total appended when exposed as a sample metric (for an example, search for _total in bpf-metrics-exporter/metrics-stack.yaml).

    "},{"location":"developer-guide/observability/#try-it-out","title":"Try it Out","text":"

    Grafana Stack:

    You'll need a Grafana stack set up. You can quickly deploy one using:

    podman play kube metrics-stack.yaml\n

    Installation:

    bpf-metrics-exporter can be installed using the installation script:

    cd bpfman/\nsudo ./scripts/setup.sh install\n

    Run:

    Then, you can deploy the exporter:

    sudo bpf-metrics-exporter\n

    Verify:

    You can log into grafana at http://localhost:3000/ using the default user:password admin:admin.

    From there, simply select the default dashboard titled eBPF Subsystem Metrics.

    Cleanup:

    In order to clean everything up, simply exit the bpf-metrics-exporter process with <CTRL>C and run:

    podman kube down metrics-stack.yaml\n
    "},{"location":"developer-guide/observability/#ebpf-log-exporter","title":"eBPF Log Exporter","text":"

    The eBPF Log Exporter (bpf-log-exporter) is a utility tool that registers with the kernel auditing service to receive audit events and filters for eBPF related events. Currently, these events are printed to the terminal. Long term, these events will be forwarded as logs to OpenTelemetry.

    Note

    eBPF Log Exporter is a work in progress. Currently, audit events are just printed to a terminal, but the long term plan is for these events to be forwarded as logs to OpenTelemetry similar to how bpf-metric-exporter is implemented.

    Prerequisites:

    • Auditing must be enabled in the kernel.
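
    To confirm that auditing is enabled, the audit status can be queried. A hedged example, assuming the auditctl utility from the audit package is installed:

    sudo auditctl -s\n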

    Installation:

    bpf-log-exporter can be installed using the installation script:

    cd bpfman/\nsudo ./scripts/setup.sh install\n

    Run:

    bpf-log-exporter needs root privileges to run. To see the logs, run with at least info level logs enabled.

    $ sudo RUST_LOG=info bpf-log-exporter\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301213.084\", prog_id: 326, op: \"LOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301213.095\", prog_id: 327, op: \"LOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301213.109\", prog_id: 326, op: \"UNLOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301213.109\", prog_id: 327, op: \"UNLOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301228.487\", prog_id: 328, op: \"LOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301228.488\", prog_id: 328, op: \"UNLOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301228.488\", prog_id: 329, op: \"LOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301228.488\", prog_id: 329, op: \"UNLOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n:\n

    Then use <CTRL>C to stop.

    "},{"location":"developer-guide/operator-quick-start/","title":"Deploying the bpfman-operator","text":"

    The bpfman-operator repository exists in order to deploy and manage bpfman within a Kubernetes cluster. This operator was built utilizing some great tooling provided by the operator-sdk library. A great first step in understanding some of the functionality is to run make help.

    "},{"location":"developer-guide/operator-quick-start/#deploy-bpfman-operation","title":"Deploy bpfman Operation","text":"

    The bpfman-operator is running as a Deployment with a ReplicaSet of one. It runs on the control plane and is composed of the containers bpfman-operator and kube-rbac-proxy. The operator is responsible for launching the bpfman Daemonset, which runs on every node. The bpfman Daemonset is composed of the containers bpfman, bpfman-agent, and node-driver-registrar.

    "},{"location":"developer-guide/operator-quick-start/#deploy-locally-via-kind","title":"Deploy Locally via KIND","text":"

    After reviewing the possible make targets, it's quick and easy to get bpfman deployed locally on your system via a KIND cluster:

    cd bpfman/bpfman-operator\nmake run-on-kind\n

    Note

    By default, bpfman-operator deploys bpfman with CSI enabled. CSI requires Kubernetes v1.26 due to a PR (kubernetes/kubernetes#112597) that addresses a gRPC Protocol Error seen in the CSI client code and does not appear to have been backported. It is recommended to install kind v0.20.0 or later.

    "},{"location":"developer-guide/operator-quick-start/#deploy-to-openshift-cluster","title":"Deploy To Openshift Cluster","text":"

    First deploy the operator with one of the following two options:

    "},{"location":"developer-guide/operator-quick-start/#1-manually-with-kustomize","title":"1. Manually with Kustomize","text":"

    To install manually with Kustomize and raw manifests, simply run the following commands. The OpenShift cluster needs to be up and running and specified in the ~/.kube/config file.

    cd bpfman/bpfman-operator\nmake deploy-openshift\n

    Which can then be cleaned up at a later time with:

    make undeploy-openshift\n
    "},{"location":"developer-guide/operator-quick-start/#2-via-the-olm-bundle","title":"2. Via the OLM bundle","text":"

    The other option for installing the bpfman-operator is to install it using the OLM bundle.

    First setup the namespace and certificates for the operator with:

    cd bpfman/bpfman-operator\noc apply -f ./hack/ocp-scc-hacks.yaml\n

    Then use operator-sdk to install the bundle like so:

    operator-sdk run bundle quay.io/bpfman/bpfman-operator-bundle:latest --namespace openshift-bpfman\n

    Which can then be cleaned up at a later time with:

    operator-sdk cleanup bpfman-operator\n

    followed by

    oc delete -f ./hack/ocp-scc-hacks.yaml\n
    "},{"location":"developer-guide/operator-quick-start/#verify-the-installation","title":"Verify the Installation","text":"

    Independent of the method used to deploy, if the bpfman-operator came up successfully, you will see the bpfman-daemon and bpfman-operator pods running without errors:

    kubectl get pods -n bpfman\nNAME                             READY   STATUS    RESTARTS   AGE\nbpfman-daemon-w24pr                3/3     Running   0          130m\nbpfman-operator-78cf9c44c6-rv7f2   2/2     Running   0          132m\n
    "},{"location":"developer-guide/operator-quick-start/#deploy-an-ebpf-program-to-the-cluster","title":"Deploy an eBPF Program to the cluster","text":"

    To test the deployment, simply deploy one of the sample xdpPrograms:

    cd bpfman/bpfman-operator/\nkubectl apply -f config/samples/bpfman.io_v1alpha1_xdp_pass_xdpprogram.yaml\n

    If the XDP program was successfully loaded on the selected nodes, it will be reported back to the user via the xdpProgram's status field:

    kubectl get xdpprogram xdp-pass-all-nodes -o yaml\napiVersion: bpfman.io/v1alpha1\nkind: XdpProgram\nmetadata:\n  annotations:\n    kubectl.kubernetes.io/last-applied-configuration: |\n      {\"apiVersion\":\"bpfman.io/v1alpha1\",\"kind\":\"XdpProgram\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/name\":\"xdpprogram\"},\"name\":\"xdp-pass-all-nodes\"},\"spec\":{\"bpffunctionname\":\"pass\",\"bytecode\":{\"image\":{\"url\":\"quay.io/bpfman-bytecode/xdp_pass:latest\"}},\"globaldata\":{\"GLOBAL_u32\":[13,12,11,10],\"GLOBAL_u8\":[1]},\"interfaceselector\":{\"primarynodeinterface\":true},\"nodeselector\":{},\"priority\":0}}\n  creationTimestamp: \"2023-11-07T19:16:39Z\"\n  finalizers:\n  - bpfman.io.operator/finalizer\n  generation: 2\n  labels:\n    app.kubernetes.io/name: xdpprogram\n  name: xdp-pass-all-nodes\n  resourceVersion: \"157187\"\n  uid: 21c71a61-4e73-44eb-9b49-07af2866d25b\nspec:\n  bpffunctionname: pass\n  bytecode:\n    image:\n      imagepullpolicy: IfNotPresent\n      url: quay.io/bpfman-bytecode/xdp_pass:latest\n  globaldata:\n    GLOBAL_u8: AQ==\n    GLOBAL_u32: DQwLCg==\n  interfaceselector:\n    primarynodeinterface: true\n  mapownerselector: {}\n  nodeselector: {}\n  priority: 0\n  proceedon:\n  - pass\n  - dispatcher_return\nstatus:\n  conditions:\n  - lastTransitionTime: \"2023-11-07T19:16:42Z\"\n    message: bpfProgramReconciliation Succeeded on all nodes\n    reason: ReconcileSuccess\n    status: \"True\"\n    type: ReconcileSuccess\n

    To see the information in listing form, simply run:

    kubectl get xdpprogram -o wide\nNAME                 BPFFUNCTIONNAME   NODESELECTOR   PRIORITY   INTERFACESELECTOR               PROCEEDON\nxdp-pass-all-nodes   pass              {}             0          {\"primarynodeinterface\":true}   [\"pass\",\"dispatcher_return\"]\n
    "},{"location":"developer-guide/operator-quick-start/#api-types-overview","title":"API Types Overview","text":"

    See api-spec.md for a more detailed description of all the bpfman Kubernetes API types.

    "},{"location":"developer-guide/operator-quick-start/#multiple-program-crds","title":"Multiple Program CRDs","text":"

    The multiple *Program CRDs are the bpfman Kubernetes API objects most relevant to users and can be used to understand clusterwide state for an eBPF program. They are designed to express how and where eBPF programs are to be deployed within a Kubernetes cluster. Currently, bpfman supports:

    • fentryProgram
    • fexitProgram
    • kprobeProgram
    • tcProgram
    • tracepointProgram
    • uprobeProgram
    • xdpProgram
    "},{"location":"developer-guide/operator-quick-start/#bpfprogram-crd","title":"BpfProgram CRD","text":"

    The BpfProgram CRD is used internally by the bpfman deployment to keep track of per-node bpfman state, such as map pin points, and to report node-specific errors back to the user. Kubernetes users/controllers are only allowed to view these objects, NOT create or edit them.

    Applications wishing to use bpfman to deploy/manage their eBPF programs in Kubernetes will make use of this object to find references to the bpfMap pin points (spec.maps) in order to configure their eBPF programs.
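
    For example, the per-node objects can be listed and inspected for their reported state (a sketch; <instance-name> is a placeholder that varies per node and program):

    kubectl get bpfprograms\nkubectl get bpfprograms <instance-name> -o yaml\n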

    "},{"location":"developer-guide/release/","title":"Release Process","text":"

    This document describes the process for making a release for the bpfman project.

    "},{"location":"developer-guide/release/#overview","title":"Overview","text":"

    The bpfman project includes both the bpfman and bpfman-operator repositories. When a release is made for the project, a release is created for each repository with the same version number.

    Each bpfman project release is composed of the following major components:

    • bpfman (Core library) and bpfman-api (Core GRPC API protobuf definitions) library crates
    • bpfman (CLI), and bpfman-rpc (gRPC server) binary crates
    • bpf-metrics-exporter and bpf-log-exporter binary crates
    • bpfman RPMs stored in the bpfman COPR repository.
    • Kubernetes User Facing Custom Resource Definitions (CRDs)
      • BpfApplication
      • FentryProgram
      • FexitProgram
      • KprobeProgram
      • TcProgram
      • TcxProgram
      • TracepointProgram
      • UprobeProgram
      • XdpProgram
    • Corresponding go pkgs in the form of github.com/bpfman/bpfman which includes the following:
      • github.com/bpfman/bpfman/clients/gobpfman/v1: The go client and helpers for the bpfman gRPC API.
    • Corresponding go pkgs in the form of github.com/bpfman/bpfman-operator which includes the following:
      • github.com/bpfman/bpfman-operator/apis: The go bindings for the bpfman CRD API
      • github.com/bpfman/bpfman-operator/pkg/client: The autogenerated clientset for the bpfman CRD API
      • github.com/bpfman/bpfman-operator/pkg/helpers: The provided bpfman CRD API helpers.
    • The following core component container images with tag <RELEASE_VERSION>:
      • quay.io/bpfman/bpfman-agent
      • quay.io/bpfman/bpfman-operator-bundle
      • quay.io/bpfman/bpfman-operator
      • quay.io/bpfman/bpfman
      • quay.io/bpfman/tc-dispatcher
      • quay.io/bpfman/xdp-dispatcher
    • The relevant example bytecode container images with tag <RELEASE_VERSION> from source code located in the bpfman project:
      • quay.io/bpfman-bytecode/fentry
      • quay.io/bpfman-bytecode/fexit
      • quay.io/bpfman-bytecode/go-app-counter
      • quay.io/bpfman-bytecode/go-kprobe-counter
      • quay.io/bpfman-bytecode/go-tc-counter
      • quay.io/bpfman-bytecode/go-tracepoint-counter
      • quay.io/bpfman-bytecode/go-uprobe-counter
      • quay.io/bpfman-bytecode/go-xdp-counter
      • quay.io/bpfman-bytecode/kprobe
      • quay.io/bpfman-bytecode/kretprobe
      • quay.io/bpfman-bytecode/tc-pass
      • quay.io/bpfman-bytecode/tcx-test
      • quay.io/bpfman-bytecode/tracepoint
      • quay.io/bpfman-bytecode/uprobe
      • quay.io/bpfman-bytecode/uretprobe
      • quay.io/bpfman-bytecode/xdp-pass-private
      • quay.io/bpfman-bytecode/xdp-pass
    • The relevant example userspace container images with tag <RELEASE_VERSION> from source code located in the bpfman project:
      • quay.io/bpfman-userspace/go-app-counter
      • quay.io/bpfman-userspace/go-kprobe-counter
      • quay.io/bpfman-userspace/go-target
      • quay.io/bpfman-userspace/go-tc-counter
      • quay.io/bpfman-userspace/go-tcx-counter
      • quay.io/bpfman-userspace/go-tracepoint-counter
      • quay.io/bpfman-userspace/go-uprobe-counter
      • quay.io/bpfman-userspace/go-xdp-counter
    • The OLM (Operator Lifecycle Manager) for the Kubernetes Operator.
      • This includes a bundle directory on disk as well as the quay.io/bpfman/bpfman-operator-bundle image with the tag <RELEASE_VERSION>.
    "},{"location":"developer-guide/release/#versioning-strategy","title":"Versioning strategy","text":""},{"location":"developer-guide/release/#release-version-number","title":"Release Version Number","text":"

    bpfman uses the MAJOR.MINOR.PATCH scheme defined by SemVer for version numbers in which the components are defined as follows:

    • MAJOR: Incremented for incompatible API changes.
    • MINOR: Incremented for adding functionality in a backward-compatible manner.
    • PATCH: Incremented for backward-compatible bug fixes.

    Major version zero (0.y.z) is for initial development. If the MAJOR version is 0, anything MAY change at any time, and the public API SHOULD NOT be considered stable.

    Releases are tagged in git with the version number prefixed by \"v\". For example, release version 0.5.2 is tagged as v0.5.2.

    "},{"location":"developer-guide/release/#kubernetes-api-versions-eg-v1alpha2-v1beta1","title":"Kubernetes API Versions (e.g. v1alpha2, v1beta1)","text":"

    Within the bpfman-operator, API versions are primarily used to indicate the stability of a resource. For example, if a resource has not yet graduated to beta, it is still possible that it could either be removed from the API or changed in backward incompatible ways. For more information on API versions, refer to the Kubernetes API versioning documentation.

    "},{"location":"developer-guide/release/#releasing-a-new-version","title":"Releasing a new version","text":""},{"location":"developer-guide/release/#release-process-overview","title":"Release Process Overview","text":"

    Since bpfman and bpfman-operator are maintained in separate repositories, each requires an independent release. However, to ensure version consistency, we plan to synchronize the release versions of both projects. Therefore, whenever a release is needed for either bpfman or bpfman-operator, both repositories will be released with the same version number.

    As bpfman-operator depends on bpfman, it is essential to release bpfman first, followed by bpfman-operator.

    Whenever possible, releases are made on the main branch of each repository and should follow the Standard Release from Main Branch process. However, it is sometimes necessary to \"patch\" a previous release with some but not all of the changes that exist on the main branch. In those cases, a patch branch is created from the tag of the release being patched and the release is done on that branch as described in the Patch Branch Release section. Finally, if it is necessary to test the release automation, the simplified process described in the Release Candidate Release section can be used.

    "},{"location":"developer-guide/release/#generating-release-notes","title":"Generating Release Notes","text":"

    The release notes are contained in CHANGELOG files stored in the changelogs directory of each repository. The change log name must contain the release version (e.g., CHANGELOG-v0.5.2.md).

    To simplify the generation of the release notes details, we are using the GitHub release page as described below. Note that we only use the release page to generate a starting point for the release notes, and don't actually create a tag or do a release from it.

    1. Go to the bpfman releases page.
    2. Push the \"Draft a new release\" button.
    3. Enter the new release number in the \"Choose a tag\" pull-down.
    4. Choose the most recent release in the \"Previous tag\" pull-down.
    5. Push the \"Generate release notes\" button.

    The automatically generated output will likely need to be reorganized and cleaned up a bit, but it provides a good starting point.

    The format for the CHANGELOG file is as follows:

    1. Summary of the major changes and highlights. For example: \"The v0.5.2 release is a patch release that introduced...\"
    2. What's Changed (minor changes may be removed from the list generated by GitHub)
    3. Full Changelog
    4. New Contributors
    5. Known Issues

    Notes on generating the changelog

    • Empty sections should be omitted.
    • Sections 2-3 may be copied and pasted from the text generated with the GitHub releases page process described above.
    • The CHANGELOG for a given release is used by GitHub to generate the initial content for that release on the bpfman releases page. However, after the release has been generated, updates to the CHANGELOG file are not automatically reflected on the GitHub releases page, so the GitHub releases page must be manually edited using the GitHub GUI.
    • Unlike most markdown, the generated output on the GitHub releases page renders each newline in the CHANGELOG file. So each paragraph should be on a single line, or it will not flow as intended.
    "},{"location":"developer-guide/release/#standard-release-from-main-branch","title":"Standard Release from Main Branch","text":"

    This section describes the standard release process used when making a release from the main branch and may be used for major, minor, or patch releases. As mentioned above, we first complete the release for bpfman and then follow that up with a release for bpfman-operator.

    "},{"location":"developer-guide/release/#bpfman-release","title":"bpfman Release","text":"
    • Create a new branch in your bpfman fork, for example <githubuser>/release-x.y.z, and use the new branch in the upcoming steps.
    • Make the following changes
      • Add a new changelog for the release using the process described in Generating Release Notes.
      • Update the Cargo.toml version for the workspace:
        • version = \"x.y.z\"
        • bpfman = { version = \"x.y.z\", path = \"./bpfman\" }
        • bpfman-api = { version = \"x.y.z\", path = \"./bpfman-api\" }
        • Note: bpfman-csi does not need to be updated.
      • Run cargo generate-lockfile
      • Update the bpfman version in the bpfman/examples/Makefile:
        • VERSION ?= x.y.z
      • Add new bpfman/examples/config/v0.x.y/ and bpfman/examples/config/v0.x.y-selinux/ config directories for the release version by copying the latest release directories and running a search-and-replace from the current release to the new release.
      • Add new example config directories for any new examples added since the last release.
      • Update dispatcher tags.
        • Modify the tag for XDP_DISPATCHER_IMAGE and TC_DISPATCHER_IMAGE in bpfman/src/lib.rs from latest to the new release tag.
        • Manually add the new release tag to the latest version of the following dispatcher images:
          • https://quay.io/repository/bpfman/xdp-dispatcher
          • https://quay.io/repository/bpfman/tc-dispatcher
      • Search the code and docs for the current version number without the \"v\" (e.g., 0.5.1) and replace it with the new version number where it makes sense. (Be careful, though, because not all occurrences should be replaced.)
    • Commit the changes, push them to your repo, and open a PR against the bpfman repo.
    • After the PR is reviewed, merged, and all GitHub actions have completed successfully, tag the release with the version number (e.g., v0.5.2).
      • Tag the release using the commit on main where the changelog update merged.
      • A maintainer or someone with write permission on the repo must create the tag.
      • This can be done using the git CLI or GitHub's release page (see the sketch below).
    • The Release will be automatically created by GitHub actions when the tag is applied.
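
    As a rough sketch, the version bump and tagging steps above might look like the following; the version numbers, remote name, and use of sed are illustrative, and every replacement should be verified by hand:

    cd bpfman/\ngit checkout -b <githubuser>/release-0.5.2\n# Bump the workspace version (verify each replacement manually).\nsed -i 's/0\\.5\\.1/0.5.2/g' Cargo.toml\ncargo generate-lockfile\n# ... commit, push, and open a PR against the bpfman repo ...\n\n# After the PR merges, a maintainer tags the merge commit:\ngit fetch upstream\ngit tag v0.5.2 <merge-commit-sha>\ngit push upstream v0.5.2\n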

    After these steps are completed, the following should occur:

    • All GitHub actions should complete successfully.
    • The release appears on the GitHub Releases Page.
    • Images are built and updated with the new version tag at:
      • quay.io/bpfman
      • quay.io/bpfman-bytecode
      • quay.io/bpfman-userspace
    • The new version appears at crates.io
    • New RPMs are built and pushed to the bpfman COPR repository.

    After the release is complete, do the following:

    • Run make build-release-yamls from the bpfman/examples directory, and then add the generated yaml files to the release as assets from the GitHub release page (see the sketch after this list).
      • The yaml files generated include:
        • bpfman-crds-install.yaml
        • bpfman-operator-install.yaml
        • go-app-counter-install-selinux.yaml
        • go-app-counter-install.yaml
        • go-kprobe-counter-install-selinux.yaml
        • go-kprobe-counter-install.yaml
        • go-tc-counter-install-selinux.yaml
        • go-tc-counter-install.yaml
        • go-tcx-counter-install-selinux.yaml
        • go-tcx-counter-install.yaml
        • go-tracepoint-counter-install-selinux.yaml
        • go-tracepoint-counter-install.yaml
        • go-uprobe-counter-install-selinux.yaml
        • go-uprobe-counter-install.yaml
        • go-uretprobe-counter-install-selinux.yaml
        • go-uretprobe-counter-install.yaml
        • go-xdp-counter-install-selinux.yaml
        • go-xdp-counter-install.yaml
    • Do another PR that changes the tag for XDP_DISPATCHER_IMAGE and TC_DISPATCHER_IMAGE in bpfman/src/lib.rs back to latest.
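
    A sketch of attaching the generated yaml files as release assets using the GitHub CLI; the output directory is an assumption and depends on where make build-release-yamls writes its files:

    cd bpfman/examples/\nmake build-release-yamls\n# Output location assumed; adjust to wherever the yaml files are generated.\ngh release upload v0.5.2 .output/*.yaml\n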
    "},{"location":"developer-guide/release/#bpfman-operator-release","title":"bpfman-operator Release","text":"
    • Create a new branch in your bpfman-operator fork, for example <githubuser>/release-x.y.z, and use the new branch in the upcoming steps.
    • Make the following changes:
      • Add a new changelog for the release using the process described in Generating Release Notes.
      • Update the bpfman version in go.mod
      • Run the following commands from the bpfman-operator directory:
        go mod vendor\ngo mod tidy\n
      • Update the bpfman-operator version in the Makefile:
        • VERSION ?= x.y.z
      • Run make bundle from the bpfman-operator directory to update the bundle version.
      • Update the version in the links in README.md
      • Update the version in the OpenShift Containerfiles.
    • Commit the changes, push them to your repo, and open a PR against the bpfman-operator repo.
    • After the PR is reviewed, merged, and all GitHub actions have completed successfully, tag the release with the version number (e.g., v0.5.4).
      • Tag the release using the commit on main where the changelog update merged.
      • A maintainer or someone with write permission on the repo must create the tag.
      • This can be done using the git CLI or GitHub's release page.
    • The Release will be automatically created by GitHub actions when the tag is applied.

    After these steps are completed, the following should occur:

    • All GitHub actions should complete successfully.
    • The release appears on the GitHub Releases Page.
    • Images are built and updated with the new version tag at:
      • quay.io/bpfman/bpfman-operator
      • quay.io/bpfman/bpfman-agent

    After the release completes:

    • Update the community-operator and community-operators-prod repositories with the latest bundle manifests.
      • Run IMAGE_TAG=vx.y.z make bundle from bpfman-operator.
      • Manually update the following tags in bundle/manifests/bpfman-operator.clusterserviceversion.yaml, as sketched after this list (TODO: automate this step).
        • Change :latest to :vx.y.z for the example image URLs.
        • Change \"containerImage: quay.io/bpfman/bpfman-operator:latest\" to \"containerImage: quay.io/bpfman/bpfman-operator:vx.y.z\".
      • Open a PR in each of the community operator repos with the following:
        • Create a new release directory named x.y.z under operators/bpfman-operator/ in each repo.
        • Copy bpfman-operator/{manifests, metadata, tests} and bpfman-operator/Containerfile.bundle.openshift into the new release directory.
      • Lessons learned about updating the community operators:
        • These PRs usually auto-merge as soon as all checks pass, and once a bundle for a release is merged, it cannot be modified. If any errors are found in the bundle files after merging, the only solution is to create a new release and open a new PR in each community operator repository.
        • If you start a PR in the community-operator repository as a draft and later mark it as ready for review, it will still auto-merge. However, this auto-merge behavior doesn't apply in the community-operators-prod repository, where a maintainer must manually merge the PR if you start it as a draft.
        • To streamline the process, it's recommended that you begin with a draft PR in the community-operator repository to allow for review. Once the PR is reviewed and all checks pass, mark it as ready for review. After it merges, submit a PR with the same bundle to the community-operators-prod repository.
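
    A sketch of the bundle preparation steps above; the version, paths, and sed command are illustrative, and the edits should always be verified by hand:

    cd bpfman-operator/\nIMAGE_TAG=v0.5.4 make bundle\n# Replace :latest with the release tag in the CSV (verify manually).\nsed -i 's/:latest/:v0.5.4/g' bundle/manifests/bpfman-operator.clusterserviceversion.yaml\n# Copy the bundle into a community operators checkout (paths assumed).\nmkdir -p ../community-operators/operators/bpfman-operator/0.5.4\ncp -r bundle/manifests bundle/metadata bundle/tests Containerfile.bundle.openshift ../community-operators/operators/bpfman-operator/0.5.4/\n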
    "},{"location":"developer-guide/release/#patch-branch-release","title":"Patch Branch Release","text":"

    The patch branch release process is essentially the same as that for the standard release with the following exceptions.

    Do the following for each repo:

    • If this is the first patch release for a given release, someone with write permissions on the repo (e.g., one of the maintainers) must create a branch from the git tag of the release you want to patch.
      • If patching vx.y.z, the patch branch should be named release-vx.y.z-patch.
    • Create a branch for your changes from the upstream branch.
    • Cherry-pick the relevant commits.
    • Do other fixups if necessary.

    Then, follow the steps from Standard Release from Main Branch section, except open your PRs against the release branch.
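
    For example, the patch branch setup might look like the following, with the version, remote, branch, and commit names being placeholders:

    # A maintainer creates the patch branch from the release tag:\ngit checkout -b release-v0.5.2-patch v0.5.2\ngit push upstream release-v0.5.2-patch\n\n# Contributors then branch from it and cherry-pick the relevant commits:\ngit checkout -b <githubuser>/my-fixes upstream/release-v0.5.2-patch\ngit cherry-pick <commit-sha>\n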

    "},{"location":"developer-guide/release/#release-candidate-release","title":"Release Candidate Release","text":"

    Cutting a release candidate is often a great way to test changes to the release infrastructure before cutting an official release. Make sure release candidate versions contain an rc suffix (e.g., 0.4.0-rc1). This is a lighter-weight process, meaning many of the versioned manifests do not need to be created.

    As in the other releases, first complete the release for bpfman and then follow that up with a release for bpfman-operator.

    "},{"location":"developer-guide/release/#bpfman-release_1","title":"bpfman Release","text":"
    • Create a new branch in your bpfman fork based on the appropriate upstream branch, for example <githubuser>/release-x.y.z-rc1, and use the new branch in the upcoming steps.
    • Make the following changes:
      • Add a new changelog for the release. A full set of release notes is not required. A single line that says something like \"Pre-release 1 for v0.5.2\" is sufficient.
      • Update the Cargo.toml version for the workspace:
        • version = \"x.y.z-rc1\"
        • bpfman = { version = \"x.y.z-rc1\", path = \"./bpfman\" }
        • bpfman-api = { version = \"x.y.z-rc1\", path = \"./bpfman-api\" }
        • Note: bpfman-csi does not need to be updated.
      • Run cargo generate-lockfile
    • Commit the changes, push them to your repo, and open a PR against the bpfman repo.
    • After the PR is reviewed, merged, and all GitHub actions have completed successfully, tag the release with the version number (e.g., vx.y.z-rc1).
      • Tag the release using the commit on main where the changelog update merged.
      • A maintainer or someone with write permission on the repo must create the tag.
      • This can be done using the git CLI or GitHub's release page.
    • The Release will be automatically created by GitHub actions when the tag is applied.

    After these steps are completed, the following should occur:

    • All GitHub actions should complete successfully.
    • The release appears on the GitHub Releases Page.
    • Images are built and updated with the new version tag at:
      • quay.io/bpfman
      • quay.io/bpfman-bytecode
      • quay.io/bpfman-userspace
    • The new version appears at crates.io
    • A new RPM is built and pushed to the bpfman COPR repository.
    "},{"location":"developer-guide/release/#bpfman-operator-release_1","title":"bpfman-operator Release","text":"
    • Create a new branch in your bpfman-operator fork based on the appropriate upstream branch, for example <githubuser>/release-x.y.z-rc1, and use the new branch in the upcoming steps.
    • Make the following changes:
      • Add a new changelog for the release. A full set of release notes is not required. A single line that says something like \"Pre-release 1 for v0.5.2\" is sufficient.
      • Update the bpfman-operator version in the Makefile:
        • VERSION ?= x.y.z-rc1
    • Commit the changes, push them to your repo, and open a PR against the bpfman-operator repo.
    • After the PR is reviewed, merged, and all GitHub actions have completed successfully, tag the release with the version number (e.g., vx.y.z-rc1).
      • Tag the release using the commit on main where the changelog update merged.
      • A maintainer or someone with write permission on the repo must create the tag.
      • This can be done using the git CLI or GitHub's release page.
    • The Release will be automatically created by GitHub actions when the tag is applied.

    After these steps are completed, the following should occur:

    • All GitHub actions should complete successfully.
    • The release appears on the GitHub Releases Page.
    • Images are built and updated with the new version tag at:
      • quay.io/bpfman/bpfman-operator
      • quay.io/bpfman/bpfman-agent
    "},{"location":"developer-guide/shipping-bytecode/","title":"eBPF Bytecode Image Specifications","text":""},{"location":"developer-guide/shipping-bytecode/#introduction","title":"Introduction","text":"

    The eBPF Bytecode Image specification defines how to package eBPF bytecode as container images. The initial primary use case focuses on the containerization and deployment of eBPF programs within container orchestration systems such as Kubernetes, where it is necessary to provide a portable way to distribute bytecode to all nodes which need it.

    "},{"location":"developer-guide/shipping-bytecode/#specifications","title":"Specifications","text":"

    We provide two distinct spec variants here to ensure interoperability with existing registries and packages which do not support the new custom media types defined here.

    • custom-data-type-spec
    • backwards-compatible-spec
    "},{"location":"developer-guide/shipping-bytecode/#backwards-compatible-oci-compliant-spec","title":"Backwards compatible OCI compliant spec","text":"

    This variant makes use of existing OCI conventions to represent eBPF Bytecode as container images.

    "},{"location":"developer-guide/shipping-bytecode/#image-layers","title":"Image Layers","text":"

    The container images following this variant must contain exactly one layer whose media type is one of the following:

    • application/vnd.oci.image.layer.v1.tar+gzip or the compatible application/vnd.docker.image.rootfs.diff.tar.gzip

    Additionally, the image layer must contain a valid eBPF object file (generally with a .o extension) placed at the root of the layer (./).

    "},{"location":"developer-guide/shipping-bytecode/#image-labels","title":"Image Labels","text":"

    To provide relevant metadata regarding the bytecode to any consumers, some relevant labels MUST be defined on the image.

    These labels are dynamic and defined as follows:

    • io.ebpf.programs: A label which defines the eBPF programs stored in the bytecode image. The value of the label must be a valid JSON object whose keys specify the program names and whose values specify the program types, e.g., \"{ \"pass\" : \"xdp\" , \"counter\" : \"tc\", ...}\".

    • io.ebpf.maps: A label which defines the eBPF maps stored in the bytecode image. The value of the label must be a valid JSON object whose keys specify the map names and whose values specify the map types, e.g., \"{ \"xdp_stats_map\" : \"per_cpu_array\", ...}\".
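
    To illustrate, a minimal single-layer Containerfile following this variant might look like the following. The object file name and label values are placeholders; the real Containerfile.bytecode referenced below may parameterize them at build time:

    FROM scratch\n\n# Placeholder bytecode object placed at the root of the single layer.\nCOPY bpf_x86_bpfel.o /\n\n# Placeholder labels describing the programs and maps in the image.\nLABEL io.ebpf.programs=\"{\\\"pass\\\":\\\"xdp\\\"}\"\nLABEL io.ebpf.maps=\"{\\\"xdp_stats_map\\\":\\\"per_cpu_array\\\"}\"\n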

    "},{"location":"developer-guide/shipping-bytecode/#building-a-backwards-compatible-oci-compliant-image","title":"Building a Backwards compatible OCI compliant image","text":"

    Bpfman does not provide wrappers around compilers like clang since many eBPF libraries (e.g., aya, libbpf, cilium-ebpf) already do so, meaning users are expected to pass in the correct eBPF program bytecode for the appropriate platform. However, bpfman does provide a few image builder commands to make this whole process easier.

    Example Containerfiles for single-arch and multi-arch can be found at Containerfile.bytecode and Containerfile.bytecode.multi.arch.

    "},{"location":"developer-guide/shipping-bytecode/#host-platform-architecture-image-build","title":"Host Platform Architecture Image Build","text":"
    bpfman image build -b ./examples/go-xdp-counter/bpf_x86_bpfel.o -f Containerfile.bytecode --tag quay.io/<USER>/go-xdp-counter\n

    Where ./examples/go-xdp-counter/bpf_x86_bpfel.o is the path to the bytecode object file.

    Users can also use skopeo to ensure the image follows the backwards compatible version of the spec:

    • skopeo inspect will show the correctly configured labels stored in the configuration layer (application/vnd.oci.image.config.v1+json) of the image.
    skopeo inspect docker://quay.io/bpfman-bytecode/go-xdp-counter\n{\n    \"Name\": \"quay.io/bpfman-bytecode/go-xdp-counter\",\n    \"Digest\": \"sha256:e8377e94c56272937689af88a1a6231d4d594f83218b5cda839eaeeea70a30d3\",\n    \"RepoTags\": [\n        \"latest\"\n    ],\n    \"Created\": \"2024-05-30T09:17:15.327378016-04:00\",\n    \"DockerVersion\": \"\",\n    \"Labels\": {\n        \"io.ebpf.maps\": \"{\\\"xdp_stats_map\\\":\\\"per_cpu_array\\\"}\",\n        \"io.ebpf.programs\": \"{\\\"xdp_stats\\\":\\\"xdp\\\"}\"\n    },\n    \"Architecture\": \"amd64\",\n    \"Os\": \"linux\",\n    \"Layers\": [\n        \"sha256:c0d921d3f0d077da7cdfba8c0240fb513789e7698cdf326f80f30f388c084cff\"\n    ],\n    \"LayersData\": [\n        {\n            \"MIMEType\": \"application/vnd.docker.image.rootfs.diff.tar.gzip\",\n            \"Digest\": \"sha256:c0d921d3f0d077da7cdfba8c0240fb513789e7698cdf326f80f30f388c084cff\",\n            \"Size\": 2656,\n            \"Annotations\": null\n        }\n    ],\n    \"Env\": [\n        \"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"\n    ]\n}\n
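
    If jq is installed, the labels can be extracted from the skopeo output directly, for example:

    skopeo inspect docker://quay.io/bpfman-bytecode/go-xdp-counter | jq '.Labels'\n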
    "},{"location":"developer-guide/shipping-bytecode/#multi-architecture-image-build","title":"Multi-Architecture Image build","text":"
    bpfman image build -t quay.io/bpfman-bytecode/go-xdp-counter-multi --container-file ./Containerfile.bytecode.multi.arch --bc-amd64-el ./examples/go-xdp-counter/bpf_x86_bpfel.o --bc-s390x-eb ./examples/go-xdp-counter/bpf_s390_bpfeb.o\n

    To better understand the available architectures, users can use podman manifest inspect:

    podman manifest inspect quay.io/bpfman-bytecode/go-xdp-counter:test-manual-build\n{\n    \"schemaVersion\": 2,\n    \"mediaType\": \"application/vnd.docker.distribution.manifest.list.v2+json\",\n    \"manifests\": [\n        {\n            \"mediaType\": \"application/vnd.oci.image.manifest.v1+json\",\n            \"size\": 478,\n            \"digest\": \"sha256:aed62d2e5867663fac66822422512a722003b40453325fd873bbb5840d78cba9\",\n            \"platform\": {\n                \"architecture\": \"amd64\",\n                \"os\": \"linux\"\n            }\n        },\n        {\n            \"mediaType\": \"application/vnd.oci.image.manifest.v1+json\",\n            \"size\": 478,\n            \"digest\": \"sha256:a348fe2f26dc0851518d8d82e1049d2c39cc2e4f37419fe9231c1967abc4828c\",\n            \"platform\": {\n                \"architecture\": \"arm64\",\n                \"os\": \"linux\"\n            }\n        },\n        {\n            \"mediaType\": \"application/vnd.oci.image.manifest.v1+json\",\n            \"size\": 478,\n            \"digest\": \"sha256:d5c5d41d2d21e0cb5fb79fe9f343e540942c9a1657cf0de96b8f63e43d369743\",\n            \"platform\": {\n                \"architecture\": \"ppc64le\",\n                \"os\": \"linux\"\n            }\n        },\n        {\n            \"mediaType\": \"application/vnd.oci.image.manifest.v1+json\",\n            \"size\": 478,\n            \"digest\": \"sha256:7915c83838d73268690381b313fb84b5509912aa351c98c78204584cced50efd\",\n            \"platform\": {\n                \"architecture\": \"s390x\",\n                \"os\": \"linux\"\n            }\n        }\n    ]\n}\n
    "},{"location":"developer-guide/shipping-bytecode/#custom-oci-compatible-spec","title":"Custom OCI compatible spec","text":"

    This variant of the eBPF bytecode image spec uses custom OCI media types to represent eBPF bytecode as container images. Many toolchains and registries may not support this yet.

    TODO https://github.com/bpfman/bpfman/issues/1162

    "},{"location":"developer-guide/testing/","title":"Testing","text":"

    This document describes the automated testing that is done for each pull request submitted to bpfman, and also provides instructions for running them locally when doing development.

    "},{"location":"developer-guide/testing/#unit-testing","title":"Unit Testing","text":"

    Unit testing is executed as part of the build job by running the following command in the top-level bpfman directory.

    cd bpfman/\ncargo test\n
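
    A subset of the unit tests can also be run by passing a name filter to cargo test, for example (the filter string is a placeholder):

    cargo test <test_name_filter>\n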
    "},{"location":"developer-guide/testing/#go-example-tests","title":"Go Example Tests","text":"

    Tests are run for each of the example programs found in the examples directory.

    Detailed description TBD

    "},{"location":"developer-guide/testing/#basic-integration-tests","title":"Basic Integration Tests","text":"

    The full set of basic integration tests are executed by running the following command in the top-level bpfman directory.

    cd bpfman/\ncargo xtask integration-test\n

    Optionally, a subset of the integration tests can be run by appending \"--\" and a list of one or more test names to the end of the command, as shown below.

    cargo xtask integration-test -- test_load_unload_xdp test_proceed_on_xdp\n

    The integration tests start a bpfman daemon process, and issue CLI commands to verify a range of functionality. For XDP and TC programs that are installed on network interfaces, the integration test code creates a test network namespace connected to the host by a veth pair on which the programs are attached. The test code uses the IP subnet 172.37.37.1/24 for the namespace. If that address conflicts with an existing network on the host, it can be changed by setting the BPFMAN_IP_PREFIX environment variable to one that is available as shown below.

    export BPFMAN_IP_PREFIX=\"192.168.50\"\n

    If bpfman logs are needed to help debug an integration test, set RUST_LOG either globally or for a given test.

    export RUST_LOG=info\n
    OR
    RUST_LOG=info cargo xtask integration-test -- test_load_unload_xdp test_proceed_on_xdp\n

    There are two categories of integration tests: basic and e2e. The basic tests verify basic CLI functionality such as loading, listing, and unloading programs. The e2e tests verify more advanced functionality such as the setting of global variables, priority, and proceed-on by installing the programs, creating traffic if needed, and examining logs to confirm that things are running as expected.

    Most eBPF test programs are loaded from container images stored on quay.io. The source code for the eBPF test programs can be found in the tests/integration-test/bpf directory. These programs are compiled by executing cargo xtask build-ebpf --libbpf-dir <libbpf dir>.

    We also load some tests from local files to test the bpfman load file option.

    "},{"location":"developer-guide/testing/#kubernetes-operator-tests","title":"Kubernetes Operator Tests","text":""},{"location":"developer-guide/testing/#kubernetes-operator-unit-tests","title":"Kubernetes Operator Unit Tests","text":"

    To run all of the unit tests defined in the bpfman-operator controller code run make test in the bpfman-operator directory.

    cd bpfman-operator/\nmake test\n
    "},{"location":"developer-guide/testing/#kubernetes-operator-integration-tests","title":"Kubernetes Operator Integration Tests","text":"

    To run the Kubernetes Operator integration tests locally:

    1. Build the example test code userspace images locally.

      cd bpfman/examples/\nmake build-us-images\n
    2. (Optional) Build the bytecode images.

      In order to rebuild all of the bytecode images for a PR, ask a maintainer to do so; they will be built and generated by GitHub actions with the tag quay.io/bpfman-bytecode/<example>:<branch-name>.

    3. Build the bpfman images locally with a unique tag, for example: int-test

      cd bpfman-operator/\nBPFMAN_AGENT_IMG=quay.io/bpfman/bpfman-agent:int-test BPFMAN_OPERATOR_IMG=quay.io/bpfman/bpfman-operator:int-test make build-images\n
    4. Run the integration test suite with the images from the previous step:

      cd bpfman-operator/\nBPFMAN_AGENT_IMG=quay.io/bpfman/bpfman-agent:int-test BPFMAN_OPERATOR_IMG=quay.io/bpfman/bpfman-operator:int-test make test-integration\n

      If an updated bpfman image is required, build it separately and pass it to make test-integration using BPFMAN_IMG. See Locally Build bpfman Container Image.

      Additionally, the integration test suite can be configured with the following environment variables:

      • KEEP_TEST_CLUSTER: If set to true, the test cluster will not be torn down after the integration test suite completes.
      • USE_EXISTING_KIND_CLUSTER: If set to the name of an existing kind cluster, the integration test suite will use that cluster instead of creating a new one.
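
      For example, to run the suite against an existing kind cluster and keep it afterwards (the cluster name here is hypothetical):

      cd bpfman-operator/\nKEEP_TEST_CLUSTER=true USE_EXISTING_KIND_CLUSTER=my-kind-cluster make test-integration\n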
    "},{"location":"developer-guide/xdp-overview/","title":"XDP Tutorial","text":"

    The XDP hook point is unique in that the associated eBPF program attaches to an interface and only one eBPF program is allowed to attach to the XDP hook point for a given interface. Due to this limitation, the libxdp protocol was written. The one program that is attached to the XDP hook point is an eBPF dispatcher program. The dispatcher program contains a list of 10 stub functions. When XDP programs wish to be loaded, they are loaded as extension programs which are then called in place of one of the stub functions.

    bpfman leverages the libxdp protocol to allow its users to load up to 10 XDP programs on a given interface. This tutorial will show you how to use bpfman to load multiple XDP programs on an interface.

    Note

    The TC hook point is also associated with an interface. Within bpfman, TC is implemented in a similar fashion to XDP in that it uses a dispatcher with stub functions. TCX is a fairly new kernel feature that improves how the kernel handles multiple TC programs on a given interface. bpfman is in the process of integrating TCX support, which will replace the dispatcher logic for TC. Until then, assume TC behaves in a similar fashion to XDP.

    See Launching bpfman for more detailed instructions on building and loading bpfman. This tutorial assumes bpfman has been built and the bpfman CLI is in $PATH.

    "},{"location":"developer-guide/xdp-overview/#load-xdp-program","title":"Load XDP program","text":"

    We will load the simple xdp-pass program, which permits all traffic to the attached interface, eno3 in this example. We will use a priority of 100. See the CLI Guide for a deeper dive into CLI syntax.

    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest --name pass \\\n  xdp --iface eno3 --priority 100\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6213\n Map Owner ID:  None\n Map Used By:   6213\n Priority:      100\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6213\n Name:                             pass\n Type:                             xdp\n Loaded At:                        2023-07-17T17:48:10-0400\n Tag:                              4b9d1b2c140e87ce\n GPL Compatible:                   true\n Map IDs:                          [2724]\n BTF ID:                           2834\n Size Translated (bytes):          96\n JITed:                            true\n Size JITed (bytes):               67\n Kernel Allocated Memory (bytes):  4096\n Verified Instruction Count:       9\n

    bpfman load image returns the same data as a bpfman get command. From the output, the Program Id of 6213 can be found in the Kernel State section. This id can be used to perform a bpfman get to retrieve all relevant program data and a bpfman unload when the program needs to be unloaded.

    sudo bpfman list\n Program ID  Name  Type  Load Time\n 6213        pass  xdp   2023-07-17T17:48:10-0400\n

    We can recheck the details about the loaded program with the bpfman get command:

    sudo bpfman get 6213\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6213\n Map Owner ID:  None\n Map Used By:   6213\n Priority:      100\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6213\n Name:                             pass\n Type:                             xdp\n Loaded At:                        2023-07-17T17:48:10-0400\n Tag:                              4b9d1b2c140e87ce\n GPL Compatible:                   true\n Map IDs:                          [2724]\n BTF ID:                           2834\n Size Translated (bytes):          96\n JITed:                            true\n Size JITed (bytes):               67\n Kernel Allocated Memory (bytes):  4096\n Verified Instruction Count:       9\n

    From the output above, you can see the program was loaded at position 0 on our interface and thus will be executed first.

    "},{"location":"developer-guide/xdp-overview/#loading-additional-xdp-programs","title":"Loading Additional XDP Programs","text":"

    We will now load 2 more programs with different priorities to demonstrate how bpfman will ensure they are ordered correctly:

    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest --name pass \\\n  xdp --iface eno3 --priority 50\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6215\n Map Owner ID:  None\n Map Used By:   6215\n Priority:      50\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6215\n Name:                             pass\n Type:                             xdp\n:\n
    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest --name pass \\\n  xdp --iface eno3 --priority 200\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6217\n Map Owner ID:  None\n Map Used By:   6217\n Priority:      200\n Iface:         eno3\n Position:      2\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6217\n Name:                             pass\n Type:                             xdp\n:\n

    Using bpfman list we can see all the programs that were loaded.

    sudo bpfman list\n Program ID  Name  Type  Load Time\n 6213        pass  xdp   2023-07-17T17:48:10-0400\n 6215        pass  xdp   2023-07-17T17:52:46-0400\n 6217        pass  xdp   2023-07-17T17:53:57-0400\n

    The lowest priority program is executed first, while the highest is executed last, as can be seen from the detailed output for each program below:

    • Program 6215 is at position 0 with a priority of 50
    • Program 6213 is at position 1 with a priority of 100
    • Program 6217 is at position 2 with a priority of 200
    sudo bpfman get 6213\n Bpfman State\n---------------\n Name:          pass\n:\n Priority:      100\n Iface:         eno3\n Position:      1\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6213\n Name:                             pass\n Type:                             xdp\n:\n
    sudo bpfman get 6215\n Bpfman State\n---------------\n Name:          pass\n:\n Priority:      50\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6215\n Name:                             pass\n Type:                             xdp\n:\n
    sudo bpfman get 6217\n Bpfman State\n---------------\n Name:          pass\n:\n Priority:      200\n Iface:         eno3\n Position:      2\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6217\n Name:                             pass\n Type:                             xdp\n:\n

    By default, the next program in the chain will only be executed if a given program returns pass (see proceed-on field in the bpfman get output above). If the next program in the chain should be called even if a different value is returned, then the program can be loaded with those additional return values using the proceed-on parameter (see bpfman load image xdp --help for list of valid values):

    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest --name pass \\\n  xdp --iface eno3 --priority 150 --proceed-on \"pass\" --proceed-on \"dispatcher_return\"\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6219\n Map Owner ID:  None\n Map Used By:   6219\n Priority:      150\n Iface:         eno3\n Position:      2\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6219\n Name:                             pass\n Type:                             xdp\n:\n

    This results in the program being loaded at position 2, because its priority of 150 is lower than the priority of 200 held by the program previously at that position.

    "},{"location":"developer-guide/xdp-overview/#delete-xdp-program","title":"Delete XDP Program","text":"

    Let's remove the program at position 1.

    sudo bpfman list\n Program ID  Name  Type  Load Time\n 6213        pass  xdp   2023-07-17T17:48:10-0400\n 6215        pass  xdp   2023-07-17T17:52:46-0400\n 6217        pass  xdp   2023-07-17T17:53:57-0400\n 6219        pass  xdp   2023-07-17T17:59:41-0400\n
    sudo bpfman unload 6213\n

    And we can verify that it has been removed and the other programs re-ordered:

    sudo bpfman list\n Program ID  Name  Type  Load Time\n 6215        pass  xdp   2023-07-17T17:52:46-0400\n 6217        pass  xdp   2023-07-17T17:53:57-0400\n 6219        pass  xdp   2023-07-17T17:59:41-0400\n
    bpfman get 6215\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6215\n Map Owner ID:  None\n Map Used By:   6215\n Priority:      50\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6215\n Name:                             pass\n Type:                             xdp\n:\n
    bpfman get 6217\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6217\n Map Owner ID:  None\n Map Used By:   6217\n Priority:      200\n Iface:         eno3\n Position:      2\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6217\n Name:                             pass\n Type:                             xdp\n:\n
    bpfman get 6219\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6219\n Map Owner ID:  None\n Map Used By:   6219\n Priority:      150\n Iface:         eno3\n Position:      1\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6219\n Name:                             pass\n Type:                             xdp\n:\n
    "},{"location":"getting-started/building-bpfman/","title":"Setup and Building bpfman","text":"

    This section describes how to build bpfman. If this is the first time building bpfman, the Development Environment Setup section describes all packages needed to build bpfman.

    There is also an option to run prebuilt images from a given release or from an RPM, as opposed to building locally. Jump to:

    • Run bpfman From Release Image for installing from a prebuilt fixed release.
    • Run bpfman From RPM for installing from a prebuilt RPM.
    "},{"location":"getting-started/building-bpfman/#kernel-versions","title":"Kernel Versions","text":"

    eBPF is still a relatively new technology that is being actively developed. To take advantage of this constantly evolving technology, it is best to use the newest kernel version possible. If bpfman needs to be run on an older kernel, this section describes some of the kernel features bpfman relies on to work and which kernel the feature was first introduced.

    Major kernel features leveraged by bpfman:

    • Program Extensions: Program Extensions allow bpfman to load multiple XDP or TC eBPF programs on an interface, which is not natively supported in the kernel. A dispatcher program is loaded as the one program on a given interface, and the user's XDP or TC programs are loaded as extensions to the dispatcher program. Introduced in Kernel 5.6.
    • Pinning: Pinning allows the eBPF program to remain loaded when the loading process (bpfman) is stopped or restarted. Introduced in Kernel 4.11.
    • BPF Perf Link: Support BPF perf link for tracing programs (Tracepoint, Uprobe and Kprobe) which enables pinning for these program types. Introduced in Kernel 5.15.
    • Relaxed CAP_BPF Requirement: Prior to Kernel 5.19, all eBPF system calls required CAP_BPF. This required userspace programs that wanted to access eBPF maps to have the CAP_BPF Linux capability. With the kernel 5.19 change, CAP_BPF is only required for load and unload requests.
    • TCX: TCX support was added in Kernel 6.6 and is expected to be added to bpfman in an upcoming release. TCX has performance improvements over TC and adds support in the kernel for multiple TCX programs to run on a given TC hook point.

    bpfman tested on older kernel versions:

    • Fedora 34: Kernel 5.17.6-100.fc34.x86_64
      • XDP, TC, Tracepoint, Uprobe and Kprobe programs all loaded with bpfman running on localhost and running as systemd service.
    • Fedora 33: Kernel 5.14.18-100.fc33.x86_64
      • XDP and TC programs loaded with bpfman running on localhost and running as systemd service once SELinux was disabled (see https://github.com/fedora-selinux/selinux-policy/pull/806).
      • Tracepoint, Uprobe and Kprobe programs failed to load because they require the BPF Perf Link support.
    • Fedora 32: Kernel 5.11.22-100.fc32.x86_64
      • XDP and TC programs loaded with bpfman running on localhost once SELinux was disabled (see https://github.com/fedora-selinux/selinux-policy/pull/806).
      • bpfman fails to run as a systemd service because of some capabilities issues in the bpfman.service file.
      • Tracepoint, Uprobe and Kprobe programs failed to load because they require the BPF Perf Link support.
    • Fedora 31: Kernel 5.8.18-100.fc31.x86_64
      • bpfman was able to start on localhost, but XDP and TC programs wouldn't load because the BPF_LINK_CREATE call was updated in newer kernels.
      • bpfman fails to run as a systemd service because of some capabilities issues in the bpfman.service file.
    "},{"location":"getting-started/building-bpfman/#development-environment-setup","title":"Development Environment Setup","text":"

    To build bpfman, the following packages must be installed.

    "},{"location":"getting-started/building-bpfman/#install-rust-toolchain","title":"Install Rust Toolchain","text":"

    For further detailed instructions, see Rust Stable & Rust Nightly.

    curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh\nsource \"$HOME/.cargo/env\"\nrustup toolchain install nightly -c rustfmt,clippy,rust-src\n
    "},{"location":"getting-started/building-bpfman/#install-llvm","title":"Install LLVM","text":"

    LLVM 11 or later must be installed. Linux package managers should provide a recent enough release.

    dnf based OS:

    sudo dnf install llvm-devel clang-devel elfutils-libelf-devel\n

    apt based OS:

    sudo apt install clang lldb lld libelf-dev gcc-multilib\n
    "},{"location":"getting-started/building-bpfman/#install-ssl-library","title":"Install SSL Library","text":"

    dnf based OS:

    sudo dnf install openssl-devel\n

    apt based OS:

    sudo apt install libssl-dev\n
    "},{"location":"getting-started/building-bpfman/#install-bpf-helper-header-files","title":"Install bpf Helper Header Files","text":"

    apt based OS:

    sudo apt install libbpf-dev\n
    "},{"location":"getting-started/building-bpfman/#install-protobuf-compiler","title":"Install Protobuf Compiler","text":"

    If any of the Protobuf files need to be updated, then the protobuf-compiler will need to be installed. See RPC Protobuf Generation for bpfman use of protobufs and see protoc for more detailed installation instructions.

    dnf based OS:

    sudo dnf install protobuf-compiler\n

    apt based OS:

    sudo apt install protobuf-compiler\n
    "},{"location":"getting-started/building-bpfman/#install-go-protobuf-compiler-extensions","title":"Install GO protobuf Compiler Extensions","text":"

    See Quick Start Guide for gRPC in Go for installation instructions.

    "},{"location":"getting-started/building-bpfman/#local-libbpf","title":"Local libbpf","text":"

    Check out a local copy of libbpf.

    git clone https://github.com/libbpf/libbpf --branch v0.8.0\n
    "},{"location":"getting-started/building-bpfman/#install-perl","title":"Install perl","text":"

    Install perl:

    dnf based OS:

    sudo dnf install perl\n

    apt based OS:

    sudo apt install perl\n
    "},{"location":"getting-started/building-bpfman/#install-docker","title":"Install docker","text":"

    To build the bpfman-agent and bpfman-operator using the provided Makefile and the make build-images command, docker needs to be installed. There are several existing guides:

    • Fedora: https://developer.fedoraproject.org/tools/docker/docker-installation.html
    • Linux: https://docs.docker.com/engine/install/
    "},{"location":"getting-started/building-bpfman/#install-kind","title":"Install Kind","text":"

    Optionally, to test bpfman running in Kubernetes, the easiest method and the one documented throughout the bpfman documentation is to run a Kubernetes Kind cluster. See kind for documentation and installation instructions. kind also requires docker to be installed.

    Note

    By default, bpfman-operator deploys bpfman with CSI enabled. CSI requires Kubernetes v1.26 due to a PR (kubernetes/kubernetes#112597) that addresses a gRPC Protocol Error that was seen in the CSI client code and it doesn't appear to have been backported. kind v0.20.0 or later is recommended.

    If the following error is seen, it means there is an older version of Kubernetes running and it needs to be upgraded.

    kubectl get pods -A\nNAMESPACE   NAME                               READY   STATUS             RESTARTS      AGE\nbpfman      bpfman-daemon-2hnhx                2/3     CrashLoopBackOff   4 (38s ago)   2m20s\nbpfman      bpfman-operator-6b6cf97857-jbvv4   2/2     Running            0             2m22s\n:\n\nkubectl logs -n bpfman bpfman-daemon-2hnhx -c node-driver-registrar\n:\nE0202 15:33:12.342704       1 main.go:101] Received NotifyRegistrationStatus call: &RegistrationStatus{PluginRegistered:false,Error:RegisterPlugin error -- plugin registration failed with err: rpc error: code = Internal desc = stream terminated by RST_STREAM with error code: PROTOCOL_ERROR,}\nE0202 15:33:12.342723       1 main.go:103] Registration process failed with error: RegisterPlugin error -- plugin registration failed with err: rpc error: code = Internal desc = stream terminated by RST_STREAM with error code: PROTOCOL_ERROR, restarting registration container.\n
    "},{"location":"getting-started/building-bpfman/#install-bash-completion","title":"Install bash-completion","text":"

    bpfman uses the Rust crate clap for the CLI implementation. clap has an optional Rust crate clap_complete. For bash shell, it leverages bash-completion for CLI Command completion. So in order for CLI completion to work in a bash shell, bash-completion must be installed. This feature is optional.

    For the CLI completion to work after installation, /etc/profile.d/bash_completion.sh must be sourced in the running sessions. New login sessions should pick it up automatically.

    dnf based OS:

    sudo dnf install bash-completion\nsource /etc/profile.d/bash_completion.sh\n

    apt based OS:

    sudo apt install bash-completion\nsource /etc/profile.d/bash_completion.sh\n
    "},{"location":"getting-started/building-bpfman/#install-yaml-formatter","title":"Install Yaml Formatter","text":"

    As part of CI, the Yaml files are validated with a Yaml formatter. Optionally, to verify locally, install the YAML Language Support by Red Hat VS Code extension, or to format in bulk, install prettier.

    To install prettier:

    npm install -g prettier\n

    Then to flag which files are violating the formatting guide, run:

    prettier -l \"*.yaml\"\n

    And to write changes in place, run:

    prettier --write \"*.yaml\"\n
    "},{"location":"getting-started/building-bpfman/#install-toml-formatter","title":"Install toml Formatter","text":"

    As part of CI, the toml files are validated with a toml formatter. Optionally, to verify locally, install taplo.

    cargo install taplo-cli\n

    And to verify locally:

    taplo fmt --check\n
    "},{"location":"getting-started/building-bpfman/#clone-the-bpfman-and-bpfman-operator-repositories","title":"Clone the bpfman and bpfman-operator Repositories","text":"

    You can build and run bpfman from anywhere. For simplicity throughout this documentation, all examples will reference bpfman/ and bpfman-operator/ to indicate which repository is being used. bpfman-operator only needs to be cloned if deploying in Kubernetes.

    mkdir -p $HOME/src/\ncd $HOME/src/\ngit clone https://github.com/bpfman/bpfman.git\ngit clone https://github.com/bpfman/bpfman-operator.git\n
    "},{"location":"getting-started/building-bpfman/#building-bpfman","title":"Building bpfman","text":"

    If you are building bpfman for the first time OR the eBPF code has changed:

    cd bpfman/\ncargo xtask build-ebpf --libbpf-dir /path/to/libbpf\n

    If protobuf files have changed (see RPC Protobuf Generation):

    cargo xtask build-proto\n

    To build bpfman:

    cargo build\n
    "},{"location":"getting-started/building-bpfman/#building-cli-tab-completion-files","title":"Building CLI TAB completion files","text":"

    Optionally, to build the CLI TAB completion files, run the following command:

    cd bpfman/\ncargo xtask build-completion\n

    Files are generated for different shells:

    ls .output/completions/\n_bpfman  bpfman.bash  bpfman.elv  bpfman.fish  _bpfman.ps1\n
    "},{"location":"getting-started/building-bpfman/#bash","title":"bash","text":"

    For bash, this generates a file that can be used by the linux bash-completion utility (see Install bash-completion for installation instructions).

    If the files are generated, they are installed automatically when using the install script (i.e. sudo ./scripts/setup.sh install - See Run as a systemd Service). To install the files manually, copy the file associated with a given shell to /usr/share/bash-completion/completions/. For example:

    sudo cp .output/completions/bpfman.bash /usr/share/bash-completion/completions/.\n\nbpfman g<TAB>\n
    "},{"location":"getting-started/building-bpfman/#other-shells","title":"Other shells","text":"

    Files are generated for other shells (Elvish, Fish, PowerShell and zsh). For these shells, the generated files must be installed manually.

    "},{"location":"getting-started/building-bpfman/#building-cli-manpages","title":"Building CLI Manpages","text":"

    Optionally, to build the CLI Manpage files, run the following command:

    cd bpfman/\ncargo xtask build-man-page\n

    If the files are generated, they are installed automatically when using the install script (i.e. sudo ./scripts/setup.sh install - See Run as a systemd Service). To install the files manually, copy the generated files to /usr/local/share/man/man1/. For example:

    sudo cp .output/manpage/bpfman*.1 /usr/local/share/man/man1/.\n

    Once installed, use man to view the pages.

    man bpfman list\n

    Note

    bpfman commands with subcommands (specifically bpfman load) have - in the manpage subcommand generation. So use man bpfman load-file, man bpfman load-image, man bpfman load-image-xdp, etc. to display the subcommand manpage files.

    "},{"location":"getting-started/cli-guide/","title":"CLI Guide","text":"

    bpfman offers several CLI commands to interact with the bpfman daemon. The CLI allows you to load, unload, get and list eBPF programs.

    "},{"location":"getting-started/cli-guide/#notes-for-this-guide","title":"Notes For This Guide","text":"

    As described in other sections, bpfman can be run as either a privileged process or a systemd service. If run as a privileged process, bpfman will most likely be run from your local development branch and will require sudo. Example:

    sudo ./target/debug/bpfman list\n

    If run as a systemd service, bpfman will most likely be installed in your $PATH, and will also require sudo. Example:

    sudo bpfman list\n

    The examples here use sudo bpfman in place of sudo ./target/debug/bpfman for readability; use whichever matches how your system is deployed.

    eBPF object files used in the examples are taken from the examples and integration-test directories from the bpfman repository.

    "},{"location":"getting-started/cli-guide/#basic-syntax","title":"Basic Syntax","text":"

    Below are the commands supported by bpfman.

    An eBPF manager focusing on simplifying the deployment and administration of eBPF programs.\n\nUsage: bpfman <COMMAND>\n\nCommands:\n  load    Load an eBPF program on the system\n  unload  Unload an eBPF program using the Program Id\n  list    List all eBPF programs loaded via bpfman\n  get     Get an eBPF program using the Program Id\n  image   eBPF Bytecode Image related commands\n  help    Print this message or the help of the given subcommand(s)\n\nOptions:\n  -h, --help\n          Print help (see a summary with '-h')\n
    "},{"location":"getting-started/cli-guide/#bpfman-load","title":"bpfman load","text":"

    The bpfman load file and bpfman load image commands are used to load eBPF programs. The bpfman load file command is used to load a locally built eBPF program. The bpfman load image command is used to load an eBPF program packaged in a OCI container image from a given registry. Each program type (i.e. <COMMAND>) has its own set of attributes specific to the program type, and those attributes MUST come after the program type is entered. There is a common set of attributes, and those MUST come before the program type is entered.

    sudo bpfman load file --help\nLoad an eBPF program from a local .o file\n\nUsage: bpfman load file [OPTIONS] --path <PATH> --name <NAME> <COMMAND>\n\nCommands:\n  xdp         Install an eBPF program on the XDP hook point for a given interface\n  tc          Install an eBPF program on the TC hook point for a given interface\n  tracepoint  Install an eBPF program on a Tracepoint\n  kprobe      Install a kprobe or kretprobe eBPF probe\n  uprobe      Install a uprobe or uretprobe eBPF probe\n  fentry      Install a fentry eBPF probe\n  fexit       Install a fexit eBPF probe\n  help        Print this message or the help of the given subcommand(s)\n\nOptions:\n  -p, --path <PATH>\n          Required: Location of local bytecode file\n          Example: --path /run/bpfman/examples/go-xdp-counter/bpf_x86_bpfel.o\n\n  -n, --name <NAME>\n          Required: The name of the function that is the entry point for the BPF program\n\n  -g, --global <GLOBAL>...\n          Optional: Global variables to be set when program is loaded.\n          Format: <NAME>=<Hex Value>\n\n          This is a very low level primitive. The caller is responsible for formatting\n          the byte string appropriately considering such things as size, endianness,\n          alignment and packing of data structures.\n\n  -m, --metadata <METADATA>\n          Optional: Specify Key/Value metadata to be attached to a program when it\n          is loaded by bpfman.\n          Format: <KEY>=<VALUE>\n\n          This can later be used to `list` a certain subset of programs which contain\n          the specified metadata.\n          Example: --metadata owner=acme\n\n      --map-owner-id <MAP_OWNER_ID>\n          Optional: Program Id of loaded eBPF program this eBPF program will share a map with.\n          Only used when multiple eBPF programs need to share a map.\n          Example: --map-owner-id 63178\n\n  -h, --help\n          Print help (see a summary with '-h')\n

    and

    sudo bpfman load image --help\nLoad an eBPF program packaged in a OCI container image from a given registry\n\nUsage: bpfman load image [OPTIONS] --image-url <IMAGE_URL> --name <NAME> <COMMAND>\n\nCommands:\n  xdp         Install an eBPF program on the XDP hook point for a given interface\n  tc          Install an eBPF program on the TC hook point for a given interface\n  tracepoint  Install an eBPF program on a Tracepoint\n  kprobe      Install a kprobe or kretprobe eBPF probe\n  uprobe      Install a uprobe or uretprobe eBPF probe\n  fentry      Install a fentry eBPF probe\n  fexit       Install a fexit eBPF probe\n  help        Print this message or the help of the given subcommand(s)\n\nOptions:\n  -i, --image-url <IMAGE_URL>\n          Required: Container Image URL.\n          Example: --image-url quay.io/bpfman-bytecode/xdp_pass:latest\n\n  -r, --registry-auth <REGISTRY_AUTH>\n          Optional: Registry auth for authenticating with the specified image registry.\n          This should be base64 encoded from the '<username>:<password>' string just like\n          it's stored in the docker/podman host config.\n          Example: --registry_auth \"YnjrcKw63PhDcQodiU9hYxQ2\"\n\n  -p, --pull-policy <PULL_POLICY>\n          Optional: Pull policy for remote images.\n\n          [possible values: Always, IfNotPresent, Never]\n\n          [default: IfNotPresent]\n\n  -n, --name <NAME>\n          Required: The name of the function that is the entry point for the eBPF program.\n\n  -g, --global <GLOBAL>...\n          Optional: Global variables to be set when program is loaded.\n          Format: <NAME>=<Hex Value>\n\n          This is a very low level primitive. The caller is responsible for formatting\n          the byte string appropriately considering such things as size, endianness,\n          alignment and packing of data structures.\n\n  -m, --metadata <METADATA>\n          Optional: Specify Key/Value metadata to be attached to a program when it\n          is loaded by bpfman.\n          Format: <KEY>=<VALUE>\n\n          This can later be used to list a certain subset of programs which contain\n          the specified metadata.\n          Example: --metadata owner=acme\n\n      --map-owner-id <MAP_OWNER_ID>\n          Optional: Program Id of loaded eBPF program this eBPF program will share a map with.\n          Only used when multiple eBPF programs need to share a map.\n          Example: --map-owner-id 63178\n\n  -h, --help\n          Print help (see a summary with '-h')\n

    When using either load command, --path, --image-url, --registry-auth, --pull-policy, --name, --global, --metadata and --map-owner-id must be entered before the <COMMAND> (xdp, tc, tracepoint, etc) is entered. Then each <COMMAND> has its own custom parameters (same for both bpfman load file and bpfman load image):

    sudo bpfman load file xdp --help\nInstall an eBPF program on the XDP hook point for a given interface\n\nUsage: bpfman load file --path <PATH> --name <NAME> xdp [OPTIONS] --iface <IFACE> --priority <PRIORITY>\n\nOptions:\n  -i, --iface <IFACE>\n          Required: Interface to load program on\n\n  -p, --priority <PRIORITY>\n          Required: Priority to run program in chain. Lower value runs first\n\n      --proceed-on <PROCEED_ON>...\n          Optional: Proceed to call other programs in chain on this exit code.\n          Multiple values supported by repeating the parameter.\n          Example: --proceed-on \"pass\" --proceed-on \"drop\"\n\n          [possible values: aborted, drop, pass, tx, redirect, dispatcher_return]\n\n          [default: pass, dispatcher_return]\n\n  -h, --help\n          Print help (see a summary with '-h')\n

    Example loading from local file (--path is the fully qualified path):

    cd bpfman/\nsudo bpfman load file --path tests/integration-test/bpf/.output/xdp_pass.bpf.o --name \"pass\" xdp --iface eno3 --priority 100\n

    Example from image in remote repository:

    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest --name \"pass\" xdp --iface eno3 --priority 100\n

    The tc command is similar to xdp, but it also requires the direction option and the proceed-on values are different.

    sudo bpfman load file tc --help\nInstall an eBPF program on the TC hook point for a given interface\n\nUsage: bpfman load file --path <PATH> --name <NAME> tc [OPTIONS] --direction <DIRECTION> --iface <IFACE> --priority <PRIORITY>\n\nOptions:\n  -d, --direction <DIRECTION>\n          Required: Direction to apply program.\n\n          [possible values: ingress, egress]\n\n  -i, --iface <IFACE>\n          Required: Interface to load program on\n\n  -p, --priority <PRIORITY>\n          Required: Priority to run program in chain. Lower value runs first\n\n      --proceed-on <PROCEED_ON>...\n          Optional: Proceed to call other programs in chain on this exit code.\n          Multiple values supported by repeating the parameter.\n          Example: --proceed-on \"ok\" --proceed-on \"pipe\"\n\n          [possible values: unspec, ok, reclassify, shot, pipe, stolen, queued,\n                            repeat, redirect, trap, dispatcher_return]\n\n          [default: ok, pipe, dispatcher_return]\n\n  -h, --help\n          Print help (see a summary with '-h')\n

    The following is an example of the tc command using short option names:

    cd bpfman/\nsudo bpfman load file -p tests/integration-test/bpf/.output/tc_pass.bpf.o -n \"pass\" tc -d ingress -i mynet1 -p 40\n

    For the tc_pass.bpf.o program loaded with the command above, the name is taken from the function name, not the SEC() name, as shown in the following snippet:

    SEC(\"classifier/pass\")\nint pass(struct __sk_buff *skb) {\n{\n    :\n}\n
    "},{"location":"getting-started/cli-guide/#additional-load-examples","title":"Additional Load Examples","text":"

    Below are some additional examples of bpfman load commands:

    "},{"location":"getting-started/cli-guide/#fentry","title":"Fentry","text":"
    sudo bpfman load image --image-url quay.io/bpfman-bytecode/fentry:latest --name \"test_fentry\" fentry -f do_unlinkat\n
    "},{"location":"getting-started/cli-guide/#fexit","title":"Fexit","text":"
    sudo bpfman load image --image-url quay.io/bpfman-bytecode/fexit:latest --name \"test_fexit\" fexit -f do_unlinkat\n
    "},{"location":"getting-started/cli-guide/#kprobe","title":"Kprobe","text":"
    sudo bpfman load image --image-url quay.io/bpfman-bytecode/kprobe:latest --name \"my_kprobe\" kprobe -f try_to_wake_up\n
    "},{"location":"getting-started/cli-guide/#kretprobe","title":"Kretprobe","text":"
    sudo bpfman load image --image-url quay.io/bpfman-bytecode/kretprobe:latest --name \"my_kretprobe\" kprobe -f try_to_wake_up -r\n
    "},{"location":"getting-started/cli-guide/#tc","title":"TC","text":"
    cd bpfman/\nsudo bpfman load file --path examples/go-tc-counter/bpf_x86_bpfel.o --name \"stats\" tc --direction ingress --iface eno3 --priority 110\n
    "},{"location":"getting-started/cli-guide/#uprobe","title":"Uprobe","text":"
    sudo bpfman load image --image-url quay.io/bpfman-bytecode/uprobe:latest --name \"my_uprobe\" uprobe -f \"malloc\" -t \"libc\"\n
    "},{"location":"getting-started/cli-guide/#uretprobe","title":"Uretprobe","text":"
    sudo bpfman load image --image-url quay.io/bpfman-bytecode/uretprobe:latest --name \"my_uretprobe\" uprobe -f \"malloc\" -t \"libc\" -r\n
    "},{"location":"getting-started/cli-guide/#xdp","title":"XDP","text":"
    cd bpfman/\nsudo bpfman load file --path bpfman/examples/go-xdp-counter/bpf_x86_bpfel.o --name \"xdp_stats\" xdp --iface eno3 --priority 35\n
    "},{"location":"getting-started/cli-guide/#setting-global-variables-in-ebpf-programs","title":"Setting Global Variables in eBPF Programs","text":"

    Global variables can be set for any eBPF program type when loading as follows:

    cd bpfman/\nsudo bpfman load file -p bpfman/tests/integration-test/bpf/.output/tc_pass.bpf.o -g GLOBAL_u8=01 GLOBAL_u32=0A0B0C0D -n \"pass\" tc -d ingress -i mynet1 -p 40\n

    Note that when setting global variables, the eBPF program being loaded must declare global variables whose names match the strings given, and the size of each value provided must match the size of the corresponding variable. For example, the above command can be used to update the following global variables in an eBPF program.

    volatile const __u8 GLOBAL_u8 = 0;\nvolatile const __u32 GLOBAL_u32 = 0;\n
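
    For illustration only, here is a minimal sketch (the TC program and section names are hypothetical) of how such globals might be declared and consumed on the eBPF side; because they are volatile const, the values patched in at load time behave as constants to the verifier:

    // Sketch only: a hypothetical TC program consuming globals patched in by bpfman.\n#include <linux/bpf.h>\n#include <linux/pkt_cls.h>\n#include <bpf/bpf_helpers.h>\n\nvolatile const __u8  GLOBAL_u8  = 0;  // patched by -g GLOBAL_u8=01\nvolatile const __u32 GLOBAL_u32 = 0;  // patched by -g GLOBAL_u32=0A0B0C0D\n\nSEC(\"classifier/pass\")\nint pass(struct __sk_buff *skb)\n{\n    // Branch on the load-time value; 01 enables the debug print.\n    if (GLOBAL_u8)\n        bpf_printk(\"pass: GLOBAL_u32=0x%x\", GLOBAL_u32);\n    return TC_ACT_OK;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n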
    "},{"location":"getting-started/cli-guide/#modifying-the-proceed-on-behavior","title":"Modifying the Proceed-On Behavior","text":"

    The proceed-on setting applies to xdp and tc programs. For both of these program types, an ordered list of eBPF programs is maintained per attach point. The proceed-on setting determines whether processing will \"proceed\" to the next eBPF program in the list, or terminate processing and return, based on the program's return value. For example, the default proceed-on configuration for an xdp program can be modified as follows:

    cd bpfman/\nsudo bpfman load file -p tests/integration-test/bpf/.output/xdp_pass.bpf.o -n \"pass\" xdp -i mynet1 -p 30 --proceed-on drop pass dispatcher_return\n
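
    To make the chain semantics concrete, the following hedged sketch (program and function names are hypothetical) shows an XDP program whose return code feeds the proceed-on decision; with the modified list above, returning XDP_DROP no longer terminates the chain:

    // Sketch only: an XDP program whose return code feeds the proceed-on logic.\n#include <linux/bpf.h>\n#include <bpf/bpf_helpers.h>\n\nSEC(\"xdp\")\nint drop_all(struct xdp_md *ctx)\n{\n    // With --proceed-on drop pass dispatcher_return, returning XDP_DROP\n    // lets the dispatcher continue to the next program in the list.\n    return XDP_DROP;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n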
    "},{"location":"getting-started/cli-guide/#sharing-maps-between-ebpf-programs","title":"Sharing Maps Between eBPF Programs","text":"

    Warning

    Currently, for the map sharing feature to work, the LIBBPF_PIN_BY_NAME flag MUST be set in the shared BPF map definitions. Please see this aya issue for the future work that will remove this requirement.
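
    For reference, a BTF-style map definition carrying the required flag might look like the following sketch (the map type and names are illustrative):

    // Sketch only: LIBBPF_PIN_BY_NAME tells libbpf-style loaders to pin the\n// map by name, which bpfman currently requires for map sharing.\n#include <linux/bpf.h>\n#include <bpf/bpf_helpers.h>\n\nstruct {\n    __uint(type, BPF_MAP_TYPE_PER_CPU_ARRAY);\n    __uint(max_entries, 1);\n    __type(key, __u32);\n    __type(value, __u64);\n    __uint(pinning, LIBBPF_PIN_BY_NAME);\n} xdp_stats_map SEC(\".maps\");\n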

    To share maps between eBPF programs, first load the eBPF program that owns the maps. One eBPF program must own the maps.

    cd bpfman/\nsudo bpfman load file --path examples/go-xdp-counter/bpf_x86_bpfel.o -n \"xdp_stats\" xdp --iface eno3 --priority 100\n6371\n

    Next, load additional eBPF programs that will share the existing maps by passing the program id of the eBPF program that owns the maps using the --map-owner-id parameter:

    cd bpfman/\nsudo bpfman load file --path examples/go-xdp-counter/bpf_x86_bpfel.o -n \"xdp_stats\" --map-owner-id 6371 xdp --iface eno3 --priority 100\n6373\n

    Use the bpfman get <PROGRAM_ID> command to display the configuration:

    sudo bpfman list\n Program ID  Name       Type  Load Time\n 6371        xdp_stats  xdp   2023-07-18T16:50:46-0400\n 6373        xdp_stats  xdp   2023-07-18T16:51:06-0400\n
    sudo bpfman get 6371\n Bpfman State\n---------------\n Name:          xdp_stats\n Path:          /home/<$USER>/src/bpfman/examples/go-xdp-counter/bpf_x86_bpfel.o\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6371\n Map Owner ID:  None\n Map Used By:   6371\n                6373\n Priority:      100\n Iface:         eno3\n Position:      1\n Proceed On:    pass, dispatcher_return\n:\n
    sudo bpfman get 6373\n Bpfman State\n---------------\n Name:          xdp_stats\n Path:          /home/<$USER>/src/bpfman/examples/go-xdp-counter/bpf_x86_bpfel.o\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6371\n Map Owner ID:  6371\n Map Used By:   6371\n                6373\n Priority:      100\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n:\n

    As the output shows, the first program (6371) owns the map, with a Map Owner ID of None and a Map Pin Path (/run/bpfman/fs/maps/6371) that includes its own ID.

    The second program (6373) references the first program via the Map Owner ID set to 6371 and has its Map Pin Path (/run/bpfman/fs/maps/6371) set to the same directory as the first program, which includes the first program's ID. The output for both commands shows that the map is being used by both programs via Map Used By, with values of 6371 and 6373.
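
    As an aside, any privileged process can open the shared map directly from the Map Pin Path. The following is a hedged userspace sketch using libbpf; the exact pinned file name under the directory is illustrative, and a plain array map is assumed (per-CPU maps require a per-CPU value buffer):

    // Sketch only: read one entry from a bpfman-pinned map with libbpf.\n#include <stdio.h>\n#include <bpf/bpf.h>\n\nint main(void)\n{\n    __u32 key = 0;\n    __u64 value = 0;\n\n    // Maps are pinned under the Map Pin Path shown by 'bpfman get'.\n    int fd = bpf_obj_get(\"/run/bpfman/fs/maps/6371/xdp_stats_map\");\n    if (fd < 0) {\n        perror(\"bpf_obj_get\");\n        return 1;\n    }\n    if (bpf_map_lookup_elem(fd, &key, &value) == 0)\n        printf(\"counter: %llu\\n\", (unsigned long long)value);\n    return 0;\n}\n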

    The eBPF programs can be unloaded in any order; the Map Pin Path will not be deleted until all the programs referencing the maps are unloaded:

    sudo bpfman unload 6371\nsudo bpfman unload 6373\n
    "},{"location":"getting-started/cli-guide/#bpfman-list","title":"bpfman list","text":"

    The bpfman list command lists all of the eBPF programs loaded by bpfman:

    sudo bpfman list\n Program ID  Name              Type        Load Time\n 6201        pass              xdp         2023-07-17T17:17:53-0400\n 6202        sys_enter_openat  tracepoint  2023-07-17T17:19:09-0400\n 6204        stats             tc          2023-07-17T17:20:14-0400\n

    To see all eBPF programs loaded on the system, include the --all option.

    sudo bpfman list --all\n Program ID  Name              Type           Load Time\n 52          restrict_filesy   lsm            2023-05-03T12:53:34-0400\n 166         dump_bpf_map      tracing        2023-05-03T12:53:52-0400\n 167         dump_bpf_prog     tracing        2023-05-03T12:53:52-0400\n 455                           cgroup_device  2023-05-03T12:58:26-0400\n :\n 6194                          cgroup_device  2023-07-17T17:15:23-0400\n 6201        pass              xdp            2023-07-17T17:17:53-0400\n 6202        sys_enter_openat  tracepoint     2023-07-17T17:19:09-0400\n 6203        dispatcher        tc             2023-07-17T17:20:14-0400\n 6204        stats             tc             2023-07-17T17:20:14-0400\n 6207        xdp               xdp            2023-07-17T17:27:13-0400\n 6210        test_fentry       tracing        2023-07-17T17:28:34-0400\n 6212        test_fexit        tracing        2023-07-17T17:29:02-0400\n 6223        my_uprobe         probe          2023-07-17T17:31:45-0400\n 6225        my_kretprobe      probe          2023-07-17T17:32:27-0400\n 6928        my_kprobe         probe          2023-07-17T17:33:49-0400\n

    To filter on a given program type, include the --program-type parameter:

    sudo bpfman list --all --program-type tc\n Program ID  Name        Type  Load Time\n 6203        dispatcher  tc    2023-07-17T17:20:14-0400\n 6204        stats       tc    2023-07-17T17:20:14-0400\n

    Note: The list filters by the Kernel Program Type. kprobe, kretprobe, uprobe and uretprobe all map to the probe Kernel Program Type. fentry and fexit both map to the tracing Kernel Program Type.

    "},{"location":"getting-started/cli-guide/#bpfman-get","title":"bpfman get","text":"

    To retrieve detailed information for a loaded eBPF program, use the bpfman get <PROGRAM_ID> command. If the eBPF program was loaded via bpfman, then there will be a Bpfman State section with bpfman related attributes and a Kernel State section with kernel information. If the eBPF program was loaded outside of bpfman, then the Bpfman State section will be empty and the Kernel State section will be populated.

    sudo bpfman get 6204\n Bpfman State\n---------------\n Name:          stats\n Image URL:     quay.io/bpfman-bytecode/go-tc-counter:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6204\n Map Owner ID:  None\n Map Used By:   6204\n Priority:      100\n Iface:         eno3\n Position:      0\n Direction:     eg\n Proceed On:    pipe, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6204\n Name:                             stats\n Type:                             tc\n Loaded At:                        2023-07-17T17:20:14-0400\n Tag:                              ead94553702a3742\n GPL Compatible:                   true\n Map IDs:                          [2705]\n BTF ID:                           2821\n Size Translated (bytes):          176\n JITed:                            true\n Size JITed (bytes):               116\n Kernel Allocated Memory (bytes):  4096\n Verified Instruction Count:       24\n
    sudo bpfman get 6190\n Bpfman State\n---------------\nNONE\n\n Kernel State\n----------------------------------\nProgram ID:                        6190\nName:                              None\nType:                              cgroup_skb\nLoaded At:                         2023-07-17T17:15:23-0400\nTag:                               6deef7357e7b4530\nGPL Compatible:                    true\nMap IDs:                           []\nBTF ID:                            0\nSize Translated (bytes):           64\nJITed:                             true\nSize JITed (bytes):                55\nKernel Allocated Memory (bytes):   4096\nVerified Instruction Count:        8\n
    "},{"location":"getting-started/cli-guide/#bpfman-unload","title":"bpfman unload","text":"

    The bpfman unload command takes the program id from the load or list command as a parameter, and unloads the requested eBPF program:

    sudo bpfman unload 6204\n
    sudo bpfman list\n Program ID  Name              Type        Load Time\n 6201        pass              xdp         2023-07-17T17:17:53-0400\n 6202        sys_enter_openat  tracepoint  2023-07-17T17:19:09-0400\n
    "},{"location":"getting-started/cli-guide/#bpfman-image","title":"bpfman image","text":"

    The bpfman image command contains a set of container image related subcommands.

    "},{"location":"getting-started/cli-guide/#bpfman-image-pull","title":"bpfman image pull","text":"

    The bpfman image pull command pulls a given bytecode image for future use by a load command.

    sudo bpfman image pull --help\nPull an eBPF bytecode image from a remote registry\n\nUsage: bpfman image pull [OPTIONS] --image-url <IMAGE_URL>\n\nOptions:\n  -i, --image-url <IMAGE_URL>\n          Required: Container Image URL.\n          Example: --image-url quay.io/bpfman-bytecode/xdp_pass:latest\n\n  -r, --registry-auth <REGISTRY_AUTH>\n          Optional: Registry auth for authenticating with the specified image registry.\n          This should be base64 encoded from the '<username>:<password>' string just like\n          it's stored in the docker/podman host config.\n          Example: --registry_auth \"YnjrcKw63PhDcQodiU9hYxQ2\"\n\n  -p, --pull-policy <PULL_POLICY>\n          Optional: Pull policy for remote images.\n\n          [possible values: Always, IfNotPresent, Never]\n\n          [default: IfNotPresent]\n\n  -h, --help\n          Print help (see a summary with '-h')\n

    Example usage:

    sudo bpfman image pull --image-url quay.io/bpfman-bytecode/xdp_pass:latest\nSuccessfully downloaded bytecode\n

    Then, when the image is loaded, the local copy will be used:

    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest --pull-policy IfNotPresent xdp --iface eno3 --priority 100\n Bpfman State                                           \n ---------------\n Name:          pass                                  \n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest \n Pull Policy:   IfNotPresent                          \n Global:        None                                  \n Metadata:      None                                  \n Map Pin Path:  /run/bpfman/fs/maps/406681              \n Map Owner ID:  None                                  \n Maps Used By:  None                                  \n Priority:      100                                   \n Iface:         eno3\n Position:      2                                     \n Proceed On:    pass, dispatcher_return               \n\n Kernel State                                               \n ----------------------------------\n Program ID:                       406681                   \n Name:                             pass                     \n Type:                             xdp                      \n Loaded At:                        1917-01-27T01:37:06-0500 \n Tag:                              4b9d1b2c140e87ce         \n GPL Compatible:                   true                     \n Map IDs:                          [736646]                 \n BTF ID:                           555560                   \n Size Translated (bytes):          96                       \n JITted:                           true                     \n Size JITted:                      67                       \n Kernel Allocated Memory (bytes):  4096                     \n Verified Instruction Count:       9                        \n
    "},{"location":"getting-started/cli-guide/#bpfman-image-build","title":"bpfman image build","text":"

    The bpfman image build command is a utility command that builds and pushes an eBPF program in an OCI container image, leveraging either docker or podman. The eBPF program bytecode must already be generated. This command calls docker or podman with the proper parameters for building multi-architecture images with the proper labels for an OCI container image.

    Since this command leverages docker or podman, a container file (--container-file or -f) is required, along with an image tag (--tag or -t). In addition, the bytecode to package must be included. The bytecode can take several forms, but at least one must be provided:

    • --bytecode or -b: Use this option for a single bytecode object file built for the host architecture. The value of this parameter is a single bytecode object file.
    • --cilium-ebpf-project or -c: Use this option for a cilium/ebpf based project. The value of this parameter is a directory that contains multiple object files for different architectures, where the object files follow the Cilium naming convention with the architecture in the name (e.g. bpf_x86_bpfel.o, bpf_arm64_bpfel.o, bpf_powerpc_bpfel.o, bpf_s390_bpfeb.o).
    • --bc-386-el .. --bc-s390x-eb: Use this option to add one or more architecture specific bytecode files.
    bpfman image build --help\nBuild an eBPF bytecode image from local bytecode objects and push to a registry.\n\nTo use, the --container-file and --tag must be included, as well as a pointer to\nat least one bytecode file that can be passed in several ways. Use either:\n\n* --bytecode: for a single bytecode built for the host architecture.\n\n* --cilium-ebpf-project: for a cilium/ebpf project directory which contains\n    multiple object files for different architectures.\n\n* --bc-386-el .. --bc-s390x-eb: to add one or more architecture specific bytecode files.\n\nExamples:\n   bpfman image build -f Containerfile.bytecode -t quay.io/<USER>/go-xdp-counter:test \\\n     -b ./examples/go-xdp-counter/bpf_x86_bpfel.o\n\nUsage: bpfman image build [OPTIONS] --tag <TAG> --container-file <CONTAINER_FILE> <--bytecode <BYTECODE>|--cilium-ebpf-project <CILIUM_EBPF_PROJECT>|--bc-386-el <BC_386_EL>|--bc-amd64-el <BC_AMD64_EL>|--bc-arm-el <BC_ARM_EL>|--bc-arm64-el <BC_ARM64_EL>|--bc-loong64-el <BC_LOONG64_EL>|--bc-mips-eb <BC_MIPS_EB>|--bc-mipsle-el <BC_MIPSLE_EL>|--bc-mips64-eb <BC_MIPS64_EB>|--bc-mips64le-el <BC_MIPS64LE_EL>|--bc-ppc64-eb <BC_PPC64_EB>|--bc-ppc64le-el <BC_PPC64LE_EL>|--bc-riscv64-el <BC_RISCV64_EL>|--bc-s390x-eb <BC_S390X_EB>>\n\nOptions:\n  -t, --tag <TAG>\n          Required: Name and optionally a tag in the name:tag format.\n          Example: --tag quay.io/bpfman-bytecode/xdp_pass:latest\n\n  -f, --container-file <CONTAINER_FILE>\n          Required: Dockerfile to use for building the image.\n          Example: --container_file Containerfile.bytecode\n\n  -r, --runtime <RUNTIME>\n          Optional: Container runtime to use, works with docker or podman, defaults to docker\n          Example: --runtime podman\n\n  -b, --bytecode <BYTECODE>\n          Optional: bytecode file to use for building the image assuming host architecture.\n          Example: -b ./examples/go-xdp-counter/bpf_x86_bpfel.o\n\n  -c, --cilium-ebpf-project <CILIUM_EBPF_PROJECT>\n          Optional: If specified pull multi-arch bytecode files from a cilium/ebpf formatted project\n          where the bytecode files all contain a standard bpf_<GOARCH>_<(el/eb)>.o tag.\n          Example: --cilium-ebpf-project ./examples/go-xdp-counter\n\n      --bc-386-el <BC_386_EL>\n          Optional: bytecode file to use for building the image assuming amd64 architecture.\n          Example: --bc-386-el ./examples/go-xdp-counter/bpf_386_bpfel.o\n\n      --bc-amd64-el <BC_AMD64_EL>\n          Optional: bytecode file to use for building the image assuming amd64 architecture.\n          Example: --bc-amd64-el ./examples/go-xdp-counter/bpf_x86_bpfel.o\n\n      --bc-arm-el <BC_ARM_EL>\n          Optional: bytecode file to use for building the image assuming arm architecture.\n          Example: --bc-arm-el ./examples/go-xdp-counter/bpf_arm_bpfel.o\n\n      --bc-arm64-el <BC_ARM64_EL>\n          Optional: bytecode file to use for building the image assuming arm64 architecture.\n          Example: --bc-arm64-el ./examples/go-xdp-counter/bpf_arm64_bpfel.o\n\n      --bc-loong64-el <BC_LOONG64_EL>\n          Optional: bytecode file to use for building the image assuming loong64 architecture.\n          Example: --bc-loong64-el ./examples/go-xdp-counter/bpf_loong64_bpfel.o\n\n      --bc-mips-eb <BC_MIPS_EB>\n          Optional: bytecode file to use for building the image assuming mips architecture.\n          Example: --bc-mips-eb ./examples/go-xdp-counter/bpf_mips_bpfeb.o\n\n      --bc-mipsle-el <BC_MIPSLE_EL>\n          Optional: 
bytecode file to use for building the image assuming mipsle architecture.\n          Example: --bc-mipsle-el ./examples/go-xdp-counter/bpf_mipsle_bpfel.o\n\n      --bc-mips64-eb <BC_MIPS64_EB>\n          Optional: bytecode file to use for building the image assuming mips64 architecture.\n          Example: --bc-mips64-eb ./examples/go-xdp-counter/bpf_mips64_bpfeb.o\n\n      --bc-mips64le-el <BC_MIPS64LE_EL>\n          Optional: bytecode file to use for building the image assuming mips64le architecture.\n          Example: --bc-mips64le-el ./examples/go-xdp-counter/bpf_mips64le_bpfel.o\n\n      --bc-ppc64-eb <BC_PPC64_EB>\n          Optional: bytecode file to use for building the image assuming ppc64 architecture.\n          Example: --bc-ppc64-eb ./examples/go-xdp-counter/bpf_ppc64_bpfeb.o\n\n      --bc-ppc64le-el <BC_PPC64LE_EL>\n          Optional: bytecode file to use for building the image assuming ppc64le architecture.\n          Example: --bc-ppc64le-el ./examples/go-xdp-counter/bpf_ppc64le_bpfel.o\n\n      --bc-riscv64-el <BC_RISCV64_EL>\n          Optional: bytecode file to use for building the image assuming riscv64 architecture.\n          Example: --bc-riscv64-el ./examples/go-xdp-counter/bpf_riscv64_bpfel.o\n\n      --bc-s390x-eb <BC_S390X_EB>\n          Optional: bytecode file to use for building the image assuming s390x architecture.\n          Example: --bc-s390x-eb ./examples/go-xdp-counter/bpf_s390x_bpfeb.o\n\n  -h, --help\n          Print help (see a summary with '-h')\n

    Below are some examples of building images. Note that sudo is not required. This command also pushes the image to a registry, so the user must already be logged in to the registry.

    Example of a single bytecode image:

    bpfman image build -f Containerfile.bytecode -t quay.io/$QUAY_USER/go-xdp-counter:test -b ./examples/go-xdp-counter/bpf_x86_bpfel.o\n

    Example of a directory with Cilium generated bytecode objects:

    bpfman image build -f Containerfile.bytecode.multi.arch -t quay.io/$QUAY_USER/go-xdp-counter:test -c ./examples/go-xdp-counter/\n

    Note

    To build images for multiple architectures on a local system, docker (or podman) may need additional configuration settings to allow for caching of non-native images. See https://docs.docker.com/build/building/multi-platform/ for more details.

    "},{"location":"getting-started/cli-guide/#bpfman-image-generate-build-args","title":"bpfman image generate-build-args","text":"

    The bpfman image generate-build-args command is a utility command that generates the labels used to package eBPF program bytecode in an OCI container image. It is recommended to use the bpfman image build command to package the eBPF program in an OCI container image, but an alternative is to generate the labels and then build the container image with docker or podman.

    The eBPF program bytecode must already be generated. The bytecode can take several forms, but at least one must be provided:

    • --bytecode or -b: Use this option for a single bytecode object file built for the host architecture. The value of this parameter is a single bytecode object file.
    • --cilium-ebpf-project or -c: Use this option for a cilium/ebpf based project. The value of this parameter is a directory that contains multiple object files for different architectures, where the object files follow the Cilium naming convention with the architecture in the name (e.g. bpf_x86_bpfel.o, bpf_arm64_bpfel.o, bpf_powerpc_bpfel.o, bpf_s390_bpfeb.o).
    • --bc-386-el .. --bc-s390x-eb: Use this option to add one or more architecture specific bytecode files.
    bpfman image generate-build-args --help\nGenerate the OCI image labels for a given bytecode file.\n\nTo use, the --container-file and --tag must be included, as well as a pointer to\nat least one bytecode file that can be passed in several ways. Use either:\n\n* --bytecode: for a single bytecode built for the host architecture.\n\n* --cilium-ebpf-project: for a cilium/ebpf project directory which contains\n    multiple object files for different architectures.\n\n* --bc-386-el .. --bc-s390x-eb: to add one or more architecture specific bytecode files.\n\nExamples:\n  bpfman image generate-build-args --bc-amd64-el ./examples/go-xdp-counter/bpf_x86_bpfel.o\n\nUsage: bpfman image generate-build-args <--bytecode <BYTECODE>|--cilium-ebpf-project <CILIUM_EBPF_PROJECT>|--bc-386-el <BC_386_EL>|--bc-amd64-el <BC_AMD64_EL>|--bc-arm-el <BC_ARM_EL>|--bc-arm64-el <BC_ARM64_EL>|--bc-loong64-el <BC_LOONG64_EL>|--bc-mips-eb <BC_MIPS_EB>|--bc-mipsle-el <BC_MIPSLE_EL>|--bc-mips64-eb <BC_MIPS64_EB>|--bc-mips64le-el <BC_MIPS64LE_EL>|--bc-ppc64-eb <BC_PPC64_EB>|--bc-ppc64le-el <BC_PPC64LE_EL>|--bc-riscv64-el <BC_RISCV64_EL>|--bc-s390x-eb <BC_S390X_EB>>\n\nOptions:\n  -b, --bytecode <BYTECODE>\n          Optional: bytecode file to use for building the image assuming host architecture.\n          Example: -b ./examples/go-xdp-counter/bpf_x86_bpfel.o\n\n  -c, --cilium-ebpf-project <CILIUM_EBPF_PROJECT>\n          Optional: If specified pull multi-arch bytecode files from a cilium/ebpf formatted project\n          where the bytecode files all contain a standard bpf_<GOARCH>_<(el/eb)>.o tag.\n          Example: --cilium-ebpf-project ./examples/go-xdp-counter\n\n      --bc-386-el <BC_386_EL>\n          Optional: bytecode file to use for building the image assuming amd64 architecture.\n          Example: --bc-386-el ./examples/go-xdp-counter/bpf_386_bpfel.o\n\n      --bc-amd64-el <BC_AMD64_EL>\n          Optional: bytecode file to use for building the image assuming amd64 architecture.\n          Example: --bc-amd64-el ./examples/go-xdp-counter/bpf_x86_bpfel.o\n\n      --bc-arm-el <BC_ARM_EL>\n          Optional: bytecode file to use for building the image assuming arm architecture.\n          Example: --bc-arm-el ./examples/go-xdp-counter/bpf_arm_bpfel.o\n\n      --bc-arm64-el <BC_ARM64_EL>\n          Optional: bytecode file to use for building the image assuming arm64 architecture.\n          Example: --bc-arm64-el ./examples/go-xdp-counter/bpf_arm64_bpfel.o\n\n      --bc-loong64-el <BC_LOONG64_EL>\n          Optional: bytecode file to use for building the image assuming loong64 architecture.\n          Example: --bc-loong64-el ./examples/go-xdp-counter/bpf_loong64_bpfel.o\n\n      --bc-mips-eb <BC_MIPS_EB>\n          Optional: bytecode file to use for building the image assuming mips architecture.\n          Example: --bc-mips-eb ./examples/go-xdp-counter/bpf_mips_bpfeb.o\n\n      --bc-mipsle-el <BC_MIPSLE_EL>\n          Optional: bytecode file to use for building the image assuming mipsle architecture.\n          Example: --bc-mipsle-el ./examples/go-xdp-counter/bpf_mipsle_bpfel.o\n\n      --bc-mips64-eb <BC_MIPS64_EB>\n          Optional: bytecode file to use for building the image assuming mips64 architecture.\n          Example: --bc-mips64-eb ./examples/go-xdp-counter/bpf_mips64_bpfeb.o\n\n      --bc-mips64le-el <BC_MIPS64LE_EL>\n          Optional: bytecode file to use for building the image assuming mips64le architecture.\n          Example: --bc-mips64le-el 
./examples/go-xdp-counter/bpf_mips64le_bpfel.o\n\n      --bc-ppc64-eb <BC_PPC64_EB>\n          Optional: bytecode file to use for building the image assuming ppc64 architecture.\n          Example: --bc-ppc64-eb ./examples/go-xdp-counter/bpf_ppc64_bpfeb.o\n\n      --bc-ppc64le-el <BC_PPC64LE_EL>\n          Optional: bytecode file to use for building the image assuming ppc64le architecture.\n          Example: --bc-ppc64le-el ./examples/go-xdp-counter/bpf_ppc64le_bpfel.o\n\n      --bc-riscv64-el <BC_RISCV64_EL>\n          Optional: bytecode file to use for building the image assuming riscv64 architecture.\n          Example: --bc-riscv64-el ./examples/go-xdp-counter/bpf_riscv64_bpfel.o\n\n      --bc-s390x-eb <BC_S390X_EB>\n          Optional: bytecode file to use for building the image assuming s390x architecture.\n          Example: --bc-s390x-eb ./examples/go-xdp-counter/bpf_s390x_bpfeb.o\n\n  -h, --help\n          Print help (see a summary with '-h')\n

    Below are some examples of generating build arguments. Note that sudo is not required.

    Example of a single bytecode image:

    $ bpfman image generate-build-args -b ./examples/go-xdp-counter/bpf_x86_bpfel.o\nBYTECODE_FILE=./examples/go-xdp-counter/bpf_x86_bpfel.o\nPROGRAMS={\"xdp_stats\":\"xdp\"}\nMAPS={\"xdp_stats_map\":\"per_cpu_array\"}\n

    Example of a directory with Cilium generated bytecode objects:

    $ bpfman image generate-build-args -c ./examples/go-xdp-counter/\nBC_AMD64_EL=./examples/go-xdp-counter/bpf_x86_bpfel.o\nBC_ARM_EL=./examples/go-xdp-counter/bpf_arm64_bpfel.o\nBC_PPC64LE_EL=./examples/go-xdp-counter/bpf_powerpc_bpfel.o\nBC_S390X_EB=./examples/go-xdp-counter/bpf_s390_bpfeb.o\nPROGRAMS={\"xdp_stats\":\"xdp\"}\nMAPS={\"xdp_stats_map\":\"per_cpu_array\"}\n

    Once the labels are generated, the eBPF program can be packaged in an OCI container image using docker or podman by passing the generated labels as build-arg parameters:

    docker build \\\n  --build-arg BYTECODE_FILE=./examples/go-xdp-counter/bpf_x86_bpfel.o \\\n  --build-arg PROGRAMS={\"xdp_stats\":\"xdp\"} \\\n  --build-arg MAPS={\"xdp_stats_map\":\"per_cpu_array\"} \\\n  -f Containerfile.bytecode . -t quay.io/$USER/go-xdp-counter-bytecode:test\n
    "},{"location":"getting-started/example-bpf-k8s/","title":"Deploying Example eBPF Programs On Kubernetes","text":"

    This section describes launching eBPF enabled applications on a Kubernetes cluster, where the approach differs slightly from running locally.

    This section assumes there is already a Kubernetes cluster running and bpfman is running in the cluster. See Deploying the bpfman-operator for details on deploying bpfman on a Kubernetes cluster, but the quickest solution is to run a Kubernetes KIND Cluster:

    cd bpfman/bpfman-operator/\nmake run-on-kind\n
    "},{"location":"getting-started/example-bpf-k8s/#loading-ebpf-programs-on-kubernetes","title":"Loading eBPF Programs On Kubernetes","text":"

    Instead of using the userspace program or CLI to load the eBPF bytecode as done in previous sections, the bytecode will be loaded by creating a Kubernetes CRD object. There is a CRD object for each eBPF program type bpfman supports.

    • FentryProgram CRD: Fentry Sample yaml
    • FexitProgram CRD: Fexit Sample yaml
    • KprobeProgram CRD: Kprobe Examples yaml
    • TcProgram CRD: TcProgram Examples yaml
    • TracepointProgram CRD: Tracepoint Examples yaml
    • UprobeProgram CRD: Uprobe Examples yaml
    • XdpProgram CRD: XdpProgram Examples yaml

    Sample bytecode yaml with XdpProgram CRD:

    cat examples/config/base/go-xdp-counter/bytecode.yaml\napiVersion: bpfman.io/v1alpha1\nkind: XdpProgram\nmetadata:\n  labels:\n    app.kubernetes.io/name: xdpprogram\n  name: go-xdp-counter-example\nspec:\n  name: xdp_stats\n  # Select all nodes\n  nodeselector: {}\n  interfaceselector:\n    primarynodeinterface: true\n  priority: 55\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/go-xdp-counter:latest\n

    Note that all the sample yaml files are configured with the bytecode running on all nodes (nodeselector: {}). This can be configured to run on specific nodes, but the DaemonSet yaml for the userspace program, which is described below, should have an equivalent change.

    Assume the following command is run:

    kubectl apply -f examples/config/base/go-xdp-counter/bytecode.yaml\n  xdpprogram.bpfman.io/go-xdp-counter-example created\n

    The diagram below shows the go-xdp-counter example, but the other examples operate in a similar fashion.

    Following the diagram for the XDP example (blue numbers):

    1. The user creates an XdpProgram object with the parameters associated with the eBPF bytecode, like interface, priority and BPF bytecode image. The name of the XdpProgram object in this example is go-xdp-counter-example. The XdpProgram is applied using kubectl, but in a more practical deployment, the XdpProgram would be applied by the application or a controller.
    2. bpfman-agent, running on each node, is watching for all changes to XdpProgram objects. When it sees an XdpProgram object created or modified, it makes sure a BpfProgram object for that node exists. The name of the BpfProgram object is the XdpProgram object name with the node name and interface or attach point appended. On a KIND Cluster, it would be similar to go-xdp-counter-example-bpfman-deployment-control-plane-eth0.
    3. bpfman-agent then determines if it should be running on the given node, and loads or unloads as needed by making gRPC calls to bpfman-rpc, which calls into the bpfman Library. bpfman behaves the same as described in the running locally example.
    4. bpfman-agent finally updates the status of the BpfProgram object.
    5. bpfman-operator watches all BpfProgram objects, and updates the status of the XdpProgram object indicating if the eBPF program has been applied to all the desired nodes or not.

    To retrieve information on the XdpProgram objects:

    kubectl get xdpprograms\nNAME                     BPFFUNCTIONNAME   NODESELECTOR   STATUS\ngo-xdp-counter-example   xdp_stats         {}             ReconcileSuccess\n\n\nkubectl get xdpprograms go-xdp-counter-example -o yaml\napiVersion: bpfman.io/v1alpha1\nkind: XdpProgram\nmetadata:\n  annotations:\n    kubectl.kubernetes.io/last-applied-configuration: |\n      {\"apiVersion\":\"bpfman.io/v1alpha1\",\"kind\":\"XdpProgram\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/name\":\"xdpprogram\"},\"name\":\"go-xdp-counter-example\"},\"spec\":{\"bpffunctionname\":\"xdp_stats\",\"bytecode\":{\"image\":{\"url\":\"quay.io/bpfman-bytecode/go-xdp-counter:latest\"}},\"interfaceselector\":{\"primarynodeinterface\":true},\"nodeselector\":{},\"priority\":55}}\n  creationTimestamp: \"2023-11-06T21:05:15Z\"\n  finalizers:\n  - bpfman.io.operator/finalizer\n  generation: 2\n  labels:\n    app.kubernetes.io/name: xdpprogram\n  name: go-xdp-counter-example\n  resourceVersion: \"3103\"\n  uid: edd45e2e-a40b-4668-ac76-c1f1eb63a23b\nspec:\n  bpffunctionname: xdp_stats\n  bytecode:\n    image:\n      imagepullpolicy: IfNotPresent\n      url: quay.io/bpfman-bytecode/go-xdp-counter:latest\n  interfaceselector:\n    primarynodeinterface: true\n  mapownerselector: {}\n  nodeselector: {}\n  priority: 55\n  proceedon:\n  - pass\n  - dispatcher_return\nstatus:\n  conditions:\n  - lastTransitionTime: \"2023-11-06T21:05:21Z\"\n    message: bpfProgramReconciliation Succeeded on all nodes\n    reason: ReconcileSuccess\n    status: \"True\"\n    type: ReconcileSuccess\n

    To retrieve information on the BpfProgram objects:

    kubectl get bpfprograms\nNAME                                                          TYPE      STATUS         AGE\n:\ngo-xdp-counter-example-bpfman-deployment-control-plane-eth0   xdp       bpfmanLoaded   11m\n\n\nkubectl get bpfprograms go-xdp-counter-example-bpfman-deployment-control-plane-eth0 -o yaml\napiVersion: bpfman.io/v1alpha1\nkind: BpfProgram\nmetadata:\n  annotations:\n    bpfman.io.xdpprogramcontroller/interface: eth0\n    bpfman.io/ProgramId: \"4801\"\n  creationTimestamp: \"2023-11-06T21:05:15Z\"\n  finalizers:\n  - bpfman.io.xdpprogramcontroller/finalizer\n  generation: 1\n  labels:\n    bpfman.io/ownedByProgram: go-xdp-counter-example\n    kubernetes.io/hostname: bpfman-deployment-control-plane\n  name: go-xdp-counter-example-bpfman-deployment-control-plane-eth0\n  ownerReferences:\n  - apiVersion: bpfman.io/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: XdpProgram\n    name: go-xdp-counter-example\n    uid: edd45e2e-a40b-4668-ac76-c1f1eb63a23b\n  resourceVersion: \"3102\"\n  uid: f7ffd156-168b-4dc8-be38-18c42626a631\nspec:\n  type: xdp\nstatus:\n  conditions:\n  - lastTransitionTime: \"2023-11-06T21:05:21Z\"\n    message: Successfully loaded bpfProgram\n    reason: bpfmanLoaded\n    status: \"True\"\n    type: Loaded\n
    "},{"location":"getting-started/example-bpf-k8s/#deploying-an-ebpf-enabled-application-on-kubernetes","title":"Deploying an eBPF enabled application On Kubernetes","text":"

    Here, a userspace container is deployed to consume the map data generated by the eBPF counter program. bpfman provides a Container Storage Interface (CSI) driver for exposing eBPF maps into a userspace container. To avoid having to mount a host directory that contains the map pinned file into the container and forcing the container to have permissions to access that host directory, the CSI driver mounts the map at a specified location in the container. All the examples use CSI; here is go-xdp-counter/deployment.yaml for reference:

    cd bpfman/examples/\ncat config/base/go-xdp-counter/deployment.yaml\n:\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: go-xdp-counter-ds\n  namespace: go-xdp-counter\n  labels:\n    k8s-app: go-xdp-counter\nspec:\n  :\n  template:\n    :\n    spec:\n       :\n      containers:\n      - name: go-xdp-counter\n        :\n        volumeMounts:\n        - name: go-xdp-counter-maps                        <==== 2) VolumeMount in container\n          mountPath: /run/xdp/maps                         <==== 2a) Mount path in the container\n          readOnly: true\n      volumes:\n      - name: go-xdp-counter-maps                          <==== 1) Volume describing the map\n        csi:\n          driver: csi.bpfman.io                             <==== 1a) bpfman CSI Driver\n          volumeAttributes:\n            csi.bpfman.io/program: go-xdp-counter-example   <==== 1b) eBPF Program owning the map\n            csi.bpfman.io/maps: xdp_stats_map               <==== 1c) Map to be exposed to the container\n
    "},{"location":"getting-started/example-bpf-k8s/#loading-a-userspace-container-image","title":"Loading A Userspace Container Image","text":"

    The userspace programs have been pre-built and can be found here:

    • quay.io/bpfman-userspace/go-kprobe-counter:latest
    • quay.io/bpfman-userspace/go-tc-counter:latest
    • quay.io/bpfman-userspace/go-tracepoint-counter:latest
    • quay.io/bpfman-userspace/go-uprobe-counter:latest
    • quay.io/bpfman-userspace/go-xdp-counter:latest

    The example yaml files below load from these images.

    • go-kprobe-counter/deployment.yaml
    • go-tc-counter/deployment.yaml
    • go-tracepoint-counter/deployment.yaml
    • go-uprobe-counter/deployment.yaml
    • go-xdp-counter/deployment.yaml

    The userspace program in a Kubernetes Deployment doesn't interact directly with bpfman like it does in the local host deployment. Instead, the userspace program running on each node, if needed, reads the BpfProgram object from the KubeApiServer to gather additional information about the loaded eBPF program. To interact with the KubeApiServer, RBAC must be set up properly to access the BpfProgram object. The bpfman-operator defines the yaml for several ClusterRoles that can be used to access the different bpfman related CRD objects with different access rights. The example userspace containers use the bpfprogram-viewer-role, which allows read-only access to the BpfProgram object. This ClusterRole is created automatically by the bpfman-operator.

    The remaining objects (Namespace, ServiceAccount, ClusterRoleBinding and the example's DaemonSet) can be created for each program type as follows:

    cd bpfman/\nkubectl create -f examples/config/base/go-xdp-counter/deployment.yaml\n

    This creates the go-xdp-counter userspace pod; the other examples operate in a similar fashion.

    Following the diagram for the XDP example (green numbers):

    1. The userspace program queries the KubeApiServer for a specific BpfProgram object.
    2. The userspace program verifies the BpfProgram has been loaded and uses the map to periodically read the counter values.

    To see if the userspace programs are working, view the logs:

    kubectl get pods -A\nNAMESPACE               NAME                              READY   STATUS    RESTARTS   AGE\nbpfman                  bpfman-daemon-jsgdh               3/3     Running   0          11m\nbpfman                  bpfman-operator-6c5c8887f7-qk28x  2/2     Running   0          12m\ngo-xdp-counter          go-xdp-counter-ds-2hs6g           1/1     Running   0          6m12s\n:\n\nkubectl logs -n go-xdp-counter go-xdp-counter-ds-2hs6g\n2023/11/06 20:27:16 2429 packets received\n2023/11/06 20:27:16 1328474 bytes received\n\n2023/11/06 20:27:19 2429 packets received\n2023/11/06 20:27:19 1328474 bytes received\n\n2023/11/06 20:27:22 2430 packets received\n2023/11/06 20:27:22 1328552 bytes received\n:\n

    To cleanup:

    kubectl delete -f examples/config/base/go-xdp-counter/deployment.yaml\nkubectl delete -f examples/config/base/go-xdp-counter/bytecode.yaml\n
    "},{"location":"getting-started/example-bpf-k8s/#automated-deployment","title":"Automated Deployment","text":"

    The steps above are automated in the Makefile in the examples directory. Run make deploy to load each of the example bytecode and userspace yaml files, then make undeploy to unload them.

    cd bpfman/examples/\nmake deploy\n  for target in deploy-tc deploy-tracepoint deploy-xdp deploy-xdp-ms deploy-kprobe deploy-target deploy-uprobe ; do \\\n      make $target  || true; \\\n  done\n  make[1]: Entering directory '/home/<$USER>/go/src/github.com/bpfman/bpfman/examples'\n  sed 's@URL_BC@quay.io/bpfman-bytecode/go-tc-counter:latest@' config/default/go-tc-counter/patch.yaml.env > config/default/go-tc-counter/patch.yaml\n  cd config/default/go-tc-counter && /home/<$USER>/go/src/github.com/bpfman/bpfman/examples/bin/kustomize edit set image quay.io/bpfman-userspace/go-tc-counter=quay.io/bpfman-userspace/go-tc-counter:latest\n  namespace/go-tc-counter created\n  serviceaccount/bpfman-app-go-tc-counter created\n  daemonset.apps/go-tc-counter-ds created\n  tcprogram.bpfman.io/go-tc-counter-example created\n  :\n  sed 's@URL_BC@quay.io/bpfman-bytecode/go-uprobe-counter:latest@' config/default/go-uprobe-counter/patch.yaml.env > config/default/go-uprobe-counter/patch.yaml\n  cd config/default/go-uprobe-counter && /home/<$USER>/go/src/github.com/bpfman/bpfman/examples/bin/kustomize edit set image quay.io/bpfman-userspace/go-uprobe-counter=quay.io/bpfman-userspace/go-uprobe-counter:latest\n  namespace/go-uprobe-counter created\n  serviceaccount/bpfman-app-go-uprobe-counter created\n  daemonset.apps/go-uprobe-counter-ds created\n  uprobeprogram.bpfman.io/go-uprobe-counter-example created\n  make[1]: Leaving directory '/home/<$USER>/go/src/github.com/bpfman/bpfman/examples'\n\n# Test Away ...\n\nkubectl get pods -A\nNAMESPACE               NAME                                                      READY   STATUS    RESTARTS   AGE\nbpfman                  bpfman-daemon-md2c5                                       3/3     Running   0          2d17h\nbpfman                  bpfman-operator-7f67bc7c57-95zf7                          2/2     Running   0          2d17h\ngo-kprobe-counter       go-kprobe-counter-ds-8dkls                                1/1     Running   0          2m14s\ngo-target               go-target-ds-nbdf5                                        1/1     Running   0          2m14s\ngo-tc-counter           go-tc-counter-ds-7mtcw                                    1/1     Running   0          2m19s\ngo-tracepoint-counter   go-tracepoint-counter-ds-bcbs7                            1/1     Running   0          2m18s\ngo-uprobe-counter       go-uprobe-counter-ds-j26hc                                1/1     Running   0          2m13s\ngo-xdp-counter          go-xdp-counter-ds-nls6s                                   1/1     Running   0          2m17s\n\nkubectl get bpfprograms\nNAME                                                                                                TYPE         STATUS         AGE\ngo-kprobe-counter-example-bpfman-deployment-control-plane-try-to-wake-up                            kprobe       bpfmanLoaded   2m41s\ngo-tc-counter-example-bpfman-deployment-control-plane-eth0                                          tc           bpfmanLoaded   2m46s\ngo-tracepoint-counter-example-bpfman-deployment-control-plane-syscalls-sys-enter-kill               tracepoint   bpfmanLoaded   2m35s\ngo-uprobe-counter-example-bpfman-deployment-control-plane--go-target-go-target-ds-nbdf5-go-target   uprobe       bpfmanLoaded   2m29s\ngo-xdp-counter-example-bpfman-deployment-control-plane-eth0                                         xdp          bpfmanLoaded   2m24s\ngo-xdp-counter-sharing-map-example-bpfman-deployment-control-plane-eth0                             xdp          
bpfmanLoaded   2m21s\n\nmake undeploy\n  for target in undeploy-tc undeploy-tracepoint undeploy-xdp undeploy-xdp-ms undeploy-kprobe undeploy-uprobe undeploy-target ; do \\\n      make $target  || true; \\\n  done\n  make[1]: Entering directory '/home/<$USER>/go/src/github.com/bpfman/bpfman/examples'\n  sed 's@URL_BC@quay.io/bpfman-bytecode/go-tc-counter:latest@' config/default/go-tc-counter/patch.yaml.env > config/default/go-tc-counter/patch.yaml\n  cd config/default/go-tc-counter && /home/<$USER>/go/src/github.com/bpfman/bpfman/examples/bin/kustomize edit set image quay.io/bpfman-userspace/go-tc-counter=quay.io/bpfman-userspace/go-tc-counter:latest\n  namespace \"go-tc-counter\" deleted\n  serviceaccount \"bpfman-app-go-tc-counter\" deleted\n  daemonset.apps \"go-tc-counter-ds\" deleted\n  tcprogram.bpfman.io \"go-tc-counter-example\" deleted\n  :\n  kubectl delete -f config/base/go-target/deployment.yaml\n  namespace \"go-target\" deleted\n  serviceaccount \"bpfman-app-go-target\" deleted\n  daemonset.apps \"go-target-ds\" deleted\n  make[1]: Leaving directory '/home/<$USER>/go/src/github.com/bpfman/bpfman/examples'\n

    Individual examples can be loaded and unloaded as well; for example, make deploy-xdp and make undeploy-xdp. To see the full set of available commands, run make help:

    make help\n\nUsage:\n  make <target>\n  make deploy TAG=v0.2.0\n  make deploy-xdp IMAGE_XDP_US=quay.io/user1/go-xdp-counter-userspace:test\n\nGeneral\n  help             Display this help.\n\nLocal Dependencies\n  kustomize        Download kustomize locally if necessary.\n\nDevelopment\n  fmt              Run go fmt against code.\n  verify           Verify all the autogenerated code\n\nBuild\n  build            Build all the userspace example code.\n  generate         Run `go generate` to build the bytecode for each of the examples.\n  build-us-images  Build all example userspace images\n  build-bc-images  Build bytecode example userspace images\n  push-us-images   Push all example userspace images\n  push-bc-images   Push all example bytecode images\n  load-us-images-kind  Build and load all example userspace images into kind\n\nDeployment Variables (not commands)\n  TAG              Used to set all images to a fixed tag. Example: make deploy TAG=v0.2.0\n  IMAGE_TC_BC      TC Bytecode image. Example: make deploy-tc IMAGE_TC_BC=quay.io/user1/go-tc-counter-bytecode:test\n  IMAGE_TC_US      TC Userspace image. Example: make deploy-tc IMAGE_TC_US=quay.io/user1/go-tc-counter-userspace:test\n  IMAGE_TP_BC      Tracepoint Bytecode image. Example: make deploy-tracepoint IMAGE_TP_BC=quay.io/user1/go-tracepoint-counter-bytecode:test\n  IMAGE_TP_US      Tracepoint Userspace image. Example: make deploy-tracepoint IMAGE_TP_US=quay.io/user1/go-tracepoint-counter-userspace:test\n  IMAGE_XDP_BC     XDP Bytecode image. Example: make deploy-xdp IMAGE_XDP_BC=quay.io/user1/go-xdp-counter-bytecode:test\n  IMAGE_XDP_US     XDP Userspace image. Example: make deploy-xdp IMAGE_XDP_US=quay.io/user1/go-xdp-counter-userspace:test\n  IMAGE_KP_BC      Kprobe Bytecode image. Example: make deploy-kprobe IMAGE_KP_BC=quay.io/user1/go-kprobe-counter-bytecode:test\n  IMAGE_KP_US      Kprobe Userspace image. Example: make deploy-kprobe IMAGE_KP_US=quay.io/user1/go-kprobe-counter-userspace:test\n  IMAGE_UP_BC      Uprobe Bytecode image. Example: make deploy-uprobe IMAGE_UP_BC=quay.io/user1/go-uprobe-counter-bytecode:test\n  IMAGE_UP_US      Uprobe Userspace image. Example: make deploy-uprobe IMAGE_UP_US=quay.io/user1/go-uprobe-counter-userspace:test\n  IMAGE_GT_US      Uprobe Userspace target. Example: make deploy-target IMAGE_GT_US=quay.io/user1/go-target-userspace:test\n  KIND_CLUSTER_NAME  Name of the deployed cluster to load example images to, defaults to `bpfman-deployment`\n  ignore-not-found  For any undeploy command, set to true to ignore resource not found errors during deletion. 
Example: make undeploy ignore-not-found=true\n\nDeployment\n  deploy-tc        Deploy go-tc-counter to the cluster specified in ~/.kube/config.\n  undeploy-tc      Undeploy go-tc-counter from the cluster specified in ~/.kube/config.\n  deploy-tracepoint  Deploy go-tracepoint-counter to the cluster specified in ~/.kube/config.\n  undeploy-tracepoint  Undeploy go-tracepoint-counter from the cluster specified in ~/.kube/config.\n  deploy-xdp       Deploy go-xdp-counter to the cluster specified in ~/.kube/config.\n  undeploy-xdp     Undeploy go-xdp-counter from the cluster specified in ~/.kube/config.\n  deploy-xdp-ms    Deploy go-xdp-counter-sharing-map (shares map with go-xdp-counter) to the cluster specified in ~/.kube/config.\n  undeploy-xdp-ms  Undeploy go-xdp-counter-sharing-map from the cluster specified in ~/.kube/config.\n  deploy-kprobe    Deploy go-kprobe-counter to the cluster specified in ~/.kube/config.\n  undeploy-kprobe  Undeploy go-kprobe-counter from the cluster specified in ~/.kube/config.\n  deploy-uprobe    Deploy go-uprobe-counter to the cluster specified in ~/.kube/config.\n  undeploy-uprobe  Undeploy go-uprobe-counter from the cluster specified in ~/.kube/config.\n  deploy-target    Deploy go-target to the cluster specified in ~/.kube/config.\n  undeploy-target  Undeploy go-target from the cluster specified in ~/.kube/config.\n  deploy           Deploy all examples to the cluster specified in ~/.kube/config.\n  undeploy         Undeploy all examples to the cluster specified in ~/.kube/config.\n
    "},{"location":"getting-started/example-bpf-k8s/#building-a-userspace-container-image","title":"Building A Userspace Container Image","text":"

    To build the userspace examples in a container instead of using the pre-built ones, run the following build command from the bpfman examples source directory:

    cd bpfman/examples\nmake \\\n  IMAGE_KP_US=quay.io/$USER/go-kprobe-counter:latest \\\n  IMAGE_TC_US=quay.io/$USER/go-tc-counter:latest \\\n  IMAGE_TP_US=quay.io/$USER/go-tracepoint-counter:latest \\\n  IMAGE_UP_US=quay.io/$USER/go-uprobe-counter:latest \\\n  IMAGE_XDP_US=quay.io/$USER/go-xdp-counter:latest \\\n  build-us-images\n

    Then EITHER push images to a remote repository:

    docker login quay.io\ncd bpfman/examples\nmake \\\n  IMAGE_KP_US=quay.io/$USER/go-kprobe-counter:latest \\\n  IMAGE_TC_US=quay.io/$USER/go-tc-counter:latest \\\n  IMAGE_TP_US=quay.io/$USER/go-tracepoint-counter:latest \\\n  IMAGE_UP_US=quay.io/$USER/go-uprobe-counter:latest \\\n  IMAGE_XDP_US=quay.io/$USER/go-xdp-counter:latest \\\n  push-us-images\n

    OR load the images directly to a specified kind cluster:

    cd bpfman/examples\nmake \\\n  IMAGE_KP_US=quay.io/$USER/go-kprobe-counter:latest \\\n  IMAGE_TC_US=quay.io/$USER/go-tc-counter:latest \\\n  IMAGE_TP_US=quay.io/$USER/go-tracepoint-counter:latest \\\n  IMAGE_UP_US=quay.io/$USER/go-uprobe-counter:latest \\\n  IMAGE_XDP_US=quay.io/$USER/go-xdp-counter:latest \\\n  KIND_CLUSTER_NAME=bpfman-deployment \\\n  load-us-images-kind\n

    Lastly, update the yaml to use the private images or override the yaml files using the Makefile:

    cd bpfman/examples/\n\nmake deploy-kprobe IMAGE_KP_US=quay.io/$USER/go-kprobe-counter:latest\nmake undeploy-kprobe\n\nmake deploy-tc IMAGE_TC_US=quay.io/$USER/go-tc-counter:latest\nmake undeploy-tc\n\nmake deploy-tracepoint IMAGE_TP_US=quay.io/$USER/go-tracepoint-counter:latest\nmake undeploy-tracepoint\n\nmake deploy-uprobe IMAGE_UP_US=quay.io/$USER/go-uprobe-counter:latest\nmake undeploy-uprobe\n\nmake deploy-xdp IMAGE_XDP_US=quay.io/$USER/go-xdp-counter:latest\nmake undeploy-xdp\n
    "},{"location":"getting-started/example-bpf-local/","title":"Deploying Example eBPF Programs On Local Host","text":"

    This section describes running bpfman and the example eBPF programs on a local host.

    "},{"location":"getting-started/example-bpf-local/#example-overview","title":"Example Overview","text":"

    Assume the following command is run:

    cd bpfman/examples/go-xdp-counter/\ngo run -exec sudo . -iface eno3\n

    The diagram below shows the go-xdp-counter example, but the other examples operate in a similar fashion.

    Following the diagram (purple numbers):

    1. When the go-xdp-counter userspace program is started, it will send a gRPC request over a unix socket to bpfman-rpc, requesting bpfman to load the go-xdp-counter eBPF bytecode located on disk at bpfman/examples/go-xdp-counter/bpf_x86_bpfel.o at a priority of 50 and on interface eno3. These values are configurable, as we will see later, but for now we will use the defaults (except the interface, which is required to be entered).
    2. bpfman will load its dispatcher eBPF program, which links to the go-xdp-counter eBPF program, and return a kernel Program ID referencing the running program.
    3. bpfman list can be used to show that the eBPF program was loaded.
    4. Once the go-xdp-counter eBPF bytecode is loaded, the eBPF program will write packet counts and byte counts to a shared map.
    5. go-xdp-counter userspace program periodically reads counters from the shared map and logs the value.

    Below are the steps to run the example program described above and then some additional examples that use the bpfman CLI to load and unload other eBPF programs. See Launching bpfman for more detailed instructions on building and loading bpfman. This tutorial assumes bpfman has been built, bpfman-rpc is running, and the bpfman CLI is in $PATH.

    "},{"location":"getting-started/example-bpf-local/#running-example-programs","title":"Running Example Programs","text":"

    Example eBPF Programs describes how the example programs work, how to build them, and how to run the different examples. Build the go-xdp-counter program before continuing.

    To run the go-xdp-counter program, determine the host interface to attach the eBPF program to and then start the Go program. In this example, eno3 will be used, as shown in the diagram at the top of the page. The output should show the count and total bytes of packets as they pass through the interface, as shown below:

    cd bpfman/examples/go-xdp-counter/\n\ngo run -exec sudo . --iface eno3\n2023/07/17 17:43:58 Using Input: Interface=eno3 Priority=50 Source=/home/$USER/src/bpfman/examples/go-xdp-counter/bpf_x86_bpfel.o\n2023/07/17 17:43:58 Program registered with id 6211\n2023/07/17 17:44:01 4 packets received\n2023/07/17 17:44:01 580 bytes received\n\n2023/07/17 17:44:04 4 packets received\n2023/07/17 17:44:04 580 bytes received\n\n2023/07/17 17:44:07 8 packets received\n2023/07/17 17:44:07 1160 bytes received\n\n:\n

    In another terminal, use the CLI to show the go-xdp-counter eBPF bytecode was loaded.

    sudo bpfman list\n Program ID  Name       Type  Load Time\n 6211        xdp_stats  xdp   2023-07-17T17:43:58-0400\n

    Finally, press <CTRL>+c when finished with go-xdp-counter.

    :\n\n2023/07/17 17:44:34 28 packets received\n2023/07/17 17:44:34 4060 bytes received\n\n^C2023/07/17 17:44:35 Exiting...\n2023/07/17 17:44:35 Unloading Program: 6211\n
    "},{"location":"getting-started/example-bpf-local/#using-cli-to-manage-ebpf-programs","title":"Using CLI to Manage eBPF Programs","text":"

    bpfman provides a CLI to interact with the bpfman Library. Find a deeper dive into CLI syntax in the CLI Guide. We will load the simple xdp-pass program, which allows all traffic to pass through the attached interface, eno3 in this example. The source code, xdp_pass.bpf.c, is located in the integration-test directory, and there is also a prebuilt image: quay.io/bpfman-bytecode/xdp_pass:latest.
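
    For orientation, the whole program is essentially a one-liner. The following is a paraphrased sketch of what xdp_pass.bpf.c amounts to, not the verbatim source:

    // Sketch only: pass every packet up the stack untouched.\n#include <linux/bpf.h>\n#include <bpf/bpf_helpers.h>\n\nSEC(\"xdp\")\nint pass(struct xdp_md *ctx)\n{\n    return XDP_PASS;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n

    Loading it from the prebuilt image looks like this: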

    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest xdp --iface eno3 --priority 100\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6213\n Map Owner ID:  None\n Map Used By:   6213\n Priority:      100\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6213\n Name:                             pass\n Type:                             xdp\n Loaded At:                        2023-07-17T17:48:10-0400\n Tag:                              4b9d1b2c140e87ce\n GPL Compatible:                   true\n Map IDs:                          [2724]\n BTF ID:                           2834\n Size Translated (bytes):          96\n JITed:                            true\n Size JITed (bytes):               67\n Kernel Allocated Memory (bytes):  4096\n Verified Instruction Count:       9\n

    bpfman load image returns the same data as the bpfman get command. From the output, the Program Id of 6213 can be found in the Kernel State section. The Program Id can be used to perform a bpfman get to retrieve all relevant program data and a bpfman unload when the program needs to be unloaded.

    sudo bpfman list\n Program ID  Name  Type  Load Time\n 6213        pass  xdp   2023-07-17T17:48:10-0400\n

    We can recheck the details about the loaded program with the bpfman get command:

    sudo bpfman get 6213\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6213\n Map Owner ID:  None\n Map Used By:   6213\n Priority:      100\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6213\n Name:                             pass\n Type:                             xdp\n Loaded At:                        2023-07-17T17:48:10-0400\n Tag:                              4b9d1b2c140e87ce\n GPL Compatible:                   true\n Map IDs:                          [2724]\n BTF ID:                           2834\n Size Translated (bytes):          96\n JITed:                            true\n Size JITed (bytes):               67\n Kernel Allocated Memory (bytes):  4096\n Verified Instruction Count:       9\n

    Then unload the program:

    sudo bpfman unload 6213\n
    "},{"location":"getting-started/example-bpf/","title":"Example eBPF Programs","text":"

    Example applications that use the bpfman-go bindings can be found in the bpfman/examples/ directory. Current examples include:

    • bpfman/examples/go-app-counter/
    • bpfman/examples/go-kprobe-counter/
    • bpfman/examples/go-target/
    • bpfman/examples/go-tc-counter/
    • bpfman/examples/go-tcx-counter/
    • bpfman/examples/go-tracepoint-counter/
    • bpfman/examples/go-uprobe-counter/
    • bpfman/examples/go-uretprobe-counter/
    • bpfman/examples/go-xdp-counter/
    "},{"location":"getting-started/example-bpf/#example-code-breakdown","title":"Example Code Breakdown","text":"

    These examples and the associated documentation are intended to provide the basics on how to deploy and manage an eBPF program using bpfman. Each example contains one or more eBPF programs written in C (app_counter.c, kprobe_counter.c, tc_counter.c, tcx_counter.c, tracepoint_counter.c, uprobe_counter.c, uretprobe_counter.c, and xdp_counter.c) that are compiled into eBPF bytecode for each supported architecture (bpf_arm64_bpfel.o, bpf_powerpc_bpfel.o, bpf_s390_bpfeb.o and bpf_x86_bpfel.o). Each time the eBPF program is called, it increments the packet and byte counts in a map that the userspace portion can access.

    Each example also has a userspace portion written in Go. The userspace code leverages the cilium/ebpf library to manage the maps shared with the eBPF program. The example eBPF programs are very similar in functionality and vary only in where they are inserted in the Linux networking stack. The userspace program polls the eBPF map every 3 seconds and logs the current counts.
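
    The counters the userspace program polls can also be inspected from the host with bpftool. This is a minimal sketch, assuming the program is already loaded; the map ID below is a placeholder, and the real ID can be taken from the Map IDs field of the bpfman get output for the loaded program:

    # 2724 is a placeholder; use the Map IDs value from: sudo bpfman get <PROGRAM_ID>\nsudo bpftool map dump id 2724\n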

    The examples were written to either run locally on a host or run in a container in a Kubernetes deployment. The userspace code flow is slightly different depending on the deployment, so input parameters dictate the deployment method.

    "},{"location":"getting-started/example-bpf/#examples-in-local-deployment","title":"Examples in Local Deployment","text":"

    When run locally, the userspace program makes gRPC calls to bpfman-rpc requesting bpfman to load the eBPF program at the requested hook point (TC hook point, Tracepoint, XDP hook point, etc). Data sent in the RPC request is either defaulted or passed in via input parameters. To make the examples as simple as possible to run, all input data is defaulted (except the interface that TC and XDP programs need to attach to) but can be overwritten if desired. All example programs have the following common parameters (kprobe does not have any command specific parameters):

    cd bpfman/examples/go-kprobe-counter/\n\n./go-kprobe-counter --help\nUsage of ./go-kprobe-counter:\n  -crd\n      Flag to indicate all attributes should be pulled from the BpfProgram CRD.\n      Used in Kubernetes deployments and is mutually exclusive with all other\n      parameters.\n  -file string\n      File path of bytecode source. \"file\" and \"image\"/\"id\" are mutually exclusive.\n      Example: -file /home/$USER/src/bpfman/examples/go-kprobe-counter/bpf_x86_bpfel.o\n  -id uint\n      Optional Program ID of bytecode that has already been loaded. \"id\" and\n      \"file\"/\"image\" are mutually exclusive.\n      Example: -id 28341\n  -image string\n      Image repository URL of bytecode source. \"image\" and \"file\"/\"id\" are\n      mutually exclusive.\n      Example: -image quay.io/bpfman-bytecode/go-kprobe-counter:latest\n  -map_owner_id int\n      Program Id of loaded eBPF program this eBPF program will share a map with.\n      Example: -map_owner_id 9785\n

    The location of the eBPF bytecode can be provided four different ways:

    • Defaulted: If nothing is passed in, the code scans the local directory for a bpf_x86_bpfel.o file. If found, that is used. If not, it errors out.
    • file: Fully qualified path of the bytecode object file.
    • image: Image repository URL of bytecode source.
    • id: Kernel Program ID of bytecode that has already been loaded. This program could have been loaded using bpftool or bpfman.

    If two userspace programs need to share the same map, map_owner_id is the Program ID of the first loaded program that owns the map the second program wants to share.
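
    As a concrete sketch of these options, the go-kprobe-counter example could be launched in any of the following ways. The file path and the Program IDs below are illustrative placeholders taken from the help text above:

    cd bpfman/examples/go-kprobe-counter/\n\n# Defaulted: scan the local directory for bpf_x86_bpfel.o\nsudo ./go-kprobe-counter\n\n# file: fully qualified path of the bytecode object file\nsudo ./go-kprobe-counter -file /home/$USER/src/bpfman/examples/go-kprobe-counter/bpf_x86_bpfel.o\n\n# image: image repository URL of the bytecode source\nsudo ./go-kprobe-counter -image quay.io/bpfman-bytecode/go-kprobe-counter:latest\n\n# id: kernel Program ID of bytecode that has already been loaded\nsudo ./go-kprobe-counter -id 28341\n\n# map_owner_id: share the map owned by an already loaded program\nsudo ./go-kprobe-counter -map_owner_id 9785\n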

    The examples require sudo to run because they need access to the Unix socket that bpfman-rpc is listening on. Deploying Example eBPF Programs On Local Host steps through launching bpfman locally and running some of the examples.

    "},{"location":"getting-started/example-bpf/#examples-in-kubernetes-deployment","title":"Examples in Kubernetes Deployment","text":"

    When run in a Kubernetes deployment, all the input data is passed to Kubernetes through yaml files. To indicate to the userspace code that it is in a Kubernetes deployment and should not try to load the eBPF bytecode itself, the example is launched in the container with the crd flag. Example: ./go-kprobe-counter -crd

    For these examples, the bytecode is loaded via one yaml file, which creates a *Program CRD Object (KprobeProgram, TcProgram, TracepointProgram, etc.), and the userspace pod is loaded via another yaml file. In a more realistic deployment, the userspace pod might have the logic to send the *Program CRD Object create request to the KubeAPI Server, but for simplicity the two yaml files are loaded manually in the example code. The examples directory contains yaml files to load each example, leveraging Kustomize to modify the yaml to load the latest images from Quay.io, custom images, or release-based images. It is recommended to use the commands built into the Makefile, which run kustomize, to apply and remove the yaml files to and from a Kubernetes cluster. Use make help to see all the make options. For example:

    cd bpfman/examples/\n\n# Deploy then undeploy all the examples\nmake deploy\nmake undeploy\n\nOR\n\n# Deploy then undeploy just the TC example\nmake deploy-tc\nmake undeploy-tc\n

    Deploying Example eBPF Programs On Kubernetes steps through deploying bpfman to multiple nodes in a Kubernetes cluster and loading the examples.

    "},{"location":"getting-started/example-bpf/#building-example-code","title":"Building Example Code","text":"

    All the examples can be built locally as well as packaged in a container for Kubernetes deployment.

    "},{"location":"getting-started/example-bpf/#building-locally","title":"Building Locally","text":"

    To build directly on a system, make sure all the prerequisites are met, then build.

    "},{"location":"getting-started/example-bpf/#prerequisites","title":"Prerequisites","text":"

    This assumes bpfman is already installed and running on the system. If not, see Setup and Building bpfman.

    1. All requirements defined by the cilium/ebpf package
    2. libbpf development package to get the required eBPF c headers

      Fedora: sudo dnf install libbpf-devel

      Ubuntu: sudo apt-get install libbpf-dev

    "},{"location":"getting-started/example-bpf/#build","title":"Build","text":"

    To build all the C based eBPF counter bytecode, run:

    cd bpfman/examples/\nmake generate\n

    To build all the Userspace GO Client examples, run:

    cd bpfman/examples/\nmake build\n

    To build only a single example:

    cd bpfman/examples/go-tracepoint-counter/\ngo generate\ngo build\n

    Other program types are the same.

    "},{"location":"getting-started/example-bpf/#building-ebpf-bytecode-container-image","title":"Building eBPF Bytecode Container Image","text":"

    eBPF Bytecode Image Specifications provides detailed instructions on building and shipping bytecode in a container image. Pre-built eBPF container images for the examples can be loaded from:

    • quay.io/bpfman-bytecode/go-app-counter:latest
    • quay.io/bpfman-bytecode/go-kprobe-counter:latest
    • quay.io/bpfman-bytecode/go-tc-counter:latest
    • quay.io/bpfman-bytecode/go-tcx-counter:latest
    • quay.io/bpfman-bytecode/go-tracepoint-counter:latest
    • quay.io/bpfman-bytecode/go-uprobe-counter:latest
    • quay.io/bpfman-bytecode/go-uretprobe-counter:latest
    • quay.io/bpfman-bytecode/go-xdp-counter:latest

    To build the example eBPF bytecode container images, first generate the bytecode (the generate commands require the Prerequisites described above in the Building Locally section).

    To generate the bytecode for all the examples:

    cd bpfman/examples/\nmake generate\n

    OR to generate the bytecode for a single example (XDP in this case):

    cd bpfman/examples/go-xdp-counter/\ngo generate\n

    The preferred method for building the container image is to use the bpfman image build command. See bpfman image build in the CLI Guide for more details.

    cd bpfman/examples/go-xdp-counter/\nbpfman image build -f ../../Containerfile.bytecode -t quay.io/$QUAY_USER/go-xdp-counter-bytecode:test -b bpf_x86_bpfel.o\n

    The examples Makefile has commands to build all the example images if needed. See Locally Build Example Container Images for more details.

    bpfman currently does not provide a method for pre-loading bytecode images (see issue #603), so push the bytecode image to an image repository.

    For example:

    docker login quay.io\ndocker push quay.io/$QUAY_USER/go-xdp-counter-bytecode:test\n
    "},{"location":"getting-started/example-bpf/#running-examples","title":"Running Examples","text":"

    Below are some examples of how to run the bpfman examples on a host where bpfman is already installed.

    cd bpfman/examples/go-xdp-counter/\nsudo ./go-xdp-counter -iface <INTERNET INTERFACE NAME>\n

    or (NOTE: TC programs also require a direction, ingress or egress)

    cd bpfman/examples/go-tc-counter/\nsudo ./go-tc-counter -direction ingress -iface <INTERNET INTERFACE NAME>\n

    or

    cd bpfman/examples/go-tracepoint-counter/\nsudo ./go-tracepoint-counter\n

    bpfman can load eBPF bytecode from a container image built following the spec described in eBPF Bytecode Image Specifications.

    To use the container image, pass the URL to the userspace program:

    sudo ./go-xdp-counter -iface eno3 -image quay.io/bpfman-bytecode/go-xdp-counter:latest\n2022/12/02 16:28:32 Using Input: Interface=eno3 Priority=50 Source=quay.io/bpfman-bytecode/go-xdp-counter:latest\n2022/12/02 16:28:34 Program registered with id 6223\n2022/12/02 16:28:37 4 packets received\n2022/12/02 16:28:37 580 bytes received\n\n2022/12/02 16:28:40 4 packets received\n2022/12/02 16:28:40 580 bytes received\n\n^C2022/12/02 16:28:42 Exiting...\n2022/12/02 16:28:42 Unloading Program: 6223\n

    Or to run with the privately built bytecode container image:

    sudo ./go-xdp-counter -iface eno3 -image quay.io/$QUAY_USER/go-xdp-counter-bytecode:test\n2022/12/02 16:38:44 Using Input: Interface=eno3 Priority=50 Source=quay.io/$QUAY_USER/go-xdp-counter-bytecode:test\n2022/12/02 16:38:45 Program registered with id 6225\n2022/12/02 16:38:48 4 packets received\n2022/12/02 16:38:48 580 bytes received\n\n2022/12/02 16:38:51 4 packets received\n2022/12/02 16:38:51 580 bytes received\n\n^C2022/12/02 16:38:51 Exiting...\n2022/12/02 16:38:51 Unloading Program: 6225\n
    "},{"location":"getting-started/launching-bpfman/","title":"Launching bpfman","text":"

    The most basic way to deploy bpfman is to run it directly on a host system. First bpfman needs to be built and then started.

    "},{"location":"getting-started/launching-bpfman/#build-bpfman","title":"Build bpfman","text":"

    Perform the following steps to build bpfman. If this is your first time using bpfman, follow the instructions in Setup and Building bpfman to setup the prerequisites for building. To avoid installing the dependencies and having to build bpfman, consider running bpfman from a packaged release (see Run bpfman From Release Image) or installing the bpfman RPM (see Run bpfman From RPM).

    cd bpfman/\ncargo build\n
    "},{"location":"getting-started/launching-bpfman/#install-and-start-bpfman","title":"Install and Start bpfman","text":"

    Run the following command to copy the bpfman CLI and bpfman-rpc binaries to /usr/sbin/ and copy bpfman.socket and bpfman.service files to /usr/lib/systemd/system/. This option will also enable and start the systemd services:

    cd bpfman/\nsudo ./scripts/setup.sh install\n

    bpfman CLI is now in $PATH and can be used to load, view and unload eBPF programs.

    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest --name pass xdp --iface eno3 --priority 100\n\nsudo bpfman list\n Program ID  Name  Type  Load Time                \n 53885       pass  xdp   2024-08-26T17:41:36-0400 \n\nsudo bpfman unload 53885\n

    bpfman CLI is a Rust program that calls the bpfman library directly. To view logs while running bpfman CLI commands, prepend RUST_LOG=info to each command (see Logging for more details):

    sudo RUST_LOG=info bpfman list\n[INFO  bpfman::utils] Has CAP_BPF: true\n[INFO  bpfman::utils] Has CAP_SYS_ADMIN: true\n Program ID  Name  Type  Load Time \n

    The examples (see Deploying Example eBPF Programs On Local Host) are Go based programs, so they build and send RPC messages to the Rust based binary bpfman-rpc, which in turn calls the bpfman library.

    cd bpfman/examples/go-xdp-counter/\ngo run -exec sudo . -iface eno3\n

    To view bpfman logs for RPC based applications, including all the provided examples, use journalctl:

    sudo journalctl -f -u bpfman.service -u bpfman.socket\n:\n  <RUN \"go run -exec sudo . -iface eno3\">\nAug 26 18:03:54 server-calvin bpfman-rpc[2401725]: Using a Unix socket from systemd\nAug 26 18:03:54 server-calvin bpfman-rpc[2401725]: Using inactivity timer of 15 seconds\nAug 26 18:03:54 server-calvin bpfman-rpc[2401725]: Listening on /run/bpfman-sock/bpfman.sock\nAug 26 18:03:54 server-calvin bpfman-rpc[2401725]: Has CAP_BPF: true\nAug 26 18:03:54 server-calvin bpfman-rpc[2401725]: Has CAP_SYS_ADMIN: true\nAug 26 18:03:54 server-calvin bpfman-rpc[2401725]: Starting Cosign Verifier, downloading data from Sigstore TUF repository\nAug 26 18:03:55 server-calvin bpfman-rpc[2401725]: Loading program bytecode from file: /home/$USER/src/bpfman/bpfman/examples/go-xdp-counter/bpf_x86_bpfel.o\nAug 26 18:03:57 server-calvin bpfman-rpc[2401725]: The bytecode image: quay.io/bpfman/xdp-dispatcher:latest is signed\nAug 26 18:03:57 server-calvin bpfman-rpc[2401725]: Added xdp program with name: xdp_stats and id: 53919\nAug 26 18:04:09 server-calvin bpfman-rpc[2401725]: Shutdown Unix Handler /run/bpfman-sock/bpfman.sock\n
    "},{"location":"getting-started/launching-bpfman/#additional-notes","title":"Additional Notes","text":"

    To update the configuration settings associated with running bpfman as a service, edit the service configuration files:

    sudo vi /usr/lib/systemd/system/bpfman.socket\nsudo vi /usr/lib/systemd/system/bpfman.service\nsudo systemctl daemon-reload\n

    If bpfman CLI or bpfman-rpc is rebuilt, the following command can be run to install the updated binaries without tearing down bpfman. The services are automatically restarted.

    sudo ./scripts/setup.sh reinstall\n

    To unwind all the changes, stop bpfman and remove all related files from the system, run the following script:

    sudo ./scripts/setup.sh uninstall\n
    "},{"location":"getting-started/launching-bpfman/#preferred-method-to-start-bpfman","title":"Preferred Method to Start bpfman","text":"

    In order to call into the bpfman library, the calling process must be privileged, because the kernel requires a set of powerful capabilities to load and unload eBPF programs. Long lived privileged processes are more vulnerable to attack than short lived processes. When bpfman-rpc is run as a systemd service, it leverages socket activation, meaning it loads a bpfman.socket and a bpfman.service file. The socket unit is the long lived entity and doesn't require any special permissions. The service that runs bpfman-rpc is only started when there is a request on the socket, and bpfman-rpc stops itself after an inactivity timeout.
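
    The socket activation behavior can be observed with systemctl and one of the RPC based examples. A rough sketch, assuming the systemd units installed by setup.sh and eno3 as the interface:

    sudo systemctl status bpfman.socket bpfman.service\n\n# In another terminal, trigger activation with an RPC request\ncd bpfman/examples/go-xdp-counter/\ngo run -exec sudo . -iface eno3\n\n# bpfman.service becomes active while requests are being processed, then\n# returns to inactive (dead) after the inactivity timeout expires\nsudo systemctl status bpfman.service\n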

    Note

    For security reasons, it is recommended to run bpfman-rpc as a systemd service when running on a local host. For local development, some may find it useful to run bpfman-rpc as a long lived process.

    When run as a systemd service, the set of linux capabilities are limited to only the required set. If permission errors are encountered, see Linux Capabilities for help debugging.

    "},{"location":"getting-started/overview/","title":"bpfman Overview","text":"

    Core bpfman is a library written in Rust and published as a Crate via crates.io. The bpfman library leverages the aya library to manage eBPF programs. Applications written in Rust can import the bpfman library and call the bpfman APIs directly. An example of a Rust based application leveraging the bpfman library is the bpfman CLI, a binary used to provision bpfman from a Linux command prompt (see CLI Guide).
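
    For a Rust application, pulling in the library is a single cargo step. A minimal sketch, assuming the crate name as published on crates.io:

    # my-rust-app is a hypothetical project directory\ncd my-rust-app/\ncargo add bpfman\n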

    For applications written in other languages, bpfman provides bpfman-rpc, a Rust based bpfman RPC server binary. Non-Rust applications can send an RPC message to the server, which translates each RPC request into a bpfman library call. The long term solution is to leverage the Rust Foreign Function Interface (FFI) feature, which enables a different (foreign) programming language to call Rust functions, but that is not supported at the moment.

    "},{"location":"getting-started/overview/#local-host-deployment","title":"Local Host Deployment","text":"

    When deploying bpfman on a local server, the bpfman-rpc binary runs as a systemd service that uses socket activation to start bpfman-rpc only when there is an RPC message to process. More details are provided in Deploying Example eBPF Programs On Local Host.

    "},{"location":"getting-started/overview/#kubernetes-deployment","title":"Kubernetes Deployment","text":"

    When deploying bpfman in a Kubernetes deployment, bpfman-agent, bpfman-rpc, and the bpfman library are packaged in a container. When the container starts, bpfman-rpc is started as a long running process. bpfman-agent listens to the KubeAPI Server and sends RPC requests to bpfman-rpc, which in turn calls the bpfman library to manage eBPF programs on a given node.

    More details are provided in Deploying Example eBPF Programs On Kubernetes.
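
    Once deployed, the running pieces can be verified with kubectl. A sketch, assuming the operator installs into the bpfman namespace; pod names will vary:

    kubectl get pods -n bpfman\n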

    "},{"location":"getting-started/running-release/","title":"Run bpfman From Release Image","text":"

    This section describes how to deploy bpfman from a given release. See Releases for the set of bpfman releases.

    Note

    Instructions for interacting with bpfman change from release to release, so reference release specific documentation. For example:

    https://bpfman.io/v0.5.4/getting-started/running-release/

    Jump to the Setup and Building bpfman section for help building from the latest code or building from a release branch.

    Start bpfman-rpc contains more details on the different modes in which bpfman can be run on the host. Use Run using an rpm for deploying a released version of bpfman from an rpm as a systemd service, and then use Deploying Example eBPF Programs On Local Host for further information on how to test and interact with bpfman.

    Deploying the bpfman-operator contains more details on deploying bpfman in a Kubernetes deployment, and Deploying Example eBPF Programs On Kubernetes contains more details on interacting with bpfman running in a Kubernetes deployment. Use Deploying Release Version of the bpfman-operator below for deploying a released version of bpfman in Kubernetes, and then use the links above for further information on how to test and interact with bpfman.

    "},{"location":"getting-started/running-release/#run-as-a-long-lived-process","title":"Run as a Long Lived Process","text":"
    export BPFMAN_REL=0.5.4\nmkdir -p $HOME/src/bpfman-${BPFMAN_REL}/; cd $HOME/src/bpfman-${BPFMAN_REL}/\nwget https://github.com/bpfman/bpfman/releases/download/v${BPFMAN_REL}/bpfman-linux-x86_64.tar.gz\ntar -xzvf bpfman-linux-x86_64.tar.gz; rm bpfman-linux-x86_64.tar.gz\n\n$ tree\n.\n\u251c\u2500\u2500 bpf-log-exporter\n\u251c\u2500\u2500 bpfman\n\u251c\u2500\u2500 bpfman-ns\n\u251c\u2500\u2500 bpfman-rpc\n\u2514\u2500\u2500 bpf-metrics-exporter\n

    To deploy bpfman-rpc:

    sudo RUST_LOG=info ./bpfman-rpc --timeout=0\n[INFO  bpfman::utils] Has CAP_BPF: true\n[INFO  bpfman::utils] Has CAP_SYS_ADMIN: true\n[INFO  bpfman_rpc::serve] Using no inactivity timer\n[INFO  bpfman_rpc::serve] Using default Unix socket\n[INFO  bpfman_rpc::serve] Listening on /run/bpfman-sock/bpfman.sock\n:\n

    To use the CLI:

    sudo ./bpfman list\n Program ID  Name  Type  Load Time\n

    Continue in Deploying Example eBPF Programs On Local Host if desired.

    "},{"location":"getting-started/running-release/#deploying-release-version-of-the-bpfman-operator","title":"Deploying Release Version of the bpfman-operator","text":"

    The quickest solution for running bpfman in a Kubernetes deployment is to run a Kubernetes KIND Cluster:

    kind create cluster --name=test-bpfman\n

    Next, deploy the bpfman CRDs:

    export BPFMAN_REL=0.5.4\nkubectl apply -f  https://github.com/bpfman/bpfman/releases/download/v${BPFMAN_REL}/bpfman-crds-install.yaml\n

    Next, deploy the bpfman-operator, which will also deploy the bpfman-daemon, which contains bpfman-rpc, the bpfman library, and bpfman-agent:

    kubectl apply -f https://github.com/bpfman/bpfman/releases/download/v${BPFMAN_REL}/bpfman-operator-install-v${BPFMAN_REL}.yaml\n

    Finally, deploy an example eBPF program.

    kubectl apply -f https://github.com/bpfman/bpfman/releases/download/v${BPFMAN_REL}/go-xdp-counter-install-v${BPFMAN_REL}.yaml\n

    There are other example programs in the Releases page.
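
    To confirm the example loaded, the created XdpProgram object and the example pods can be listed. A sketch; object kinds and pod names may vary by release:

    kubectl get xdpprograms\nkubectl get pods -A | grep go-xdp-counter\n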

    Continue in Deploying the bpfman-operator or Deploying Example eBPF Programs On Kubernetes if desired. Keep in mind that prior to v0.4.0, bpfman was released as bpfd, so follow the release specific documentation.

    Use the following command to teardown the cluster:

    kind delete cluster -n test-bpfman\n
    "},{"location":"getting-started/running-rpm/","title":"Run bpfman From RPM","text":"

    This section describes how to deploy bpfman from an RPM. RPMs are generated for Fedora 38, 39, and Rawhide each time a Pull Request is merged in GitHub (see Install Prebuilt RPM below). RPMs can also be built locally from a Fedora server (see Build RPM Locally below).

    "},{"location":"getting-started/running-rpm/#install-prebuilt-rpm","title":"Install Prebuilt RPM","text":"

    This section describes how to install an RPM built automatically by the Packit Service. The Packit Service builds RPMs for each Pull Request merged.

    "},{"location":"getting-started/running-rpm/#packit-service-prerequisites","title":"Packit Service Prerequisites","text":"

    To install an RPM generated by the Packit Service, the following packages need to be installed:

    dnf based OS:

    sudo dnf install -y dnf-plugins-core\n

    To install officially released versions:

    sudo dnf copr enable @ebpf-sig/bpfman\n

    To install nightly builds:

    sudo dnf copr enable @ebpf-sig/bpfman-next\n

    Note

    If both the bpfman and bpfman-next copr repos are enabled, dnf will automatically pull from bpfman-next. Either repo can be disabled. For example, to disable bpfman-next run:

    sudo dnf copr disable @ebpf-sig/bpfman-next\n
    "},{"location":"getting-started/running-rpm/#install-rpm-from-packit-service","title":"Install RPM From Packit Service","text":"

    To install an RPM from a specific commit (@ebpf-sig/bpfman-next needs to be enabled instead of @ebpf-sig/bpfman), find the commit from bpfman commits, and click on the green check showing a given Pull Request was verified. At the bottom of the list of checks are the RPM builds; click on the details and follow the Packit Dashboard link to the Copr Build Results. Then install the given RPM:

    sudo dnf install -y bpfman-0.4.0~dev-1.20240117143006587102.main.191.gda44a71.fc38.x86_64\n

    bpfman is now installed but not running. To start bpfman:

    sudo systemctl daemon-reload\nsudo systemctl enable bpfman.socket\nsudo systemctl start bpfman.socket\n

    Verify bpfman is installed and running:

    $ sudo systemctl status bpfman.socket\n\u25cf bpfman.socket - bpfman API Socket\n     Loaded: loaded (/usr/lib/systemd/system/bpfman.socket; enabled; preset: disabled)\n     Active: active (listening) since Thu 2024-01-18 21:19:29 EST; 5s ago\n   Triggers: \u25cf bpfman.service\n     Listen: /run/bpfman-sock/bpfman.sock (Stream)\n     CGroup: /system.slice/bpfman.socket\n:\n\n$ sudo systemctl status bpfman.service\n\u25cb bpfman.service - Run bpfman as a service\n     Loaded: loaded (/usr/lib/systemd/system/bpfman.service; static)\n    Drop-In: /usr/lib/systemd/system/service.d\n             \u2514\u250010-timeout-abort.conf\n     Active: inactive (dead)\nTriggeredBy: \u25cf bpfman.socket\n:\n\n$ sudo bpfman list\n Program ID  Name  Type  Load Time\n
    "},{"location":"getting-started/running-rpm/#uninstall-given-rpm","title":"Uninstall Given RPM","text":"

    To determine the RPM that is currently loaded:

    $ sudo rpm -qa | grep bpfman\nbpfman-0.4.0~dev-1.20240117143006587102.main.191.gda44a71.fc39.x86_64\n

    To stop bpfman and uninstall the RPM:

    sudo systemctl stop bpfman.socket\nsudo systemctl disable bpfman.socket\n\nsudo dnf erase -y bpfman-0.4.0~dev-1.20240117143006587102.main.191.gda44a71.fc39.x86_64\n\nsudo systemctl daemon-reload\n
    "},{"location":"getting-started/running-rpm/#build-rpm-locally","title":"Build RPM Locally","text":"

    This section describes how to build and install an RPM locally.

    "},{"location":"getting-started/running-rpm/#local-build-prerequisites","title":"Local Build Prerequisites","text":"

    To build locally, the following packages need to be installed:

    dnf based OS:

    sudo dnf install packit\nsudo dnf install cargo-rpm-macros\n

    Note

    cargo-rpm-macros needs to be version 25 or higher. It appears this is only available on Fedora 37, 38, 39 and Rawhide at the moment.

    "},{"location":"getting-started/running-rpm/#build-locally","title":"Build Locally","text":"

    To build locally, run the following command:

    packit build locally\n

    This will generate several RPMs in an x86_64/ directory:

    $ ls x86_64/\nbpfman-0.4.1-1.20240521101705214906.main.19.b47994a3.fc39.x86_64.rpm\nbpfman-debuginfo-0.4.1-1.20240521101705214906.main.19.b47994a3.fc39.x86_64.rpm\nbpfman-debugsource-0.4.1-1.20240521101705214906.main.19.b47994a3.fc39.x86_64.rpm\n

    If local RPM builds were previously run on the system, the packit build locally command may fail with something similar to:

    packit build locally\n2024-05-21 10:00:03.904 base_git.py       INFO   Using user-defined script for ActionName.post_upstream_clone: [['bash', '-c', 'if [[ ! -d /var/tmp/cargo-vendor-filterer ]]; then git clone https://github.com/coreos/cargo-vendor-filterer.git /var/tmp/cargo-vendor-filterer; fi && cd /var/tmp/cargo-vendor-filterer && cargo build && cd - && cp /var/tmp/cargo-vendor-filterer/target/debug/cargo-vendor-filterer . && ./cargo-vendor-filterer --format tar.gz --prefix vendor bpfman-bpfman-vendor.tar.gz']]\n2024-05-21 10:00:03.956 logging.py        INFO   error: could not find `Cargo.toml` in `/var/tmp/cargo-vendor-filterer` or any parent directory\n2024-05-21 10:00:03.957 commands.py       ERROR  Command 'bash -c if [[ ! -d /var/tmp/cargo-vendor-filterer ]]; then git clone https://github.com/coreos/cargo-vendor-filterer.git /var/tmp/cargo-vendor-filterer; fi && cd /var/tmp/cargo-vendor-filterer && cargo build && cd - && cp /var/tmp/cargo-vendor-filterer/target/debug/cargo-vendor-filterer . && ./cargo-vendor-filterer --format tar.gz --prefix vendor bpfman-bpfman-vendor.tar.gz' failed.\n2024-05-21 10:00:03.957 utils.py          ERROR  Command 'bash -c if [[ ! -d /var/tmp/cargo-vendor-filterer ]]; then git clone https://github.com/coreos/cargo-vendor-filterer.git /var/tmp/cargo-vendor-filterer; fi && cd /var/tmp/cargo-vendor-filterer && cargo build && cd - && cp /var/tmp/cargo-vendor-filterer/target/debug/cargo-vendor-filterer . && ./cargo-vendor-filterer --format tar.gz --prefix vendor bpfman-bpfman-vendor.tar.gz' failed.\n

    To fix, run:

    sudo rm -rf /var/tmp/cargo-vendor-filterer/\n
    "},{"location":"getting-started/running-rpm/#install-local-build","title":"Install Local Build","text":"

    Install the RPM:

    sudo rpm -i x86_64/bpfman-0.4.1-1.20240521101705214906.main.19.b47994a3.fc39.x86_64.rpm\n

    bpfman is now installed but not running. To start bpfman:

    sudo systemctl daemon-reload\nsudo systemctl enable bpfman.socket\nsudo systemctl start bpfman.socket\n

    Verify bpfman is installed and running:

    $ sudo systemctl status bpfman.socket\n\u25cf bpfman.socket - bpfman API Socket\n     Loaded: loaded (/usr/lib/systemd/system/bpfman.socket; enabled; preset: disabled)\n     Active: active (listening) since Thu 2024-01-18 21:19:29 EST; 5s ago\n   Triggers: \u25cf bpfman.service\n     Listen: /run/bpfman-sock/bpfman.sock (Stream)\n     CGroup: /system.slice/bpfman.socket\n:\n\n$ sudo systemctl status bpfman.service\n\u25cb bpfman.service - Run bpfman as a service\n     Loaded: loaded (/usr/lib/systemd/system/bpfman.service; static)\n    Drop-In: /usr/lib/systemd/system/service.d\n             \u2514\u250010-timeout-abort.conf\n     Active: inactive (dead)\nTriggeredBy: \u25cf bpfman.socket\n:\n\n$ sudo bpfman list\n Program ID  Name  Type  Load Time\n
    "},{"location":"getting-started/running-rpm/#uninstall-local-build","title":"Uninstall Local Build","text":"

    To determine the RPM that is currently loaded:

    $ sudo rpm -qa | grep bpfman\nbpfman-0.4.1-1.20240521101705214906.main.19.b47994a3.fc39.x86_64\n

    To stop bpfman and uninstall the RPM:

    sudo systemctl stop bpfman.socket\nsudo systemctl disable bpfman.socket\n\nsudo rpm -e bpfman-0.4.1-1.20240521101705214906.main.19.b47994a3.fc39.x86_64\n\nsudo systemctl daemon-reload\n
    "},{"location":"getting-started/troubleshooting/","title":"Troubleshooting","text":"

    This section provides a list of common issues and solutions when working with bpfman.

    "},{"location":"getting-started/troubleshooting/#xdp","title":"XDP","text":""},{"location":"getting-started/troubleshooting/#xdp-program-fails-to-load","title":"XDP Program Fails to Load","text":"

    When attempting to load an XDP program, the load may fail with an error like the following:

    $ sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest xdp --iface veth92cd99b --priority 100\nError: status: Aborted, message: \"An error occurred. dispatcher attach failed on interface veth92cd99b: `bpf_link_create` failed\", details: [], metadata: MetadataMap { headers: {\"content-type\": \"application/grpc\", \"date\": \"Tue, 28 Nov 2023 13:37:02 GMT\", \"content-length\": \"0\"} }\n

    The log may look something like this:

    Nov 28 08:36:58 ebpf03 bpfman[2081732]: The bytecode image: quay.io/bpfman-bytecode/xdp_pass:latest is signed\nNov 28 08:36:59 ebpf03 bpfman[2081732]: Loading program bytecode from container image: quay.io/bpfman-bytecode/xdp_pass:latest\nNov 28 08:37:01 ebpf03 bpfman[2081732]: The bytecode image: quay.io/bpfman/xdp-dispatcher:v2 is signed\nNov 28 08:37:02 ebpf03 bpfman[2081732]: BPFMAN load error: Error(\n                                            \"dispatcher attach failed on interface veth92cd99b: `bpf_link_create` failed\",\n                                        )\n

    The issue may be that there is already an external XDP program loaded on the given interface. bpfman allows multiple XDP programs on an interface by loading a dispatcher program, which is the actual XDP program attached to the interface; additional programs are loaded as extensions to the dispatcher. Use bpftool to determine if any programs are already loaded on an interface:

    $ sudo bpftool net list dev veth92cd99b\nxdp:\nveth92cd99b(32) generic id 8733\n\ntc:\nveth92cd99b(32) clsact/ingress tc_dispatcher id 8922\n\nflow_dissector:\n
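
    If the externally loaded XDP program is no longer needed, one possible remediation is to detach it with iproute2 before loading through bpfman. A sketch, using the interface from the output above:

    sudo ip link set dev veth92cd99b xdp off\n# If the program was attached in generic mode, this form may be needed instead\nsudo ip link set dev veth92cd99b xdpgeneric off\n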
    "},{"location":"governance/CODE_OF_CONDUCT/","title":"Contributor Covenant Code of Conduct","text":""},{"location":"governance/CODE_OF_CONDUCT/#our-pledge","title":"Our Pledge","text":"

    We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation.

    We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.

    "},{"location":"governance/CODE_OF_CONDUCT/#our-standards","title":"Our Standards","text":"

    Examples of behavior that contributes to a positive environment for our community include:

    • Demonstrating empathy and kindness toward other people
    • Being respectful of differing opinions, viewpoints, and experiences
    • Giving and gracefully accepting constructive feedback
    • Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
    • Focusing on what is best not just for us as individuals, but for the overall community

    Examples of unacceptable behavior include:

    • The use of sexualized language or imagery, and sexual attention or advances of any kind
    • Trolling, insulting or derogatory comments, and personal or political attacks
    • Public or private harassment
    • Publishing others' private information, such as a physical or email address, without their explicit permission
    • Other conduct which could reasonably be considered inappropriate in a professional setting
    "},{"location":"governance/CODE_OF_CONDUCT/#enforcement-responsibilities","title":"Enforcement Responsibilities","text":"

    Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.

    Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.

    "},{"location":"governance/CODE_OF_CONDUCT/#scope","title":"Scope","text":"

    This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.

    "},{"location":"governance/CODE_OF_CONDUCT/#enforcement","title":"Enforcement","text":"

    Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement directly. Maintainers are identified in the MAINTAINERS.md file and their contact information is on their GitHub profile page. All complaints will be reviewed and investigated promptly and fairly.

    All community leaders are obligated to respect the privacy and security of the reporter of any incident.

    "},{"location":"governance/CODE_OF_CONDUCT/#enforcement-guidelines","title":"Enforcement Guidelines","text":"

    Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:

    "},{"location":"governance/CODE_OF_CONDUCT/#1-correction","title":"1. Correction","text":"

    Community Impact: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.

    Consequence: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.

    "},{"location":"governance/CODE_OF_CONDUCT/#2-warning","title":"2. Warning","text":"

    Community Impact: A violation through a single incident or series of actions.

    Consequence: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.

    "},{"location":"governance/CODE_OF_CONDUCT/#3-temporary-ban","title":"3. Temporary Ban","text":"

    Community Impact: A serious violation of community standards, including sustained inappropriate behavior.

    Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.

    "},{"location":"governance/CODE_OF_CONDUCT/#4-permanent-ban","title":"4. Permanent Ban","text":"

    Community Impact: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.

    Consequence: A permanent ban from any sort of public interaction within the community.

    "},{"location":"governance/CODE_OF_CONDUCT/#attribution","title":"Attribution","text":"

    This Code of Conduct is adapted from the Contributor Covenant, version 2.1, available at https://www.contributor-covenant.org/version/2/1/code_of_conduct.html.

    Community Impact Guidelines were inspired by Mozilla's code of conduct enforcement ladder.

    For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.

    "},{"location":"governance/CONTRIBUTING/","title":"Contributing Guide","text":"
    • Ways to Contribute
    • Find an Issue
    • Ask for Help
    • Pull Request Lifecycle
    • Development Environment Setup
    • Signoff Your Commits
    • Pull Request Checklist

    Welcome! We are glad that you want to contribute to our project! \ud83d\udc96

    As you get started, you are in the best position to give us feedback on areas of our project that we need help with including:

    • Problems found during setting up a new developer environment
    • Gaps in our Quickstart Guide or documentation
    • Bugs in our automation scripts

    If anything doesn't make sense, or doesn't work when you run it, please open a bug report and let us know!

    "},{"location":"governance/CONTRIBUTING/#ways-to-contribute","title":"Ways to Contribute","text":"

    We welcome many different types of contributions including:

    • New features
    • Builds, CI/CD
    • Bug fixes
    • Documentation
    • Issue Triage
    • Answering questions on Slack/Mailing List
    • Web design
    • Communications / Social Media / Blog Posts
    • Release management

    Not everything happens through a GitHub pull request. Please come to our meetings or contact us and let's discuss how we can work together.

    "},{"location":"governance/CONTRIBUTING/#come-to-meetings","title":"Come to Meetings","text":"

    Absolutely everyone is welcome to come to any of our meetings. You never need an invite to join us. In fact, we want you to join us, even if you don\u2019t have anything you feel like you want to contribute. Just being there is enough!

    You can find out more about our meetings here. You don\u2019t have to turn on your video. The first time you come, introducing yourself is more than enough. Over time, we hope that you feel comfortable voicing your opinions, giving feedback on others\u2019 ideas, and even sharing your own ideas, and experiences.

    "},{"location":"governance/CONTRIBUTING/#find-an-issue","title":"Find an Issue","text":"

    We have good first issues for new contributors and help wanted issues suitable for any contributor. good first issue has extra information to help you make your first contribution. help wanted issues are suitable for someone who isn't a core maintainer and are good to move onto after your first pull request.

    Sometimes there won\u2019t be any issues with these labels. That\u2019s ok! There is likely still something for you to work on. If you want to contribute but you don\u2019t know where to start or can't find a suitable issue, you can reach out to us on Slack and we will be happy to help.

    Once you see an issue that you'd like to work on, please post a comment saying that you want to work on it. Something like \"I want to work on this\" is fine.

    "},{"location":"governance/CONTRIBUTING/#ask-for-help","title":"Ask for Help","text":"

    The best way to reach us with a question when contributing is to ask on:

    • The original github issue
    • Our Slack channel
    "},{"location":"governance/CONTRIBUTING/#pull-request-lifecycle","title":"Pull Request Lifecycle","text":"

    Pull requests are managed by Mergify.

    Our process is currently as follows:

    1. When you open a PR a maintainer will automatically be assigned for review
    2. Make sure that your PR is passing CI - if you need help with failing checks please feel free to ask!
    3. Once it is passing all CI checks, a maintainer will review your PR and you may be asked to make changes.
    4. When you have received at least one approval from a maintainer, your PR will be merged automatically.

    In some cases, other changes may conflict with your PR. If this happens, you will get notified by a comment in the issue that your PR requires a rebase, and the needs-rebase label will be applied. Once a rebase has been performed, this label will be automatically removed.

    "},{"location":"governance/CONTRIBUTING/#development-environment-setup","title":"Development Environment Setup","text":"

    See Setup and Building bpfman

    "},{"location":"governance/CONTRIBUTING/#signoff-your-commits","title":"Signoff Your Commits","text":""},{"location":"governance/CONTRIBUTING/#dco","title":"DCO","text":"

    Licensing is important to open source projects. It provides some assurances that the software will continue to be available under the terms that the author(s) desired. We require that contributors sign off on commits submitted to our project's repositories. The Developer Certificate of Origin (DCO) is a way to certify that you wrote and have the right to contribute the code you are submitting to the project.

    You sign-off by adding the following to your commit messages. Your sign-off must match the git user and email associated with the commit.

    This is my commit message\n\nSigned-off-by: Your Name <your.name@example.com>\n

    Git has a -s command line option to do this automatically:

    git commit -s -m 'This is my commit message'\n

    If you forgot to do this and have not yet pushed your changes to the remote repository, you can amend your commit with the sign-off by running

    git commit --amend -s\n
    "},{"location":"governance/CONTRIBUTING/#logical-grouping-of-commits","title":"Logical Grouping of Commits","text":"

    It is a recommended best practice to keep your changes as logically grouped as possible within individual commits. If, while developing, you prefer making a number of commits that are \"checkpoints\" and don't represent a single logical change, please squash those together before asking for a review. When addressing review comments, please perform an interactive rebase and edit commits directly rather than adding new commits with messages like \"Fix review comments\".
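
    A rough sketch of that flow, assuming three checkpoint commits on a branch based on main:

    # Squash the last three checkpoint commits into a single logical commit\ngit rebase -i HEAD~3\n\n# When addressing review comments, edit the existing commits in place\ngit rebase -i main\n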

    "},{"location":"governance/CONTRIBUTING/#commit-message-guidelines","title":"Commit message guidelines","text":"

    A good commit message should describe what changed and why.

    1. The first line should:

    • contain a short description of the change (preferably 50 characters or less, and no more than 72 characters)
    • be entirely in lowercase with the exception of proper nouns, acronyms, and the words that refer to code, like function/variable names
    • be prefixed with the name of the sub crate being changed

    Examples:

    • bpfman: validate program section names
    • bpf: add dispatcher program test slot

    2. Keep the second line blank.
    3. Wrap all other lines at 72 columns (except for long URLs).
    4. If your patch fixes an open issue, you can add a reference to it at the end of the log. Use the Fixes: # prefix and the issue number. For other references use Refs: #. Refs may include multiple issues, separated by a comma.

    Examples:

    • Fixes: #1337
    • Refs: #1234

    Sample complete commit message:

    subcrate: explain the commit in one line\n\nBody of commit message is a few lines of text, explaining things\nin more detail, possibly giving some background about the issue\nbeing fixed, etc.\n\nThe body of the commit message can be several paragraphs, and\nplease do proper word-wrap and keep columns shorter than about\n72 characters or so. That way, `git log` will show things\nnicely even when it is indented.\n\nFixes: #1337\nRefs: #453, #154\n
    "},{"location":"governance/CONTRIBUTING/#pull-request-checklist","title":"Pull Request Checklist","text":"

    When you submit your pull request, or you push new commits to it, our automated systems will run some checks on your new code. We require that your pull request passes these checks, but we also have more criteria than just that before we can accept and merge it. We recommend that you check the following things locally before you submit your code:

    • Verify that Rust code has been formatted and that all clippy lints have been fixed:
    • Verify that Go code has been formatted and linted
    • Verify that Yaml files have been formatted (see Install Yaml Formatter)
    • Verify that Bash scripts have been linted using shellcheck

      cd bpfman/\ncargo xtask lint\n
    • Verify that unit tests are passing locally (see Unit Testing):

      cd bpfman/\ncargo xtask unit-test\n
    • Verify any changes to the bpfman API have been \"blessed\". After running the below command, any changes to any of the files in bpfman/xtask/public-api/*.txt indicate changes to the bpfman API. Verify that these changes were intentional. CI uses the latest nightly Rust toolchain, so make sure the public-apis are verified against latest.

      cd bpfman/\nrustup update nightly\ncargo +nightly xtask public-api --bless\n
    • Verify that integration tests are passing locally (see Basic Integration Tests):

      cd bpfman/\ncargo xtask integration-test\n
    • If developing the bpfman-operator, verify that bpfman-operator unit and integration tests are passing locally:

      See Kubernetes Operator Tests.

    "},{"location":"governance/GOVERNANCE/","title":"bpfman Project Governance","text":"

    The bpfman project is dedicated to creating an easy way to run eBPF programs on a single host and in clusters. This governance explains how the project is run.

    • Values
    • Maintainers
    • Becoming a Maintainer
    • Meetings
    • Code of Conduct Enforcement
    • Security Response Team
    • Voting
    • Modifications
    "},{"location":"governance/GOVERNANCE/#values","title":"Values","text":"

    The bpfman project and its leadership embrace the following values:

    • Openness: Communication and decision-making happens in the open and is discoverable for future reference. As much as possible, all discussions and work take place in public forums and open repositories.

    • Fairness: All stakeholders have the opportunity to provide feedback and submit contributions, which will be considered on their merits.

    • Community over Product or Company: Sustaining and growing our community takes priority over shipping code or sponsors' organizational goals. Each contributor participates in the project as an individual.

    • Inclusivity: We innovate through different perspectives and skill sets, which can only be accomplished in a welcoming and respectful environment.

    • Participation: Responsibilities within the project are earned through participation, and there is a clear path up the contributor ladder into leadership positions.

    "},{"location":"governance/GOVERNANCE/#maintainers","title":"Maintainers","text":"

    bpfman Maintainers have write access to the project GitHub repository. They can merge their patches or patches from others. The list of current maintainers can be found at MAINTAINERS.md. Maintainers collectively manage the project's resources and contributors.

    This privilege is granted with some expectation of responsibility: maintainers are people who care about the bpfman project and want to help it grow and improve. A maintainer is not just someone who can make changes, but someone who has demonstrated their ability to collaborate with the team, get the most knowledgeable people to review code and docs, contribute high-quality code, and follow through to fix issues (in code or tests).

    A maintainer is a contributor to the project's success and a citizen helping the project succeed.

    The collective team of all Maintainers is known as the Maintainer Council, which is the governing body for the project.

    "},{"location":"governance/GOVERNANCE/#becoming-a-maintainer","title":"Becoming a Maintainer","text":"

    To become a Maintainer you need to demonstrate the following:

    • commitment to the project:
    • participate in discussions, contributions, code and documentation reviews, for 6 months or more,
    • perform reviews for 10 non-trivial pull requests,
    • contribute 10 non-trivial pull requests and have them merged,
    • ability to write quality code and/or documentation,
    • ability to collaborate with the team,
    • understanding of how the team works (policies, processes for testing and code review, etc),
    • understanding of the project's code base and coding and documentation style.

    A new Maintainer must be proposed by an existing maintainer by opening a Pull Request on GitHub to update the MAINTAINERS.md file. A simple majority vote of existing Maintainers approves the application. Maintainer nominations will be evaluated without prejudice to employers or demographics.

    Maintainers who are selected will be granted the necessary GitHub rights.

    "},{"location":"governance/GOVERNANCE/#removing-a-maintainer","title":"Removing a Maintainer","text":"

    Maintainers may resign at any time if they feel that they will not be able to continue fulfilling their project duties.

    Maintainers may also be removed after being inactive, failing to fulfill their Maintainer responsibilities, violating the Code of Conduct, or for other reasons. Inactivity is defined as a period of very low or no activity in the project for a year or more, with no definite schedule to return to full Maintainer activity.

    A Maintainer may be removed at any time by a 2/3 vote of the remaining maintainers.

    Depending on the reason for removal, a Maintainer may be converted to Emeritus status. Emeritus Maintainers will still be consulted on some project matters and can be rapidly returned to Maintainer status if their availability changes.

    "},{"location":"governance/GOVERNANCE/#meetings","title":"Meetings","text":"

    Time zones permitting, Maintainers are expected to participate in the public developer meeting, detailed in the meetings document.

    Maintainers will also have closed meetings to discuss security reports or Code of Conduct violations. Such meetings should be scheduled by any Maintainer on receipt of a security issue or CoC report. All current Maintainers must be invited to such closed meetings, except for any Maintainer who is accused of a CoC violation.

    "},{"location":"governance/GOVERNANCE/#code-of-conduct","title":"Code of Conduct","text":"

    Code of Conduct violations by community members will be discussed and resolved on the private maintainer Slack channel.

    "},{"location":"governance/GOVERNANCE/#security-response-team","title":"Security Response Team","text":"

    The Maintainers will appoint a Security Response Team to handle security reports. This committee may simply consist of the Maintainer Council themselves. If this responsibility is delegated, the Maintainers will appoint a team of at least two contributors to handle it. The Maintainers will review who is assigned to this at least once a year.

    The Security Response Team is responsible for handling all reports of security holes and breaches according to the security policy.

    "},{"location":"governance/GOVERNANCE/#voting","title":"Voting","text":"

    While most business in bpfman is conducted by \"lazy consensus\", periodically the Maintainers may need to vote on specific actions or changes. A vote can be taken on the private developer slack channel for security or conduct matters. Votes may also be taken at the developer meeting. Any Maintainer may demand a vote be taken.

    Most votes require a simple majority of all Maintainers to succeed, except where otherwise noted. Two-thirds majority votes mean at least two-thirds of all existing maintainers.

    "},{"location":"governance/GOVERNANCE/#modifying-this-charter","title":"Modifying this Charter","text":"

    Changes to this Governance and its supporting documents may be approved by a 2/3 vote of the Maintainers.

    "},{"location":"governance/MAINTAINERS/","title":"Maintainers","text":"

    See CONTRIBUTING.md for general contribution guidelines. See GOVERNANCE.md for governance guidelines and maintainer responsibilities. See CODEOWNERS for a detailed list of owners for the various source directories.

    • Dave Tucker (Red Hat): Catch all
    • Andrew Stoycos (Red Hat): bpfman-operator, bpfman-agent
    • Andre Fredette (Red Hat): All things tc-bpf
    • Billy McFall (Red Hat): All things systemd
    "},{"location":"governance/MEETINGS/","title":"bpfman Community Meetings","text":""},{"location":"governance/MEETINGS/#meeting-time","title":"Meeting time","text":"

    We meet every Thursday at 10:00 AM Eastern Time. The meetings last up to 1 hour.

    "},{"location":"governance/MEETINGS/#meeting-location","title":"Meeting location","text":"

    Video call link: https://meet.google.com/ggz-zkmp-pxx
    Or dial: (US) +1 984-221-0859 PIN: 613 588 790#
    More phone numbers: https://tel.meet/ggz-zkmp-pxx?pin=3270510926446

    "},{"location":"governance/MEETINGS/#meeting-agenda-and-minutes","title":"Meeting agenda and minutes","text":"

    Meeting agenda

    "},{"location":"governance/REVIEWING/","title":"Reviewing Guide","text":"

    This document covers who may review pull requests for this project, and provides guidance on performing code reviews that meet our community standards and code of conduct. All reviewers must read this document and agree to follow the project review guidelines. Reviewers who do not follow these guidelines may have their privileges revoked.

    "},{"location":"governance/REVIEWING/#the-reviewer-role","title":"The Reviewer Role","text":"

    Only maintainers are REQUIRED to review pull requests. Other contributors may opt to review pull requests, but any LGTM from a non-maintainer won't count towards the required number of Approved Reviews in the Mergify policy.

    "},{"location":"governance/REVIEWING/#values","title":"Values","text":"

    All reviewers must abide by the Code of Conduct and are also protected by it. A reviewer should not tolerate poor behavior and is encouraged to report any behavior that violates the Code of Conduct. All of our values listed above are distilled from our Code of Conduct.

    Below are concrete examples of how it applies to code review specifically:

    "},{"location":"governance/REVIEWING/#inclusion","title":"Inclusion","text":"

    Be welcoming and inclusive. You should proactively ensure that the author is successful. While any particular pull request may not ultimately be merged, overall we want people to have a great experience and be willing to contribute again. Answer the questions they didn't know to ask or offer concrete help when they appear stuck.

    "},{"location":"governance/REVIEWING/#sustainability","title":"Sustainability","text":"

    Avoid burnout by enforcing healthy boundaries. Here are some examples of how a reviewer is encouraged to act to take care of themselves:

    • Authors should meet baseline expectations when submitting a pull request, such as writing tests.
    • If your availability changes, you can step down from a pull request and have someone else assigned.
    • If interactions with an author are not following the code of conduct, close the PR and raise it with your Code of Conduct committee or point of contact. It's not your job to coax people into behaving.
    "},{"location":"governance/REVIEWING/#trust","title":"Trust","text":"

    Be trustworthy. During a review, your actions both build and help maintain the trust that the community has placed in this project. Below are examples of ways that we build trust:

    • Transparency - If a pull request won't be merged, clearly say why and close it. If a pull request won't be reviewed for a while, let the author know so they can set expectations and understand why it's blocked.
    • Integrity - Put the project's best interests ahead of personal relationships or company affiliations when deciding if a change should be merged.
    • Stability - Only merge when the change won't negatively impact project stability. It can be tempting to merge a pull request that doesn't meet our quality standards, for example when the review has been delayed, or because we are trying to deliver new features quickly, but regressions can significantly hurt trust in our project.
    "},{"location":"governance/REVIEWING/#process","title":"Process","text":"
    • Reviewers are automatically assigned based on the CODEOWNERS file.
    • Reviewers should wait for automated checks to pass before reviewing.
    • At least 1 approved review is required from a maintainer before a pull request can be merged.
    • All CI checks must pass.
    • If a PR is stuck for some reason, it is down to the reviewer to determine the best course of action:
    • PRs may be closed if they are no longer relevant.
    • A maintainer may choose to carry a PR forward on their own, but they should ALWAYS include the original author's commits.
    • A maintainer may choose to open additional PRs to help lay a foundation on which the stuck PR can be unstuck. They may either rebase the stuck PR themselves or leave this to the author.
    • Maintainers should not merge their own pull requests without a review.
    • Maintainers should let the Mergify bot merge PRs and not merge PRs directly.
    • In times of need, i.e. to fix pressing security issues, the Maintainers may, at their discretion, merge PRs without review. They should at least add a comment to the PR explaining why they did so.
    "},{"location":"governance/REVIEWING/#checklist","title":"Checklist","text":"

    Below are a set of common questions that apply to all pull requests:

    • [ ] Is this PR targeting the correct branch?
    • [ ] Does the commit message provide an adequate description of the change?
    • [ ] Does the affected code have corresponding tests?
    • [ ] Are the changes documented, not just with inline documentation, but also with conceptual documentation such as an overview of a new feature, or task-based documentation like a tutorial? Consider if this change should be announced on your project blog.
    • [ ] Does this introduce breaking changes that would require an announcement or bumping of the major version?
    • [ ] Does this PR introduce any new dependencies?
    "},{"location":"governance/REVIEWING/#reading-list","title":"Reading List","text":"

    Reviewers are encouraged to read the following articles for help with common reviewer tasks:

    • The Art of Closing: How to close an unfinished or rejected pull request
    • Kindness and Code Reviews: Improving the Way We Give Feedback
    • Code Review Guidelines for Humans: Examples of good and bad feedback
    "},{"location":"governance/SECURITY/","title":"Security Policy","text":""},{"location":"governance/SECURITY/#supported-versions","title":"Supported Versions","text":"

    No released versions of bpfman, bpfman-agent, or bpfman-operator will receive regular security updates until a mainline release has been performed. A reported and fixed vulnerability will be included in the next minor release, which, depending on the severity of the vulnerability, may be released immediately.

    "},{"location":"governance/SECURITY/#reporting-a-vulnerability","title":"Reporting a Vulnerability","text":"

    To report a vulnerability, please use the Private Vulnerability Reporting Feature on GitHub. We will endeavour to respond within 48 hours of a report. If a vulnerability is reported but considered low priority, it may be converted into an issue and handled on the public issue tracker. Should a vulnerability be considered severe, we will endeavour to patch it within 48 hours of acceptance, and may ask you to collaborate with us on a temporary private fork of the repository.

    "},{"location":"blog/archive/2024/","title":"2024","text":""},{"location":"blog/archive/2023/","title":"2023","text":""},{"location":"blog/category/community-meeting/","title":"Community Meeting","text":""},{"location":"blog/category/2024/","title":"2024","text":""}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":"

    Formerly known as bpfd

    "},{"location":"#bpfman-an-ebpf-manager","title":"bpfman: An eBPF Manager","text":"

    bpfman operates as an eBPF manager, focusing on simplifying the deployment and administration of eBPF programs. Its notable features encompass:

    • System Overview: Provides insights into how eBPF is utilized in your system.
    • eBPF Program Loader: Includes a built-in program loader that supports program cooperation for XDP and TC programs, as well as deployment of eBPF programs from OCI images.
    • eBPF Filesystem Management: Manages the eBPF filesystem, facilitating the deployment of eBPF applications without requiring additional privileges.

    Our program loader and eBPF filesystem manager ensure the secure deployment of eBPF applications. Furthermore, bpfman includes a Kubernetes operator, extending these capabilities to Kubernetes. This allows users to confidently deploy eBPF through custom resource definitions across nodes in a cluster.

    "},{"location":"#quick-start","title":"Quick Start","text":"

    To get up and running with bpfman go straight to the quick start documentation.

    "},{"location":"#why-ebpf","title":"Why eBPF?","text":"

    eBPF is a powerful general-purpose framework that allows running sandboxed programs in the kernel. It can be used for many purposes, including networking, monitoring, tracing and security.

    "},{"location":"#why-ebpf-in-kubernetes","title":"Why eBPF in Kubernetes?","text":"

    Demand is increasing from both Kubernetes developers and users. Examples of eBPF in Kubernetes include:

    • Cilium and Calico CNIs
    • Pixie: Open source observability
    • KubeArmor: Container-aware runtime security enforcement system
    • Blixt: Gateway API L4 conformance implementation
    • NetObserv: Open source operator for network observability
    "},{"location":"#challenges-for-ebpf-in-kubernetes","title":"Challenges for eBPF in Kubernetes","text":"
    • Requires privileged pods:
      • eBPF-enabled apps require at least CAP_BPF permissions and potentially more depending on the type of program that is being attached.
      • Since Linux capabilities are very broad, it is challenging to constrain a pod to the minimum set of privileges required. This can allow pods to do damage (either unintentionally or intentionally).
    • Handling multiple eBPF programs on the same eBPF hooks:
      • Not all eBPF hooks are designed to support multiple programs.
      • Some software using eBPF assumes exclusive use of an eBPF hook and can unintentionally eject existing programs when being attached. This can result in silent and non-deterministic failures.
    • Debugging problems with deployments is hard:
      • The cluster administrator may not be aware that eBPF programs are being used in a cluster.
      • It is possible for some eBPF programs to interfere with others in unpredictable ways.
      • SSH access or a privileged pod is necessary to determine the state of eBPF programs on each node in the cluster.
    • Lifecycle management of eBPF programs:
      • While there are libraries for the basic loading and unloading of eBPF programs, a lot of code is often needed around them for lifecycle management.
    • Deployment on Kubernetes is not simple:
      • It is an involved process that requires first writing a daemon that loads your eBPF bytecode and deploying it using a DaemonSet.
      • This requires careful design and intricate knowledge of the eBPF program lifecycle to ensure your program stays loaded and that you can easily tolerate pod restarts and upgrades.
      • In eBPF enabled K8s deployments today, the eBPF Program is often embedded into the userspace binary that loads and interacts with it. This means there's no easy way to have fine-grained versioning control of the bpfProgram in relation to its accompanying userspace counterpart.
    "},{"location":"#what-is-bpfman","title":"What is bpfman?","text":"

    bpfman is a software stack that aims to make it easy to load, unload, modify and monitor eBPF programs whether on a single host, or in a Kubernetes cluster. bpfman includes the following core components:

    • bpfman: A system daemon that supports loading, unloading, modifying and monitoring of eBPF programs exposed over a gRPC API.
    • eBPF CRDS: bpfman provides a set of CRDs (XdpProgram, TcProgram, etc.) that provide a way to express intent to load eBPF programs as well as a bpfman generated CRD (BpfProgram) used to represent the runtime state of loaded programs.
    • bpfman-agent: The agent runs in a container in the bpfman daemonset and ensures that the requested eBPF programs for a given node are in the desired state.
    • bpfman-operator: An operator, built using Operator SDK, that manages the installation and lifecycle of bpfman-agent and the CRDs in a Kubernetes cluster.

    bpfman is developed in Rust and built on top of Aya, a Rust eBPF library.

    The benefits of this solution include the following:

    • Security:
      • Improved security because only the bpfman daemon, which can be tightly controlled, has the privileges needed to load eBPF programs, while access to the API can be controlled via standard RBAC methods. Within bpfman, only a single thread keeps these capabilities while the other threads (serving RPCs) do not.
      • Gives the administrators control over who can load programs.
      • Allows administrators to define rules for the ordering of networking eBPF programs. (ROADMAP)
    • Visibility/Debuggability:
      • Improved visibility into what eBPF programs are running on a system, which enhances the debuggability for developers, administrators, and customer support.
      • The greatest benefit is achieved when all apps use bpfman, but even if they don't, bpfman can provide visibility into all the eBPF programs loaded on the nodes in a cluster.
    • Multi-program Support:
      • Support for the coexistence of multiple eBPF programs from multiple users.
      • Uses the libxdp multiprog protocol to allow multiple XDP programs on a single interface.
      • This same protocol is also supported for TC programs to provide a common multi-program user experience across both TC and XDP.
    • Productivity:
      • Simplifies the deployment and lifecycle management of eBPF programs in a Kubernetes cluster.
      • Developers can stop worrying about program lifecycle (loading, attaching, pin management, etc.) and use existing eBPF libraries to interact with their program maps using well defined pin points which are managed by bpfman.
      • Developers can still use Cilium/libbpf/Aya/etc libraries for eBPF development, and load/unload with bpfman.
      • Provides eBPF Bytecode Image Specifications that allow fine-grained, separate versioning control for userspace and kernelspace programs. This also allows for signing these container images to verify bytecode ownership.

    For more details, please see the following:

    • bpfman Overview for an overview of bpfman.
    • Quick Start for a quick installation of bpfman without having to download or build the code from source. Good for just getting familiar with bpfman and playing around with it.
    • Deploying Example eBPF Programs On Local Host for some examples of running bpfman on local host and using the CLI to install eBPF programs on the host.
    • Deploying Example eBPF Programs On Kubernetes for some examples of deploying eBPF programs through bpfman in a Kubernetes deployment.
    • Setup and Building bpfman for instructions on setting up your development environment and building bpfman.
    • Example eBPF Programs for some examples of eBPF programs written in Go, interacting with bpfman.
    • Deploying the bpfman-operator for details on launching bpfman in a Kubernetes cluster.
    • Meet the Community for details on community meeting details.

    We are a Cloud Native Computing Foundation sandbox project.

    "},{"location":"quick-start/","title":"Quick Start","text":"

    This section describes how to deploy bpfman quickly from pre-built release artifacts. Users can either deploy it locally via the provided RPMs or in a Kubernetes cluster via the provided container images and install yamls. See Releases for the complete set of bpfman releases.

    "},{"location":"quick-start/#deploy-released-rpm-from-copr-locally","title":"Deploy Released RPM from COPR Locally","text":"

    This section describes how to install an RPM built automatically by the Packit Service. The Packit Service builds RPMs for each release.

    To install an RPM generated by the Packit Service, the following packages need to be installed:

    dnf based OS:

    sudo dnf install -y dnf-plugins-core\n

    Additionally, the bpfman COPR repo needs to be enabled:

    sudo dnf copr enable @ebpf-sig/bpfman\n

    To see information about the latest released version of bpfman, simply run:

    sudo dnf info bpfman\n\nLast metadata expiration check: 0:03:10 ago on Mon 06 May 2024 10:37:37 AM EDT.\nAvailable Packages\nName         : bpfman\nVersion      : 0.4.2\nRelease      : 1.fc39\nArchitecture : src\nSize         : 41 M\nSource       : None\nRepository   : copr:copr.fedorainfracloud.org:group_ebpf-sig:bpfman\nSummary      : An eBPF program manager\nURL          : https://bpfman.io\nLicense      : Apache-2.0\nDescription  : An eBPF Program Manager.\n\nName         : bpfman\nVersion      : 0.4.2\nRelease      : 1.fc39\nArchitecture : x86_64\nSize         : 9.7 M\nSource       : bpfman-0.4.2-1.fc39.src.rpm\nRepository   : copr:copr.fedorainfracloud.org:group_ebpf-sig:bpfman\nSummary      : An eBPF program manager\nURL          : https://bpfman.io\nLicense      : Apache-2.0 AND Unicode-DFS-2016 AND BSD-3-Clause AND ISC AND MIT AND MPL-2.0\nDescription  : An eBPF Program Manager.\n

    Next, install either the latest version with:

    sudo dnf install bpfman \n

    Or install an older version with:

    sudo dnf install bpfman-<RELEASE_VERSION> \n

    bpfman is now installed but not running. To start the bpfman-rpc server process:

    sudo systemctl daemon-reload\nsudo systemctl enable bpfman.socket\nsudo systemctl start bpfman.socket\n

    Finally you can run one of the sample applications:

    sudo bpfman load image --image-url quay.io/bpfd-bytecode/tracepoint:latest tracepoint --tracepoint sched/sched_switch\n\nsudo bpfman list\n Program ID  Name          Type        Load Time                \n 1552        enter_openat  tracepoint  2024-05-06T10:50:57-0400 \n\nsudo bpfman unload 1552\n

    When ready to uninstall, determine the version of the RPM that is currently installed:

    $ sudo rpm -qa | grep bpfman\nbpfman-0.4.2-1.fc39.x86_64\n

    To stop bpfman and uninstall the RPM:

    sudo systemctl stop bpfman.socket\nsudo systemctl disable bpfman.socket\n\nsudo dnf erase -y bpfman-0.4.2-1.fc39.x86_64\n\nsudo systemctl daemon-reload\n
    "},{"location":"quick-start/#deploy-released-container-images-on-kubernetes","title":"Deploy Released Container Images on Kubernetes","text":"

    The quickest solution for running bpfman in a Kubernetes deployment is to run a local Kubernetes KIND Cluster:

    Note

    OpenShift has tighter security requirements and requires additional settings. When deploying bpfman on OpenShift, use the OperatorHub from the OpenShift console, search for ebpf, and install either the Bpfman Operator by Community or the eBPF Manager Operator by Red Hat. The Bpfman Operator by Community tracks the upstream releases of bpfman. The eBPF Manager Operator by Red Hat is based on bpfman at the time of the corresponding OpenShift release.

    kind create cluster --name=test-bpfman\n

    Next, deploy the bpfman CRDs:

    export BPFMAN_REL=0.5.4\nkubectl apply -f  https://github.com/bpfman/bpfman/releases/download/v${BPFMAN_REL}/bpfman-crds-install.yaml\n

    Next, deploy the bpfman-operator, which will also deploy the bpfman-daemon, which contains bpfman-rpc, bpfman Library and bpfman-agent:

    kubectl apply -f https://github.com/bpfman/bpfman/releases/download/v${BPFMAN_REL}/bpfman-operator-install.yaml\n

    Finally, deploy an example eBPF program:

    kubectl apply -f https://github.com/bpfman/bpfman/releases/download/v${BPFMAN_REL}/go-xdp-counter-install.yaml\n\nkubectl get xdpprograms\nNAME                     BPFFUNCTIONNAME   NODESELECTOR   STATUS\ngo-xdp-counter-example   xdp_stats         {}             ReconcileSuccess\n

    There are other example program install yamls in the artifacts for each Release payload.

    Use the following command to teardown the cluster:

    kind delete cluster -n test-bpfman\n
    "},{"location":"blog/","title":"Bpfman Blog","text":""},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/","title":"A New Logo: Using Generative AI, of course","text":"

    Since we renamed the project to bpfman we are in need of a new logo. Given that the tech buzz around Generative AI is infectious, we decided to explore using generative AI to create our new logo. What we found was that it was a great way to generate ideas, but a human (me) was still needed to create the final design.

    "},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/#the-brief","title":"The Brief","text":"

    I have a love of open source projects with animal mascots, so bpfman should be no different. The \"bee\" is used a lot for eBPF related projects. One such example is Crabby, the crab/bee hybrid, that I created for the Aya project.

    The logo should be cute and playful, but not too childish. As a nod to Podman, we'd like to use the same typeface and split color-scheme as they do, replacing purple with yellow.

    One bee is not enough! Since we're an eBPF manager, we need more bees!

    via GIPHY

    And since those bees are bee-ing (sorry) managed, they should be organized. Maybe in a pyramid shape?

    "},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/#the-process","title":"The Process","text":"

    We used Bing Image Creator, which is backed by DALL-E 3.

    Initially we tried to use the following prompt:

    Logo for open source software project called \"bpfman\". \"bpf\" should be yellow and \"man\" should be black or grey. an illustration of some organized bees above the text. cute. playful

    Our AI overlords came up with:

    Not bad, but not quite what we were looking for. It's clear that as smart as AI is, it struggles with text, so whatever we need will need some manual post-processing. There are bees, if you squint a bit, but they're not very organized. Let's refine our prompt a bit:

    Logo for open source software project called \"bpfman\" as one word. The \"bpf\" should be yellow and \"man\" should be black or grey. an illustration of some organized bees above the text. cute. playful.

    That... is worse.

    Let's try again:

    Logo for a project called \"bpfman\". In the text \"bpfman\", \"bpf\" should be yellow and \"man\" should be black or grey. add an illustration of some organized bees above the text. cute and playful style.

    The bottom left one is pretty good! So I shared it with the rest of the maintainers to see what they thought.

    At this point the feedback that I got was that the bees were too cute! We're a manager, and managers are serious business, so we need serious bees.

    Prompting the AI for the whole logo was far too ambitious, so I decided I would just use the AI to generate the bees and then I would add the text myself.

    I tried a few different prompts, but the one that worked best was:

    3 bees guarding a hive. stern expressions. simple vector style.

    The bottom right was exactly what I had in mind! With a little bit of post-processing, I ended up with this:

    Now it was time to solicit some feedback.

    "},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/#gathering-feedback","title":"Gathering Feedback","text":"

    After showing the logo to a few others, we decided that the bees were in fact too stern. At this point we had a few options, like reverting back to our cute bees; however, this section in the Bing Image Creator Terms of Service was pointed out to me:

    Use of Creations. Subject to your compliance with this Agreement, the Microsoft Services Agreement, and our Content Policy, you may use Creations outside of the Online Services for any legal personal, non-commercial purpose.

    This means that we can't use the AI generated images for our logo.

    "},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/#was-it-all-for-nothing","title":"Was it all for nothing?","text":"

    Was it all for nothing? No! We learnt a lot from this process.

    Generative AI is great for generating ideas. Some of the logo compositions produced were great!

    It was also very useful to adjust the prompt based on feedback from team members so we could incorporate their ideas into the design.

    We also learnt that the AI is not great at text, so we should avoid using it for that.

    And finally, we learnt that we can't use the AI generated images for our logo. Well, not with the generator we used anyway.

    "},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/#the-semi-final-design-process","title":"The (Semi) Final Design Process","text":"

    I started from scratch, taking inspiration from the AI generated images. The bees were drawn first and composed around a hive - as our AI overlords suggested. I then added the text, and colours, but it still felt like it was missing something.

    What if we added a force field around the hive? That might be cool! And so, I added a force field around the hive and played around with the colours until I was happy.

    Here's what we ended up with:

    We consulted a few more people and got some feedback. The general consensus was that the logo was too busy... However, the feedback on the force field suggested that the favicon I'd mocked up would work better as the logo.

    "},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/#the-final-design","title":"The Final Design","text":"

    Here's the final design:

    Pretty cool, right? Even if I do say so myself.

    Our mascot is a queen bee, because she's the manager of the hive.

    The force field is now no longer a force field - it's a pheromone cloud that represents the Queen Mandibular Pheromone (QMP) that the queen bee produces to keep the hive organized.

    "},{"location":"blog/2023/11/25/a-new-logo-using-generative-ai-of-course/#conclusion","title":"Conclusion","text":"

    I'm really happy with the result! I'm not a designer, so I'm sure there are things that could be improved, but I think it's a good start.

    What do you think? Join us on Slack and let us know!

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/","title":"bpfman's Integration with the AF_XDP Device Plugin and CNI for Kubernetes","text":"

    AF_XDP is an address/socket family that is optimized for high performance packet processing. It takes advantage of XDP (an in Kernel fastpath), which essentially runs an eBPF program as early as possible on a network driver's receive path, and redirects the packet to an AF_XDP socket.

    AF_XDP sockets (XSKs) are created in Userspace and have a 1:1 mapping with netdev queues. An XSKMAP is an eBPF map of AF_XDP sockets for a particular netdev. It's a simple key:value map where the key is the netdev's queue-id and the value is the AF_XDP socket that's attached to that queue. The eBPF program (at the XDP hook) will leverage the XSKMAP and the XDP_REDIRECT action to redirect packets to an AF_XDP socket. In the image below the XDP program is redirecting an incoming packet to the XSK attached to Queue 2.

    NOTE: If no XSK is attached to a queue, the XDP program will simply pass the packet to the Kernel Network Stack.

    +---------------------------------------------------+\n|     XSK A      |     XSK B       |      XSK C     |<---+  Userspace\n=========================================================|==========\n|    Queue 0     |     Queue 1     |     Queue 2    |    |  Kernel space\n+---------------------------------------------------+    |\n|                  Netdev eth0                      |    |\n+---------------------------------------------------+    |\n|                            +=============+        |    |\n|                            | key |  xsk  |        |    |\n|  +---------+               +=============+        |    |\n|  |         |               |  0  | xsk A |        |    |\n|  |         |               +-------------+        |    |\n|  |         |               |  1  | xsk B |        |    |\n|  | BPF     |               +-------------+        |    |\n|  | prog    |-- redirect -->|  2  | xsk C |-------------+\n|  | (XDP    |               +-------------+        |\n|  |  HOOK)  |                   xskmap             |\n|  |         |                                      |\n|  +---------+                                      |\n|                                                   |\n+---------------------------------------------------+\n
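
    To make the redirect mechanism above concrete, here is a minimal libbpf-style sketch of such an XDP program. This is an illustrative example only, not the actual bytecode shipped by the AF_XDP Device Plugin; the map size and the names xsks_map and xsk_redirect are assumptions.

    #include <linux/bpf.h>\n#include <bpf/bpf_helpers.h>\n\n/* XSKMAP: key = netdev queue id, value = AF_XDP socket */\nstruct {\n    __uint(type, BPF_MAP_TYPE_XSKMAP);\n    __uint(max_entries, 64); /* assumed queue count */\n    __type(key, __u32);\n    __type(value, __u32);\n} xsks_map SEC(\".maps\");\n\nSEC(\"xdp\")\nint xsk_redirect(struct xdp_md *ctx)\n{\n    /* Redirect to the XSK bound to this queue; if no XSK is\n     * attached, fall through to the kernel network stack. */\n    return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);\n}\n\nchar _license[] SEC(\"license\") = \"GPL\";\n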

    The AF_XDP Device Plugin and CNI project provides the Kubernetes components to provision, advertise and manage AF_XDP networking devices for Kubernetes pods. These networking devices are typically used as a Secondary networking interface for a pod. A key goal of this project is to enable pods to run without any special privileges; without it, pods that wish to use AF_XDP would need to run with elevated privileges in order to manage the eBPF program on the interface, and the infrastructure would have little to no control over what those pods could load. It's therefore ideal to leverage a central, infrastructure-centric eBPF program management approach. This blog will discuss the eBPF program management journey for the AF_XDP Device Plugin and CNI.

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#what-does-the-af_xdp-device-plugin-and-cni-do","title":"What does the AF_XDP Device Plugin and CNI do?","text":"

    For pods to create and use AF_XDP sockets on their interfaces, they can either:

    1. Create the AF_XDP socket on an interface already plumbed to the Pod (via SR-IOV Device Plugin and the Host CNI) --> But this requires CAP_BPF or CAP_SYS_ADMIN privileges in order to load the BPF program on the netdev.

    OR

    2. Use the AF_XDP Device Plugin (DP) and CNI in order to support a Pod without the aforementioned root-like privileges.

      NOTE: Prior to kernel 5.19, any use of the BPF sys call required CAP_BPF, including the calls used to access maps shared between the BPF program and the userspace program. In kernel 5.19, a change went in that only requires CAP_BPF for map creation (BPF_MAP_CREATE) and loading programs (BPF_PROG_LOAD).

      In this scenario, the AF_XDP DP will advertise resource pools (of netdevs) to Kubelet. When a Pod requests a resource from these pools, Kubelet will Allocate() one of these devices through the AF_XDP DP. The AF_XDP DP will load the eBPF program (to redirect packets to an AF_XDP socket) on the allocated device.

      The default behaviour of the AF_XDP DP (unless otherwise configured) is to take note of the XSKMAP File Descriptor (FD) for that netdev. It will also mount a Unix Domain Socket (UDS), as a hostpath mount, in the Pod. This UDS will be used by the AF_XDP application to perform a handshake with the AF_XDP DP to retrieve the XSKMAP FD. The application needs the XSKMAP FD to \"attach\" AF_XDP sockets it creates to the netdev queues.

      NOTE: Newer versions of the AF_XDP DP support eBPF map pinning, which eliminates the need to perform this (non-trivial) handshake with AF_XDP pods. It now mounts the pinned XSKMAP into the Pod using a hostpath mount (see the sketch after this list). The downside of this approach is that the AF_XDP DP now needs to manage several eBPF File Systems (BPFFS), one per pod.

      The AF_XDP CNI (like any CNI) has the task of moving the netdev (with the loaded eBPF program) into the Pod namespace. It also does a few other important things:

      • It does not rename the netdev (to allow the DP to avoid IF_INDEX clashes as it manages the AF_XDP resource pools).
      • The CNI is also capable of configuring hardware filters on the NIC.
      • Finally, the CNI also unloads the eBPF program from the netdev and clears any hardware filters when the Pod is terminated.

      NOTE 1: The AF_XDP CNI manages the unloading of the eBPF program due to the AF_XDP DP not being aware of when a pod terminates (it's only invoked by Kubelet during pod creation).

      NOTE 2: Prior to bpfman integration, the CNI was extended to signal the AF_XDP DP on pod termination (via gRPC) in an effort to support eBPF map pinning directly in the AF_XDP DP. The AF_XDP DP was managing BPFFS(es) for map pinning and needed to be signalled to clean them up.
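
    For illustration, here is a minimal sketch of how an application inside the pod could open a pinned XSKMAP with libbpf's bpf_obj_get once it has been hostpath-mounted. The pin path is a made-up example; the actual mount point depends on the AF_XDP DP configuration.

    #include <bpf/bpf.h>\n#include <stdio.h>\n\nint main(void)\n{\n    /* Hypothetical mount point of the pinned XSKMAP inside the pod */\n    int map_fd = bpf_obj_get(\"/tmp/afxdp_dp/eth0/xsks_map\");\n    if (map_fd < 0) {\n        perror(\"bpf_obj_get\");\n        return 1;\n    }\n    /* map_fd can now be used when binding AF_XDP sockets to queues */\n    return 0;\n}\n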

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#bpfman-integration","title":"bpfman Integration","text":"

    Prior to bpfman integration, the AF_XDP Device Plugin and CNI managed the eBPF program for redirecting incoming packets to AF_XDP sockets, its associated map (XSKMAP), and/or several BPFFS.

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#integration-benefits","title":"Integration benefits","text":"

    So what are the benefits of bpfman integration for the AF_XDP DP and CNI?

    • Removes code for loading and managing eBPF from the AF_XDP DP and CNI codebase.

    • This presented a difficulty, particularly when trying to find/update appropriate base container images to use for the AF_XDP device plugin. Different images supported different versions of eBPF management libraries (i.e., libbpf or libxdp), which forced multiple changes around the loading and attaching of the base eBPF program.

    • Additionally the CNI runs as a binary on the Kubernetes node so we would need to statically compile libbpf/libxdp as part of the CNI.

    • More diverse XDP program support through bpfman's eBPF Bytecode Image Specification. Not only do the AF_XDP eBPF programs no longer need to be stored in the Device Plugin itself, but it's now configurable on a per pool basis.

    • No longer required to leverage hostpath volume mounts to mount the AF_XDP maps inside a Pod. Instead, it can take advantage of the bpfman CSI support to ensure that maps are pinned in the context of the Pod itself rather than in a BPFFS on the host (and then shared to the Pod).

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#af_xdp-device-plugin-ebpf-programmap-management","title":"AF_XDP Device Plugin eBPF program/map management","text":"

    The role of the AF_XDP DP in eBPF program/map management prior to bpfman integration:

    • Loads the default AF_XDP BPF prog onto the netdev at Pod creation and manages info regarding the XSKMAP for that netdev.

    • Mounts a UDS as a hostpath volume in the Pod OR creates a BPFFS per netdev and pins the XSKMAP to it, then mounts this BPFFS as a hostpath volume in the Pod.

    • Shares the XSKMAP file descriptor via UDS (involves a handshake with the Pod).

    The role of the AF_XDP DP in eBPF program/map management after bpfman integration:

    • Uses bpfman's client APIs to load the BPF prog.

    • Shares the XSKMAP (that bpfman pinned) with the Pod as a hostpath volume.

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#af_xdp-cni-ebpf-programmap-management","title":"AF_XDP CNI eBPF program/map management","text":"

    The role of the AF_XDP CNI in eBPF program/map management prior to bpfman integration:

    • Unloads the eBPF program when a device is returned to the Host network namespace.

    The role of the AF_XDP CNI in eBPF program/map management after bpfman integration:

    • Uses gRPC to signal to the Device Plugin to request bpfman to unload the eBPF program using the client APIs.
    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#is-there-a-working-example","title":"Is there a working example?","text":"

    The bpfman integration with the AF_XDP Device Plugin and CNI was demoed as part of a series of demos that show the migration of a DPDK application to AF_XDP without any application modification. The demo can be watched below:

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#af_xdp-dp-and-cnis-integration-with-bpfman-in-images","title":"AF_XDP DP and CNI's integration with bpfman in images","text":"

    The following sections will present the evolution of the AF_XDP DP and CNI from independent eBPF program management to leveraging bpfman to manage eBPF programs on their behalf.

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#af_xdp-dp-and-cni-managing-ebpf-programs-independently","title":"AF_XDP DP and CNI managing eBPF programs independently","text":"

    The following diagram details how the AF_XDP DP and CNI worked prior to bpfman integration.

    1. Set up Subfunctions on the network devices (if they are supported/being used).

    2. Create an AF_XDP DP and CNI configuration file to setup the device resource pools and deploy the DP and CNI.

    3. When the AF_XDP DP runs it will discover the netdevs on the host and create the resource pools.

    4. The AF_XDP DP registers the resource pools with Kubelet.

    5. When a pod (that requests an AF_XDP resource) is started, Kubelet will send an Allocate() request to the AF_XDP DP. The AF_XDP DP loads the eBPF program on the interface and mounts the UDS in the pod and sets some environment variables in the pod using the Downward API.

    NOTE: In the case where eBPF map pinning is used rather than the UDS, the AF_XDP DP will create a BPFFS where it pins the XSKMAP and mounts the BPFFS as a hostpath volume in the pod.

    6. The AF_XDP DP signals success to the Kubelet so that the device is added to the pod.

    7. Kubelet triggers multus, which in turn triggers the AF_XDP CNI. The CNI does the relevant network configuration and moves the netdev into the pod network namespace.

    8. The application in the pod starts and initiates a handshake with the AF_XDP DP over the mounted UDS to retrieve the XSKMAP FD.

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#af_xdp-dp-and-cni-integrated-with-bpfman-no-csi","title":"AF_XDP DP and CNI integrated with bpfman (no csi)","text":"

    The following diagram details how the AF_XDP DP and CNI worked after bpfman integration.

    The main difference here is that when the Allocate() request comes in from Kubelet, the AF_XDP DP uses the bpfman client API to load the eBPF program on the relevant netdev. It takes note of where bpfman pins the XSKMAP and mounts this directory as a hostpath volume in the pod.

    "},{"location":"blog/2024/02/27/bpfmans-integration-with-the-af_xdp-device-plugin-and-cni-for-kubernetes/#af_xdp-dp-and-cni-integrated-with-bpfman-with-csi","title":"AF_XDP DP and CNI integrated with bpfman (with csi)","text":"

    The following diagram details how the AF_XDP DP and CNI will work with bpfman leveraging the new CSI implementation.

    The pod will include a volume definition as follows:

       volumes:\n   - name: bpf-maps\n     csi:\n       driver: csi.bpfman.dev\n       volumeAttributes:\n         csi.bpfman.dev/thru-annotations: true\n

    The idea here is when the Allocate() request comes in from Kubelet, the AF_XDP DP uses the bpfman client API to load the eBPF program on the relevant netdev. The AF_XDP DP will annotate the pod with the XdpProgram name, map and mountpath. When the bpfman CSI plugin is triggered by Kubelet, it will retrieve the information it needs from the pod annotations in order to pin the map inside the Pod.

    "},{"location":"blog/2023/11/23/bpfd-becomes-bpfman/","title":"bpfd becomes bpfman","text":"

    Bpfd is now bpfman! We've renamed the project to better reflect the direction we're taking. We're still the same project, just with a new name.

    "},{"location":"blog/2023/11/23/bpfd-becomes-bpfman/#why-the-name-change","title":"Why the name change?","text":"

    We've been using the name bpfd for a while now, but we were not the first to use it. There were projects before us that used the name bpfd, but since most were inactive, originally we didn't see this as an issue.

    More recently though the folks at Meta have started using the name systemd-bpfd for their proposed addition to systemd.

    In addition, we've been thinking about the future of the project, and particularly about security and whether it's wise to keep something with CAP_BPF capabilities running as a daemon - even if we've been very careful. This is similar to the issues faced by docker, which eventually led to the creation of podman.

    This issue led us down the path of redesigning the project to be daemonless. We'll be implementing these changes in the coming months and plan to perform our first release as bpfman in Q1 of 2024.

    The 'd' in bpfd stood for daemon, so with our new design and the confusion surrounding the name bpfd, we thought it was time for a change.

    Since we're a BPF manager, we're now bpfman! It's also a nice homage to podman, which we're big fans of.

    "},{"location":"blog/2023/11/23/bpfd-becomes-bpfman/#what-does-this-mean-for-me","title":"What does this mean for me?","text":"

    If you're a developer of bpfman you will need to update your Git remotes to point at our new organization and repository name. GitHub will redirect these for a while, but we recommend updating your remotes as soon as possible.

    If you're a user of bpfd or the bpfd-operator then version 0.3.1 will be the last release under the bpfd name. We will continue to support you as best we can, but we recommend upgrading to bpfman as soon as our first release is available.

    "},{"location":"blog/2023/11/23/bpfd-becomes-bpfman/#whats-next","title":"What's next?","text":"

    We've hinted at some of the changes we're planning, and of course, our roadmap is always available on GitHub. It's worth mentioning that we're also planning to expand our release packages to include RPMs and DEBs, making it even easier to install bpfman on your favorite Linux distribution.

    "},{"location":"blog/2023/11/23/bpfd-becomes-bpfman/#thanks","title":"Thanks!","text":"

    We'd like to thank everyone who has contributed to bpfd over the years. We're excited about the future of bpfman and we hope you are too! Please bear with us as we make this transition, and if you have any questions or concerns, please reach out to us on Slack. We're in the '#bpfd' channel, but we'll be changing that to '#bpfman' soon.

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/","title":"Technical Challenges for Attaching eBPF Programs in Containers","text":"

    We recently added support for attaching uprobes inside containers. The purpose of this blog is to give a brief overview of the feature, to document the technical challenges encountered, and to describe our solutions for those challenges. In particular: how do we attach an eBPF program inside of a container, and how do we find the host Process ID (PID) on the node for that container?

    The solutions seem relatively straightforward now that they are done, but we found limited information elsewhere, so we thought it would be helpful to document them here.

    The uprobe implementation will be used as the example in this blog, but the concepts can (and will eventually) be applied to other program types.

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#introduction","title":"Introduction","text":"

    A \"uprobe\" (user probe) is a type of eBPF program that can be attached to a specific location in a user-space application. This allows developers and system administrators to dynamically instrument a user-space binary to inspect its behavior, measure performance, or debug issues without modifying the application's source code or binary. When the program execution reaches the location to which the uprobe is attached, the eBPF program associated with the uprobe is executed.

    bpfman support for uprobes has existed for some time. We recently extended this support to allow users to attach uprobes inside of containers both in the general case of a container running on a Linux server and also for containers running in a Kubernetes cluster.

    The following is a bpfman command line example for loading a uprobe inside a container:

    bpfman load image --image-url quay.io/bpfman-bytecode/uprobe:latest uprobe --fn-name \"malloc\" --target \"libc\" --container-pid 102745\n

    The above command instructs bpfman to attach a uprobe to the malloc function in the libc library for the container with PID 102745. The main addition here is the ability to specify a container-pid, which is the PID of the container as it is known to the host server.

    The term \"target\" as used in the above bpfman command (and the CRD below) describes the library or executable that we want to attach the uprobe to. The fn-name (the name of the function within that target) and/or an explicit \"offset\" can be used to identify a specific offset from the beginning of the target. We also use the term \"target\" more generally to describe the intended location of the uprobe.

    For Kubernetes, the CRD has been extended to include a \"container selector\" to describe one or more containers as shown in the following example.

    apiVersion: bpfman.io/v1alpha1\nkind: UprobeProgram\nmetadata:\n  labels:\n    app.kubernetes.io/name: uprobeprogram\n  name: uprobe-example-containers\nspec:\n  # Select all nodes\n  nodeselector: {}\n  bpffunctionname: my_uprobe\n  func_name: malloc\n  # offset: 0 # optional offset w/in function\n  target: libc\n  retprobe: false\n  # pid: 0 # optional pid to execute uprobe for\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/uprobe:latest\n  containers:      <=== New section for specifying containers to attach uprobe to\n    namespace: bpfman\n    pods:\n      matchLabels:\n        name: bpfman-daemon\n    containernames:\n      - bpfman\n      - bpfman-agent\n

    In the Kubernetes case, the container selector (containers) is used to identify one or more containers in which to attach the uprobe. If containers identifies any containers on a given node, the bpfman agent on that node will determine their host PIDs and make the calls to bpfman to attach the uprobes.

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#attaching-uprobes-in-containers","title":"Attaching uprobes in containers","text":"

    A Linux \"mount namespace\" is a feature that isolates the mount points seen by a group of processes. This means that processes in different mount namespaces can have different views of the filesystem. A container typically has its own mount namespace that is isolated both from those of other containers and its parent. Because of this, files that are visible in one container are likely not visible to other containers or even to the parent host (at least not directly). To attach a uprobe to a file in a container, we need to have access to that container's mount namespace so we can see the file to which the uprobe needs to be attached.

    From a high level, attaching a uprobe to an executable or library in a container is relatively straightforward. bpfman needs to change to the mount namespace of the container, attach the uprobe to the target in that container, and then return to our own mount namespace so that we can save the needed state and continue processing other requests.

    The main challenges are:

    1. Changing to the mount namespace of the target container.
    2. Returning to the bpfman mount namespace.
    3. setns (at least for the mount namespace) can't be called from a multi-threaded application, and bpfman is currently multithreaded.
    4. How to find the right PID for the target container.
    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#the-mount-namespace","title":"The Mount Namespace","text":"

    To enter the container namespace, bpfman uses the sched::setns function from the Rust nix crate. The setns function requires the file descriptor for the mount namespace of the target container.

    For a given container PID, the namespace file needed by the setns function can be found in the /proc/<PID>/ns/ directory. An example listing for the PID 102745 directory is shown below:

    sudo ls -l /proc/102745/ns/\ntotal 0\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 cgroup -> 'cgroup:[4026531835]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 ipc -> 'ipc:[4026532858]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 mnt -> 'mnt:[4026532856]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:07 net -> 'net:[4026532860]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 pid -> 'pid:[4026532859]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 pid_for_children -> 'pid:[4026532859]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 time -> 'time:[4026531834]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 time_for_children -> 'time:[4026531834]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 user -> 'user:[4026531837]'\nlrwxrwxrwx 1 root root 0 Feb 15 12:10 uts -> 'uts:[4026532857]'\n

    In this case, the mount namespace file is /proc/102745/ns/mnt.

    NOTE: How to find the PID and the relationship between parent and child PIDs is described in the \"Finding The PID\" section below.

    When running directly on a Linux server, bpfman has access to the host /proc directory and can access the mount namespace file for any PID. However, on Kubernetes, bpfman runs in a container, so it doesn't have access to the namespace files of other containers or the /proc directory of the host by default. Therefore, in the Kubernetes implementation, /proc is mounted in the bpfman container so it has access to the ns directories of other containers.

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#returning-to-the-bpfman-mount-namespace","title":"Returning to the bpfman Mount Namespace","text":"

    After bpfman does a setns to the target container mount namespace, it has access to the target binary in that container. However, it only has access to that container's view of the filesystem, and in most cases, this does not include access to bpfman's filesystem or the host filesystem. As a result, bpfman loses the ability to access its own mount namespace file.

    However, before calling setns, bpfman has access to its own mount namespace file. Therefore, to avoid getting stranded in a different mount namespace, bpfman also opens its own mount namespace file prior to calling setns so it already has the file descriptor that will allow it to call setns to return to its own mount namespace.

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#running-setns-from-a-multi-threaded-process","title":"Running setns From a Multi-threaded Process","text":"

    Calling setns to a mount namespace doesn't work from a multi-threaded process.

    To work around this issue, the logic was moved to a standalone single-threaded executable called bpfman-ns that does the job of entering the namespace, attaching the uprobe, and then returning to the bpfman namespace to save the needed info.
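
    The resulting flow in bpfman-ns looks roughly like the following sketch, shown here in C for brevity (bpfman itself is written in Rust and uses the nix crate); the function name and error handling are illustrative only.

    #define _GNU_SOURCE\n#include <fcntl.h>\n#include <sched.h>\n#include <stdio.h>\n#include <unistd.h>\n\nint attach_in_container(pid_t target_pid)\n{\n    char path[64];\n\n    /* Open our own mount namespace file BEFORE switching;\n     * once inside the container we can no longer see it. */\n    int self_fd = open(\"/proc/self/ns/mnt\", O_RDONLY);\n    snprintf(path, sizeof(path), \"/proc/%d/ns/mnt\", target_pid);\n    int target_fd = open(path, O_RDONLY);\n    if (self_fd < 0 || target_fd < 0)\n        return -1;\n\n    setns(target_fd, CLONE_NEWNS); /* enter the container mount ns */\n    /* ... resolve the target binary and attach the uprobe ... */\n    setns(self_fd, CLONE_NEWNS);   /* return to our own mount ns */\n\n    close(target_fd);\n    close(self_fd);\n    return 0;\n}\n

    As described above, the setns call for the mount namespace only succeeds from a single-threaded process, which is why this logic lives in the standalone bpfman-ns binary rather than in bpfman itself.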

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#finding-the-pid","title":"Finding the PID","text":""},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#finding-a-host-container-pid-on-a-linux-server","title":"Finding a Host Container PID on a Linux Server","text":"

    This section provides an overview of PID namespaces and shows several ways to find the host PID for a container.

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#tldr","title":"tl;dr","text":"

    If you used Podman or Docker to run your container, and you gave the container a unique name, the following commands can be used to find the host PID of a container.

    podman inspect -f '{{.State.Pid}}' <CONTAINER_NAME>\n

    or, similarly,

    docker inspect -f '{{.State.Pid}}'  <CONTAINER_NAME>\n
    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#overview-of-pid-namespaces-and-container-host-pids","title":"Overview of PID namespaces and Container Host PIDs","text":"

    Each container has a PID namespace. Each PID namespace (other than the root) is contained within a parent PID namespace. In general, this relationship is hierarchical and PID namespaces can be nested within other PID namespaces. In this section, we will just cover the case of a root PID namespace on a Linux server that has containers with PID namespaces that are direct children of the root. The multi-level case is described in the section on Nested Containers with kind below.

    The PID namespaces can be listed using the lsns -t pid command. Before we start any containers, we just have the one root pid namespace as shown below.

    sudo lsns -t pid\n        NS TYPE NPROCS PID USER COMMAND\n4026531836 pid     325   1 root /usr/lib/systemd/systemd rhgb --switched-root --system --deserialize 30\n

    Now let's start a container with the following command in a new shell:

    podman run -it --name=container_1 fedora:latest /bin/bash\n

    NOTE: In this section, we are using podman to run containers. However, all of the same commands can also be used with docker.

    Now back on the host we have:

    sudo lsns -t pid\n        NS TYPE NPROCS    PID USER      COMMAND\n4026531836 pid     337      1 root      /usr/lib/systemd/systemd rhgb --switched-root --system --deserialize 30\n4026532948 pid       1 150342 user_abcd /bin/bash\n

    We can see that the host PID for the container we just started is 150342.

    Now let's start another container in a new shell with the same command (except with a different name), and run the lsns command again on the host.

    podman run -it --name=container_2 fedora:latest /bin/bash\n

    On the host:

    sudo lsns -t pid\n        NS TYPE NPROCS    PID USER      COMMAND\n4026531836 pid     339      1 root      /usr/lib/systemd/systemd rhgb --switched-root --system --deserialize 30\n4026532948 pid       1 150342 user_abcd /bin/bash\n4026533041 pid       1 150545 user_abcd /bin/bash\n

    We now have 3 pid namespaces -- one for root and two for the containers. Since we already know that the first container had PID 150342 we can conclude that the second container has PID 150545. However, what would we do if we didn't already know the PID for one of the containers?

    If the container we were interested in was running a unique command, we could use that to disambiguate. However, in this case, both are running the same /bin/bash command.

    If something unique is running inside of the container, we can use the ps -e -o pidns,pid,args command to get some info.

    For example, run sleep 1111 in container_1, then

    sudo ps -e -o pidns,pid,args | grep 'sleep 1111'\n4026532948  150778 sleep 1111\n4026531836  151002 grep --color=auto sleep 1111\n

    This tells us that the sleep 1111 command is running in PID namespace 4026532948. And,

    sudo lsns -t pid | grep 4026532948\n4026532948 pid       2 150342 user_abcd /bin/bash\n

    Tells us that the container's host PID is 150342.

    Alternatively, we could run lsns inside of container_1.

    dnf install -y util-linux\nlsns -t pid\n        NS TYPE NPROCS PID USER COMMAND\n4026532948 pid       2   1 root /bin/bash\n

    This tells us a few interesting things.

    1. Inside the container, the PID is 1.
    2. We can't see any of the other PID namespaces inside the container.
    3. The container PID namespace is 4026532948.

    With the container PID namespace, we can run the lsns -t pid | grep 4026532948 command as we did above to find the container's host PID.

    Finally, the container runtime knows the pid mapping. As mentioned at the beginning of this section, if the unique name of the container is known, the following command can be used to get the host PID.

    podman inspect -f '{{.State.Pid}}' container_1\n150342\n
    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#how-bpfman-agent-finds-the-pid-on-kubernetes","title":"How bpfman Agent Finds the PID on Kubernetes","text":"

    When running on Kubernetes, the \"containers\" field in the UprobeProgram CRD can be used to identify one or more containers using the following information:

    • Namespace
    • Pod Label
    • Container Name

    If the container selector matches any containers on a given node, the bpfman-agent determines the host PID for those containers and then calls bpfman to attach the uprobe in the container with the given PID.

    From what we can tell, there is no way to find the host PID for a container running in a Kubernetes pod from the Kubernetes interface. However, the container runtime does know this mapping.

    The bpfman-agent implementation uses multiple steps to find the set of PIDs on a given node (if any) for the containers that are identified by the container selector.

    1. It uses the Kubernetes interface to get a list of pods on the local node that match the container selector.
    2. It uses crictl with the names of the pods found to get the pod IDs.
    3. It uses crictl with the pod IDs to find the containers in those pods and then checks whether any match the container selector.
    4. Finally, it uses crictl with the container IDs found to get the host PIDs for the containers.

    As an example, the bpfman.io_v1alpha1_uprobe_uprobeprogram_containers.yaml file can be used with the kubectl apply -f command to install uprobes on two of the containers in the bpfman-agent pod. The bpfman code does this programmatically, but here we will step through the process of finding the host PIDs for the two containers using CLI commands to demonstrate how it works.

    We will use a kind deployment with bpfman for this demo. See Deploy Locally via KIND for instructions on how to get this running.

    The container selector in the above yaml file is the following.

      containers:\n    namespace: bpfman\n    pods:\n      matchLabels:\n        name: bpfman-daemon\n    containernames:\n      - bpfman\n      - bpfman-agent\n

    bpfman accesses the Kubernetes API and uses crictl from the bpfman-agent container. However, the bpfman-agent container doesn't have a shell by default, so we will run the examples from the bpfman-deployment-control-plane node, which will yield the same results. bpfman-deployment-control-plane is a docker container in our kind cluster, so enter the container.

    docker exec -it c84cae77f800 /bin/bash\n
    Install crictl.

    apt update\napt install wget\nVERSION=\"v1.28.0\"\nwget https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-$VERSION-linux-amd64.tar.gz\ntar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin\nrm -f crictl-$VERSION-linux-amd64.tar.gz\n

    First use kubectl to get the list of pods that match our container selector.

    kubectl get pods -n bpfman -l name=bpfman-daemon\nNAME                  READY   STATUS    RESTARTS   AGE\nbpfman-daemon-cv9fm   3/3     Running   0          6m54s\n

    NOTE: The bpfman code also filters on the local node, but we only have one node in this deployment, so we'll ignore that here.

    Now, use crictl with the name of the pod found to get the pod ID.

    crictl pods --name bpfman-daemon-cv9fm\nPOD ID              CREATED             STATE               NAME                  NAMESPACE           ATTEMPT             RUNTIME\ne359900d3eca5       46 minutes ago      Ready               bpfman-daemon-cv9fm   bpfman              0                   (default)\n

    Now, use the pod ID to get the list of containers in the pod.

    crictl ps --pod e359900d3eca5\nCONTAINER           IMAGE               CREATED             STATE               NAME                    ATTEMPT             POD ID              POD\n5eb3b4e5b45f8       50013f94a28d1       48 minutes ago      Running             node-driver-registrar   0                   e359900d3eca5       bpfman-daemon-cv9fm\n629172270a384       e507ecf33b1f8       48 minutes ago      Running             bpfman-agent            0                   e359900d3eca5       bpfman-daemon-cv9fm\n6d2420b80ddf0       86a517196f329       48 minutes ago      Running             bpfman                  0                   e359900d3eca5       bpfman-daemon-cv9fm\n

    Now use the container IDs for the containers identified in the container selector to get the PIDs of the containers.

    # Get PIDs for bpfman-agent container\ncrictl inspect 629172270a384 | grep pid\n    \"pid\": 2158,\n            \"pid\": 1\n            \"type\": \"pid\"\n\n# Get PIDs for bpfman container\ncrictl inspect 6d2420b80ddf0 | grep pid\n    \"pid\": 2108,\n            \"pid\": 1\n            \"type\": \"pid\"\n

    From the above output, we can tell that the host PID for the bpfman-agent container is 2158, and the host PID for the bpfman container is 2108. So, now bpfman-agent would have the information needed to call bpfman with a request to install a uprobe in the containers.

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#nested-containers-with-kind","title":"Nested Containers with kind","text":"

    kind is a tool for running local Kubernetes clusters using Docker container \u201cnodes\u201d. The kind cluster we used for the previous section had a single node.

    $ kubectl get nodes\nNAME                              STATUS   ROLES           AGE   VERSION\nbpfman-deployment-control-plane   Ready    control-plane   24h   v1.27.3\n

    We can see the container for that node on the base server from Docker as follows.

    docker ps\nCONTAINER ID   IMAGE                  COMMAND                  CREATED        STATUS        PORTS                       NAMES\nc84cae77f800   kindest/node:v1.27.3   \"/usr/local/bin/entr\u2026\"   25 hours ago   Up 25 hours   127.0.0.1:36795->6443/tcp   bpfman-deployment-control-plane\n

    Our cluster has a number of pods as shown below.

    kubectl get pods -A\nNAMESPACE            NAME                                                      READY   STATUS    RESTARTS   AGE\nbpfman               bpfman-daemon-cv9fm                                       3/3     Running   0          24h\nbpfman               bpfman-operator-7f67bc7c57-bpw9v                          2/2     Running   0          24h\nkube-system          coredns-5d78c9869d-7tw9b                                  1/1     Running   0          24h\nkube-system          coredns-5d78c9869d-wxwfn                                  1/1     Running   0          24h\nkube-system          etcd-bpfman-deployment-control-plane                      1/1     Running   0          24h\nkube-system          kindnet-lbzw4                                             1/1     Running   0          24h\nkube-system          kube-apiserver-bpfman-deployment-control-plane            1/1     Running   0          24h\nkube-system          kube-controller-manager-bpfman-deployment-control-plane   1/1     Running   0          24h\nkube-system          kube-proxy-sz8v9                                          1/1     Running   0          24h\nkube-system          kube-scheduler-bpfman-deployment-control-plane            1/1     Running   0          24h\nlocal-path-storage   local-path-provisioner-6bc4bddd6b-22glj                   1/1     Running   0          24h\n

    Using the lsns command in the node's docker container, we can see that it has a number of PID namespaces (1 for each container that is running in the pods in the cluster), and all of these containers are nested inside of the docker \"node\" container shown above.

    lsns -t pid\n        NS TYPE NPROCS   PID USER  COMMAND\n# Note: 12 rows have been deleted below to save space\n4026532861 pid      17     1 root  /sbin/init\n4026532963 pid       1   509 root  kube-scheduler --authentication-kubeconfig=/etc/kubernetes/scheduler.conf --authorization-kubeconfig=/etc/kubernetes/scheduler.conf --bind-addre\n4026532965 pid       1   535 root  kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf --authorization-kubeconfi\n4026532967 pid       1   606 root  kube-apiserver --advertise-address=172.18.0.2 --allow-privileged=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/pki/ca.crt\n4026532969 pid       1   670 root  etcd --advertise-client-urls=https://172.18.0.2:2379 --cert-file=/etc/kubernetes/pki/etcd/server.crt --client-cert-auth=true --data-dir=/var/lib\n4026532972 pid       1  1558 root  local-path-provisioner --debug start --helper-image docker.io/kindest/local-path-helper:v20230510-486859a6 --config /etc/config/config.json\n4026533071 pid       1   957 root  /usr/local/bin/kube-proxy --config=/var/lib/kube-proxy/config.conf --hostname-override=bpfman-deployment-control-plane\n4026533073 pid       1  1047 root  /bin/kindnetd\n4026533229 pid       1  1382 root  /coredns -conf /etc/coredns/Corefile\n4026533312 pid       1  1896 65532 /usr/local/bin/kube-rbac-proxy --secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8174/ --logtostderr=true --v=0\n4026533314 pid       1  1943 65532 /bpfman-operator --health-probe-bind-address=:8175 --metrics-bind-address=127.0.0.1:8174 --leader-elect\n4026533319 pid       1  2108 root  ./bpfman system service --timeout=0 --csi-support\n4026533321 pid       1  2158 root  /bpfman-agent --health-probe-bind-address=:8175 --metrics-bind-address=127.0.0.1:8174\n4026533323 pid       1  2243 root  /csi-node-driver-registrar --v=5 --csi-address=/csi/csi.sock --kubelet-registration-path=/var/lib/kubelet/plugins/csi-bpfman/csi.sock\n
    We can see the bpfman containers we were looking at earlier in the output above. Let's take a deeper look at the bpfman-agent container that has a PID of 2158 on the Kubernetes node container and a PID namespace of 4026533321. If we go back to the base server, we can find the container's PID there.

    sudo lsns -t pid | grep 4026533321\n4026533321 pid       1 222225 root  /bpfman-agent --health-probe-bind-address=:8175 --metrics-bind-address=127.0.0.1:8174\n

    This command tells us that the PID of our bpfman-agent is 222225 on the base server. The information for this PID is contained in /proc/222225. The following command will show the PID mappings for that one container at each level.

    sudo grep NSpid /proc/222225/status\nNSpid:  222225  2158    1\n

    The output above tells us that the PIDs for the bpfman-agent container are 222225 on the base server, 2158 in the Docker \"node\" container, and 1 inside the container itself.
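
    Since this mapping lives in procfs, it can also be read programmatically. The following is a minimal, hypothetical Rust sketch (not bpfman's actual code) that parses the NSpid field for a given PID:

    use std::fs;

    /// Return a process's PID at each PID-namespace nesting level by
    /// parsing the NSpid field of /proc/<pid>/status (outermost first).
    fn nspid(pid: u32) -> std::io::Result<Vec<u32>> {
        let status = fs::read_to_string(format!("/proc/{pid}/status"))?;
        Ok(status
            .lines()
            .find(|line| line.starts_with("NSpid:"))
            .map(|line| {
                line.trim_start_matches("NSpid:")
                    .split_whitespace()
                    .filter_map(|n| n.parse().ok())
                    .collect()
            })
            .unwrap_or_default())
    }

    fn main() -> std::io::Result<()> {
        // For the bpfman-agent container above, this would print
        // [222225, 2158, 1]: base server, "node" container, container.
        println!("{:?}", nspid(222225)?);
        Ok(())
    }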

    "},{"location":"blog/2024/02/26/technical-challenges-for-attaching-ebpf-programs-in-containers/#moving-forward","title":"Moving Forward","text":"

    As always, there is more work to do. The highest priority goals are to support additional eBPF program types and to use the Container Runtime Interface directly.

    We chose uprobes first because we had a user with a specific need. However, there are use cases for other eBPF program types.

    We used crictl in this first implementation because it already exists, supports multiple container runtimes, handles the corner cases, and is maintained. This allowed us to focus on the bpfman implementation and get the feature done more quickly. However, it would be better to access the container runtime interface directly rather than using an external executable.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/","title":"bpfman: A Novel Way to Manage eBPF","text":"

    In today's cloud ecosystem, there's a demand for low-level system access to enable high-performance observability, security, and networking functionality for applications. Historically, these features have been implemented in user space; however, the ability to program such functionality into the kernel itself can provide many benefits, including (but not limited to) performance. Regardless, many Linux users still opt away from in-tree or kernel module development due to the slow rate of iteration and ensuing large management burden. eBPF has emerged as a technology in the Linux Kernel looking to change all that.

    eBPF is a simple and efficient way to dynamically load programs into the kernel at runtime, with safety and performance provided by the kernel itself using a Just-In-Time (JIT) compiler and verification process. There are a wide variety of program types one can create with eBPF, which include everything from networking applications to security systems.

    However, eBPF is still a fairly nascent technology and it's not all kittens and rainbows. The process of developing, testing, deploying, and maintaining eBPF programs is not a road well traveled yet, and the story gets even more complicated when you want to deploy your programs in a multi-node system, such as a Kubernetes cluster. It was these kinds of problems that motivated the creation of bpfman, a system daemon for loading and managing eBPF programs in both traditional systems and Kubernetes clusters. In this blog post, we'll discuss the problems bpfman can help solve, and how to deploy and use it.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#current-challenges-with-developing-and-deploying-ebpf-programs","title":"Current Challenges with Developing and Deploying eBPF Programs","text":"

    While some organizations have had success developing, deploying, and maintaining production software which includes eBPF programs, the barrier to entry is still very high.

    Following the basic eBPF development workflow, which often involves many hours trying to interpret and fix mind-bending eBPF verifier errors, the process of deploying a program in testing and staging environments often results in a lot of custom program loading and management functionality specific to the application. When moving to production systems in environments like Kubernetes clusters, the operational considerations continue to compound.

    Security is another significant challenge, which we will cover in more depth in a follow-on blog. However, at a high level, applications that use eBPF typically load their own eBPF programs, which requires at least CAP_BPF. Many BPF programs and attach points require additional capabilities, from CAP_SYS_PTRACE and CAP_NET_ADMIN up to and including CAP_SYS_ADMIN. These privileges include capabilities that aren\u2019t strictly necessary for eBPF and are too coarsely grained to be useful. Since the processes that load eBPF are usually long-lived and often don\u2019t drop privileges, this leaves a wide attack surface.

    While it doesn't solve all the ergonomic and maintenance problems associated with adopting eBPF, bpfman does try to address several of these issues -- particularly as it pertains to security and the lifecycle management of eBPF programs. In the coming sections, we will go into more depth about what bpfman does, and how it can help reduce the costs associated with deploying and managing eBPF-powered workloads.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#bpfman-overview","title":"bpfman Overview","text":"

    The bpfman project provides a software stack that makes it easy to manage the full lifecycle of eBPF programs. In particular, it can load, unload, modify, and monitor eBPF programs on a single host, or across a full Kubernetes cluster. The key components of bpfman include the bpfman daemon itself which can run independently on any Linux box, an accompanying Kubernetes Operator designed to bring first-class support to clusters via Custom Resource Definitions (CRDs), and eBPF program packaging.

    These components will be covered in more detail in the following sections.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#bpfman-daemon","title":"bpfman Daemon","text":"

    The bpfman daemon works directly with the operating system to manage eBPF programs. It loads, updates, and unloads eBPF programs, pins maps, and provides visibility into the eBPF programs loaded on a system. Currently, bpfman fully supports XDP, TC, Tracepoint, uProbe, and kProbe eBPF programs. In addition, bpfman can display information about all types of eBPF programs loaded on a system whether they were loaded by bpfman or some other mechanism. bpfman is developed in the Rust programming language and uses Aya, an eBPF library which is also developed in Rust.

    When used on an individual server, bpfman runs as a system daemon, and applications communicate with it using a gRPC API. bpfman can also be used via a command line, which in turn uses the gRPC API. The following is an example of using bpfman to load and attach an XDP program.

    bpfman load-from-image -g GLOBAL_u8=01 -i quay.io/bpfman-bytecode/xdp_pass:latest xdp -i eth0 -p 100\n

    This architecture is depicted in the following diagram.

    Using bpfman in this manner significantly improves security because the API is secured using mTLS, and only bpfman needs the privileges required to load and manage eBPF programs and maps.

    Writing eBPF code is tough enough as it is. Typically, an eBPF-based application would need to also implement support for the lifecycle management of the required eBPF programs. bpfman does that for you and allows you to focus on developing your application.

    Another key functional advantage that bpfman offers over libbpf or the Cilium ebpf-go library is support for multiple XDP programs. Standard XDP only allows a single XDP program on a given interface, while bpfman supports loading multiple XDP programs on each interface using the multi-prog protocol defined in libxdp. This allows the user to add, delete, update, prioritize, and re-prioritize the multiple programs on each interface. There is also support to configure whether the flow of execution should terminate and return or continue to the next program in the list based on the return value.

    While TC natively supports multiple programs on each attach point, it lacks the controls and flexibility enabled by the multi-prog protocol. bpfman therefore also supports the same XDP multi-prog solution for TC programs which has the added benefit of a consistent user experience for both XDP and TC programs.

    eBPF programs are also difficult to debug on a system. The visibility provided by bpfman can be a key tool in understanding which programs are deployed and how they may interact.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#bpfman-kubernetes-support","title":"bpfman Kubernetes Support","text":"

    The benefits of bpfman are brought to Kubernetes by the bpfman operator. The bpfman operator is developed in Go using the Operator SDK framework, so it should be familiar to most Kubernetes application developers. The bpfman operator deploys a daemonset, containing both bpfman and the bpfman agent processes on each node. Rather than making requests directly to bpfman with the gRPC API or CLI as described above, Kubernetes applications use bpfman custom resource definitions (CRDs) to make requests to bpfman to load and attach eBPF programs. bpfman uses two types of CRDs: Program CRDs for each eBPF program type (referred to as *Program CRDs, where * = Xdp, Tc, etc.) created by the application to express the desired state of an eBPF program on the cluster, and per-node BpfProgram CRDs created by the bpfman agent to report the current state of the eBPF program on each node.

    Using XDP as an example, the application can request that an XDP program be loaded on multiple nodes using the XdpProgram CRD, which includes the necessary information such as the bytecode image to load, interface to attach it to, and priority. An XdpProgram CRD that would do the same thing as the CLI command shown above on every node in a cluster is shown below.

    apiVersion: bpfman.io/v1alpha1\nkind: XdpProgram\nmetadata:\n  labels:\n    app.kubernetes.io/name: xdpprogram\n  name: xdp-pass-all-nodes\nspec:\n  name: pass\n  # Select all nodes\n  nodeselector: {}\n  interfaceselector:\n    primarynodeinterface: true\n  priority: 0\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/xdp_pass:latest\n  globaldata:\n    GLOBAL_u8:\n      - 0x01\n

    The bpfman agent on each node watches for the *Program CRDs, and makes calls to the local instance of bpfman as necessary to ensure that the state on the local node reflects the state requested in the *Program CRD. The bpfman agent on each node in turn creates and updates a BpfProgram object for the *Program CRD that reflects the state of the program on that node and reports the eBPF map information for the program. The following is the BpfProgram CRD on one node for the above XdpProgram CRD.

    kubectl get bpfprograms.bpfman.io xdp-pass-all-nodes-bpfman-deployment-control-plane-eth0 -o yaml\n
    apiVersion: bpfman.io/v1alpha1\nkind: BpfProgram\nmetadata:\n  annotations:\n    bpfman.io.xdpprogramcontroller/interface: eth0\n  creationTimestamp: \"2023-08-29T22:08:12Z\"\n  finalizers:\n  - bpfman.io.xdpprogramcontroller/finalizer\n  generation: 1\n  labels:\n    bpfman.io/ownedByProgram: xdp-pass-all-nodes\n    kubernetes.io/hostname: bpfman-deployment-control-plane\n  name: xdp-pass-all-nodes-bpfman-deployment-control-plane-eth0\n  ownerReferences:\n  - apiVersion: bpfman.io/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: XdpProgram\n    name: xdp-pass-all-nodes\n    uid: 838dc2f8-a348-427e-9dc4-f6a6ea621930\n  resourceVersion: \"2690\"\n  uid: 5a622961-e5b0-44fe-98af-30756b2d0b62\nspec:\n  type: xdp\nstatus:\n  conditions:\n  - lastTransitionTime: \"2023-08-29T22:08:14Z\"\n    message: Successfully loaded bpfProgram\n    reason: bpfmanLoaded\n    status: \"True\"\n    type: Loaded\n

    Finally, the bpfman operator watches for updates to the BpfProgram objects and reports the global state of each eBPF program. If the program was successfully loaded on every selected node, it will report success, otherwise, it will identify the node(s) that had a problem. The following is the XdpProgram CRD as updated by the operator.

    kubectl get xdpprograms.bpfman.io xdp-pass-all-nodes -o yaml\n
    apiVersion: bpfman.io/v1alpha1\nkind: XdpProgram\nmetadata:\n  annotations:\n    kubectl.kubernetes.io/last-applied-configuration: |\n      {\"apiVersion\":\"bpfman.io/v1alpha1\",\"kind\":\"XdpProgram\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/name\":\"xdpprogram\"},\"name\":\"xdp-pass-all-nodes\"},\"spec\":{\"bytecode\":{\"image\":{\"url\":\"quay.io/bpfman-bytecode/xdp_pass:latest\"}},\"globaldata\":{\"GLOBAL_u8\":[1]},\"interfaceselector\":{\"primarynodeinterface\":true},\"nodeselector\":{},\"priority\":0,\"bpffunctionname\":\"pass\"}}\n  creationTimestamp: \"2023-08-29T22:08:12Z\"\n  finalizers:\n  - bpfman.io.operator/finalizer\n  generation: 2\n  labels:\n    app.kubernetes.io/name: xdpprogram\n  name: xdp-pass-all-nodes\n  resourceVersion: \"2685\"\n  uid: 838dc2f8-a348-427e-9dc4-f6a6ea621930\nspec:\n  bytecode:\n    image:\n      imagepullpolicy: IfNotPresent\n      url: quay.io/bpfman-bytecode/xdp_pass:latest\n  globaldata:\n    GLOBAL_u8: 0x01\n  interfaceselector:\n    primarynodeinterface: true\n  mapownerselector: {}\n  nodeselector: {}\n  priority: 0\n  proceedon:\n  - pass\n  - dispatcher_return\n  name: pass\nstatus:\n  conditions:\n  - lastTransitionTime: \"2023-08-29T22:08:12Z\"\n    message: Waiting for Program Object to be reconciled to all nodes\n    reason: ProgramsNotYetLoaded\n    status: \"True\"\n    type: NotYetLoaded\n  - lastTransitionTime: \"2023-08-29T22:08:12Z\"\n    message: bpfProgramReconciliation Succeeded on all nodes\n    reason: ReconcileSuccess\n    status: \"True\"\n    type: ReconcileSuccess\n

    More details about this process can be seen here.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#ebpf-program-packaging","title":"eBPF program packaging","text":"

    The eBPF Bytecode Image specification was created as part of the bpfman project to define a way to package eBPF bytecode as OCI container images. Its use was illustrated in the CLI and XdpProgram CRD examples above in which the XDP program was loaded from quay.io/bpfman-bytecode/xdp_pass:latest. The initial motivation for this image spec was to facilitate the deployment of eBPF programs in container orchestration systems such as Kubernetes, where it is necessary to provide a portable way to distribute bytecode to all nodes that need it. However, bytecode images have proven useful on standalone Linux systems as well. When coupled with BPF CO-RE (Compile Once \u2013 Run Everywhere), portability is further enhanced in that applications can use the same bytecode images across different kernel versions without the need to recompile them for each version. Another benefit of bytecode containers is image signing. There is currently no way to sign and validate raw eBPF bytecode. However, the bytecode containers can be signed and validated by bpfman using sigstore to improve supply chain security.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#key-benefits-of-bpfman","title":"Key benefits of bpfman","text":"

    This section reviews some of the key benefits of bpfman. These benefits mostly apply to both standalone and Kubernetes deployments, but we will focus on the benefits for Kubernetes here.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#security","title":"Security","text":"

    Probably the most compelling benefit of using bpfman is enhanced security. When using bpfman, only the bpfman daemon, which can be tightly controlled, needs the privileges required to load eBPF programs, while access to the API can be controlled via standard RBAC methods on a per-application and per-CRD basis. Additionally, the signing and validating of bytecode images enables supply chain security.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#visibility-and-debuggability","title":"Visibility and Debuggability","text":"

    eBPF programs can interact with each other in unexpected ways. The multi-program support described above helps control these interactions by providing a common mechanism to prioritize and control the flow between the programs. However, there can still be problems, and there may be eBPF programs running on nodes that were loaded by other mechanisms that you don\u2019t even know about. bpfman helps here too by reporting all of the eBPF programs running on all of the nodes in a Kubernetes cluster.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#productivity","title":"Productivity","text":"

    As described above, managing the lifecycle of eBPF programs is something that each application currently needs to do on its own. It is even more complicated to manage the lifecycle of eBPF programs across a Kubernetes cluster. bpfman does this for you so you don't have to. eBPF bytecode images help here as well by simplifying the distribution of eBPF bytecode to multiple nodes in a cluster, and also allowing separate fine-grained versioning control for user space and kernel space code.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#demonstration","title":"Demonstration","text":"

    This demonstration is adapted from the instructions documented by Andrew Stoycos here.

    These instructions use kind and bpfman release v0.2.1. It should also be possible to run this demo on other environments such as minikube or an actual cluster.

    Another option is to build the code yourself and use make run-on-kind to create the cluster as described in the given links. Then, start with step 5.

    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#run-the-demo","title":"Run the demo","text":"

    1. Create Kind Cluster

    kind create cluster --name=test-bpfman\n

    2. Deploy cert-manager

    kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml\n

    3. Deploy the bpfman CRDs

    kubectl apply -f  https://github.com/bpfman/bpfman/releases/download/v0.2.1/bpfman-crds-install-v0.2.1.yaml\n

    4. Deploy bpfman-operator

    kubectl apply -f https://github.com/bpfman/bpfman/releases/download/v0.2.1/bpfman-operator-install-v0.2.1.yaml\n

    5. Verify the deployment

    kubectl get pods -A\n
    NAMESPACE            NAME                                              READY   STATUS    RESTARTS   AGE\nbpfman                 bpfman-daemon-nkzpf                                 2/2     Running   0          28s\nbpfman                 bpfman-operator-77d697fdd4-clrf7                    2/2     Running   0          33s\ncert-manager         cert-manager-99bb69456-x8n84                      1/1     Running   0          57s\ncert-manager         cert-manager-cainjector-ffb4747bb-pt4hr           1/1     Running   0          57s\ncert-manager         cert-manager-webhook-545bd5d7d8-z5brw             1/1     Running   0          57s\nkube-system          coredns-565d847f94-gjjft                          1/1     Running   0          61s\nkube-system          coredns-565d847f94-mf2cq                          1/1     Running   0          61s\nkube-system          etcd-test-bpfman-control-plane                      1/1     Running   0          76s\nkube-system          kindnet-lv6f9                                     1/1     Running   0          61s\nkube-system          kube-apiserver-test-bpfman-control-plane            1/1     Running   0          76s\nkube-system          kube-controller-manager-test-bpfman-control-plane   1/1     Running   0          77s\nkube-system          kube-proxy-dtmvb                                  1/1     Running   0          61s\nkube-system          kube-scheduler-test-bpfman-control-plane            1/1     Running   0          78s\nlocal-path-storage   local-path-provisioner-684f458cdd-8gxxv           1/1     Running   0          61s\n

    Note that we have the bpfman-operator, bpfman-daemon and cert-manager pods running.

    6. Deploy the XDP counter program and user space application

    kubectl apply -f https://github.com/bpfman/bpfman/releases/download/v0.2.1/go-xdp-counter-install-v0.2.1.yaml\n

    7. Confirm that the programs are loaded

    Userspace program:

    kubectl get pods -n go-xdp-counter\n
    NAME                      READY   STATUS              RESTARTS   AGE\ngo-xdp-counter-ds-9lpgp   0/1     ContainerCreating   0          5s\n

    XDP program:

    kubectl get xdpprograms.bpfman.io -o wide\n
    NAME                     BPFFUNCTIONNAME   NODESELECTOR   PRIORITY   INTERFACESELECTOR               PROCEEDON\ngo-xdp-counter-example   stats             {}             55         {\"primarynodeinterface\":true}   [\"pass\",\"dispatcher_return\"]\n

    8. Confirm that the counter program is counting packets.

    Notes:

    • The counters are updated every 5 seconds, and stats are being collected for the pod's primary node interface, which may not have a lot of traffic. However, running the kubectl command below generates traffic on that interface, so run the command a few times and give it a few seconds in between to confirm whether the counters are incrementing.
    • Replace \"go-xdp-counter-ds-9lpgp\" with the go-xdp-counter pod name for your deployment.
    kubectl logs go-xdp-counter-ds-9lpgp -n go-xdp-counter | tail\n
    2023/09/05 16:58:21 1204 packets received\n2023/09/05 16:58:21 13741238 bytes received\n\n2023/09/05 16:58:24 1220 packets received\n2023/09/05 16:58:24 13744258 bytes received\n\n2023/09/05 16:58:27 1253 packets received\n2023/09/05 16:58:27 13750364 bytes received\n

    9. Deploy the xdp-pass-all-nodes program with priority set to 50 and proceedon set to drop as shown below.

    kubectl apply -f - <<EOF\napiVersion: bpfman.io/v1alpha1\nkind: XdpProgram\nmetadata:\n  labels:\n    app.kubernetes.io/name: xdpprogram\n  name: xdp-pass-all-nodes\nspec:\n  name: pass\n  nodeselector: {}\n  interfaceselector:\n    primarynodeinterface: true\n  priority: 50\n  proceedon:\n    - drop\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/xdp_pass:latest\nEOF\n

    10. Verify both XDP programs are loaded.

    kubectl get xdpprograms.bpfman.io -o wide\n
    NAME                     BPFFUNCTIONNAME   NODESELECTOR   PRIORITY   INTERFACESELECTOR               PROCEEDON\ngo-xdp-counter-example   stats             {}             55         {\"primarynodeinterface\":true}   [\"pass\",\"dispatcher_return\"]\nxdp-pass-all-nodes       pass              {}             50         {\"primarynodeinterface\":true}   [\"drop\"]\n

    The priority setting determines the order in which programs attached to the same interface are executed by the dispatcher, with a lower number being a higher priority. The go-xdp-counter-example program was loaded at priority 55, so the xdp-pass-all-nodes program will execute before the go-xdp-counter-example program.

    The proceedon setting tells the dispatcher whether to \"proceed\" to execute the next lower priority program attached to the same interface depending on the program's return value. When we set proceedon to drop, execution will proceed only if the program returns XDP_DROP. However, the xdp-pass-all-nodes program only returns XDP_PASS, so execution will terminate after it runs.
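
    In pseudocode terms, the dispatcher's chaining logic looks roughly like the following Rust sketch (illustrative only; the real dispatcher is generated eBPF code, and the type names here are made up):

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum XdpAction {
        Pass,
        Drop,
    }

    fn xdp_pass() -> XdpAction {
        XdpAction::Pass
    }

    /// Run programs in priority order (lowest number first). Execution
    /// continues to the next program only while each program's return
    /// value appears in its proceed-on list; otherwise it terminates.
    fn run_chain(programs: &[(fn() -> XdpAction, &[XdpAction])]) -> XdpAction {
        let mut action = XdpAction::Pass;
        for (prog, proceed_on) in programs {
            action = prog();
            if !proceed_on.contains(&action) {
                break;
            }
        }
        action
    }

    fn main() {
        // xdp-pass-all-nodes (priority 50, proceed-on: drop) runs first and
        // returns Pass, which is not in its proceed-on list, so the
        // go-xdp-counter program (priority 55) never runs.
        let chain: &[(fn() -> XdpAction, &[XdpAction])] = &[
            (xdp_pass, &[XdpAction::Drop]), // xdp-pass-all-nodes
            (xdp_pass, &[XdpAction::Pass]), // go-xdp-counter-example
        ];
        assert_eq!(run_chain(chain), XdpAction::Pass);
    }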

    Therefore, by loading the xdp-pass-all-nodes program in this way, we should have effectively stopped the go-xdp-counter-example program from running. Let's confirm that.

    11. Verify that packet counts are not being updated anymore

    Run the following command several times.

    kubectl logs go-xdp-counter-ds-9lpgp -n go-xdp-counter | tail\n
    2023/09/05 17:10:27 1395 packets received\n2023/09/05 17:10:27 13799730 bytes received\n\n2023/09/05 17:10:30 1395 packets received\n2023/09/05 17:10:30 13799730 bytes received\n\n2023/09/05 17:10:33 1395 packets received\n2023/09/05 17:10:33 13799730 bytes received\n

    12. Now, change the priority of the xdp-pass program to 60

    kubectl apply -f - <<EOF\napiVersion: bpfman.io/v1alpha1\nkind: XdpProgram\nmetadata:\n  labels:\n    app.kubernetes.io/name: xdpprogram\n  name: xdp-pass-all-nodes\nspec:\n  name: pass\n  # Select all nodes\n  nodeselector: {}\n  interfaceselector:\n    primarynodeinterface: true\n  priority: 60\n  proceedon:\n    - drop\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/xdp_pass:latest\nEOF\n

    13. Confirm that packets are being counted again

    Run the following command several times.

    kubectl logs go-xdp-counter-ds-9lpgp -n go-xdp-counter | tail\n
    2023/09/05 17:12:21 1435 packets received\n2023/09/05 17:12:21 13806214 bytes received\n\n2023/09/05 17:12:24 1505 packets received\n2023/09/05 17:12:24 13815359 bytes received\n\n2023/09/05 17:12:27 1558 packets received\n2023/09/05 17:12:27 13823065 bytes received\n

    We can see that the counters are incrementing again.

    14. Clean everything up

    Delete the programs

    kubectl delete xdpprogram xdp-pass-all-nodes\nkubectl delete -f https://github.com/bpfman/bpfman/releases/download/v0.2.1/go-xdp-counter-install-v0.2.1.yaml\n

    And/or, delete the whole kind cluster

    kind delete clusters test-bpfman\n
    "},{"location":"blog/2023/09/07/bpfman-a-novel-way-to-manage-ebpf/#joining-the-bpfman-community","title":"Joining the bpfman community","text":"

    If you're interested in bpfman and want to get involved, you can connect with the community in multiple ways. If you have some simple questions or need some help, feel free to start a discussion. If you find an issue, or you want to request a new feature, please create an issue. If you want something a little more synchronous, the project maintains a #bpfman channel on Kubernetes Slack and we have a weekly community meeting where everyone can join and bring topics to discuss about the project. We hope to see you there!

    "},{"location":"blog/2024/01/15/bpfmans-shift-towards-a-daemonless-design-and-using-sled-a-high-performance-embedded-database/","title":"bpfman's Shift Towards a Daemonless Design and Using Sled: a High Performance Embedded Database","text":"

    As part of issue #860, the community has steadily been converting all of the internal state management to go through a sled database instance, which is part of the larger effort to make bpfman completely daemonless.

    This article will go over the reasons behind the change and dive into some of the details of the actual implementation.

    "},{"location":"blog/2024/01/15/bpfmans-shift-towards-a-daemonless-design-and-using-sled-a-high-performance-embedded-database/#why","title":"Why?","text":"

    State management in bpfman has always been a headache, not because there's a huge amount of disparate data, but because there are multiple representations of the same data. Additionally, the delicate filesystem interactions and layout previously used to ensure persistence across restarts often led to issues.

    Understanding the existing flow of data in bpfman can help make this a bit clearer:

    With this design there was a lot of data wrangling required to convert the tonic-generated Rust bindings for the protocol buffer API into data structures that were useful for bpfman. Specifically, data would arrive via the gRPC server as specified in bpfman.v1.rs, where Rust types are generated from the protobuf definition. In rpc.rs, data was then converted to an internal set of structures defined in command.rs. Prior to pull request #683 there was an explosion of types, with each bpfman command having its own set of internal structures and enums. Now, most of the data that bpfman needs internally for all commands to manage an eBPF program is stored in the ProgramData structure, which we'll take a deeper look at a bit later. Additionally, there is extra complexity for the XDP and TC program types, which rely on an eBPF dispatcher program to provide multi-program support on a single network interface; however, this article will instead focus on the simpler examples.

    The tree of data stored by bpfman is quite complex, and this is made even more complicated by the fact that bpfman has to be persistent across restarts. To support this, raw data was often flushed to disk in the form of JSON files (all types in command.rs needed to implement serde's Serialize and Deserialize). Specific significance was also encoded into bpfman's directory structure, i.e., all program-related information was stored under /run/bpfd/programs/<ID>. The extra infrastructure and failure modes introduced by this process were a constant headache, pushing the community to find a better solution.
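
    To make that concrete, the old persistence model amounted to something like this hypothetical sketch (the structure name and fields are illustrative, not the actual bpfd code):

    use std::{collections::HashMap, error::Error, fs};

    use serde::{Deserialize, Serialize};

    // Every type persisted this way had to derive Serialize/Deserialize,
    // and the file path itself carried meaning.
    #[derive(Serialize, Deserialize)]
    struct SavedProgram {
        name: String,
        metadata: HashMap<String, String>,
    }

    fn flush_to_disk(id: u32, prog: &SavedProgram) -> Result<(), Box<dyn Error>> {
        // A crash between loading the program and this write could leave
        // the on-disk state inconsistent with the kernel state.
        fs::write(
            format!("/run/bpfd/programs/{id}"),
            serde_json::to_string(prog)?,
        )?;
        Ok(())
    }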

    "},{"location":"blog/2024/01/15/bpfmans-shift-towards-a-daemonless-design-and-using-sled-a-high-performance-embedded-database/#why-sled","title":"Why Sled?","text":"

    Sled is an open source project described on GitHub as \"the champagne of beta embedded databases\". The \"reasons\" for choosing an embedded database from the project website are pretty much spot on:

    Embedded databases are useful in several cases:\n\n- you want to store data on disk, without facing the complexity of files\n- you want to be simple, without operating an external database\n- you want to be fast, without paying network costs\n- using disk storage as a building block in your system\n

    As discussed in the previous section, persistence across restarts is one of bpfman's core design constraints, and with sled we almost get it for free! Additionally, due to how pervasive data management is in bpfman's core workflow, the data store needed to be kept as simple and lightweight as possible, ruling out heavier production-ready external database systems such as MySQL or Redis.

    Now, this mostly covered why embedded databases in general, but why did we choose sled...well, because it's written in :crab: Rust :crab: of course! Apart from the obvious, we took a small dive into the project before rewriting everything by transitioning the OCI bytecode image library to use the db rather than the filesystem. Overall the experience was extremely positive due to the following:

    • No more dealing directly with the filesystem: the sled instance is flushed to the fs automatically every 500 ms by default, and for good measure we manually flush it before shutting down.
    • The API is extremely simple; traditional get and insert operations function as expected (see the short sketch after this list).
    • Error handling with sled::Error is relatively simple and easy to map explicitly to a BpfmanError.
    • The db \"tree\" concept makes it easy to have separate key-spaces within the same instance.
    "},{"location":"blog/2024/01/15/bpfmans-shift-towards-a-daemonless-design-and-using-sled-a-high-performance-embedded-database/#transitioning-to-sled","title":"Transitioning to Sled","text":"

    Using the new embedded database started with the creation of a sled instance which could be easily shared across all of the modules in bpfman. To do this we utilized a globally available lazy_static variable called ROOT_DB in main.rs:

    #[cfg(not(test))]\nlazy_static! {\n    pub static ref ROOT_DB: Db = Config::default()\n        .path(STDIR_DB)\n        .open()\n        .expect(\"Unable to open root database\");\n}\n\n#[cfg(test)]\nlazy_static! {\n    pub static ref ROOT_DB: Db = Config::default()\n        .temporary(true)\n        .open()\n        .expect(\"Unable to open temporary root database\");\n}\n

    This block creates OR opens the filesystem-backed database at /var/lib/bpfman/db only when the ROOT_DB variable is first accessed, and also allows for the creation of a temporary db instance when running in unit tests. With this setup, all of the modules within bpfman can now easily access the database instance by simply using it, i.e., use crate::ROOT_DB.
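
    For example, any module can then open a logical key-space off the shared instance; a hypothetical sketch (the tree-name format here is illustrative):

    use crate::ROOT_DB;

    // Open (or create) the tree that holds one program's data, named
    // using the program's kernel id (name format is illustrative).
    fn open_program_tree(kernel_id: u32) -> sled::Result<sled::Tree> {
        ROOT_DB.open_tree(format!("prog_{kernel_id}"))
    }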

    Next, the existing bpfman structures needed to be flattened in order to work with the db; the central ProgramData structure can be used to demonstrate how this was done. Prior to the recent sled conversion, that structure looked like:

    /// ProgramInfo stores information about bpf programs that are loaded and managed\n/// by bpfd.\n#[derive(Debug, Serialize, Deserialize, Clone, Default)]\npub(crate) struct ProgramData {\n    // known at load time, set by user\n    name: String,\n    location: Location,\n    metadata: HashMap<String, String>,\n    global_data: HashMap<String, Vec<u8>>,\n    map_owner_id: Option<u32>,\n\n    // populated after load\n    kernel_info: Option<KernelProgramInfo>,\n    map_pin_path: Option<PathBuf>,\n    maps_used_by: Option<Vec<u32>>,\n\n    // program_bytes is used to temporarily cache the raw program data during\n    // the loading process.  It MUST be cleared following a load so that there\n    // is not a long lived copy of the program data living on the heap.\n    #[serde(skip_serializing, skip_deserializing)]\n    program_bytes: Vec<u8>,\n}\n

    This worked well enough, but as mentioned before, the process of flushing the data to disk involved manual serialization to JSON, which needed to occur at a specific point in time (following program load). This made disaster recovery almost impossible and could sometimes result in lost or partially reconstructed state.

    With sled, the first idea was to completely flatten ALL of bpfman's data into a single key-space, so that program.name simply turns into a db.get(\"program_<ID>_name\"). However, removing all of the core structures would have resulted in a complex diff which would have been hard to review and merge. Therefore a more staged approach was taken: the ProgramData structure was kept around, and now looks like:

    /// ProgramInfo stores information about bpf programs that are loaded and managed\n/// by bpfman.\n#[derive(Debug, Clone)]\npub(crate) struct ProgramData {\n    // Prior to load this will be a temporary Tree with a random ID, following\n    // load it will be replaced with the main program database tree.\n    db_tree: sled::Tree,\n\n    // populated after load, randomly generated prior to load.\n    id: u32,\n\n    // program_bytes is used to temporarily cache the raw program data during\n    // the loading process.  It MUST be cleared following a load so that there\n    // is not a long lived copy of the program data living on the heap.\n    program_bytes: Vec<u8>,\n}\n

    All of the fields are now removed in favor of a private reference to the unique sled::Tree instance for this ProgramData, which is named using the unique kernel id for the program. Each sled::Tree represents a single logical key-space / namespace / bucket, which allows key generation to be kept simple, i.e., db.get(\"program_<ID>_name\") can now be db_tree_prog_0000.get(\"program_name\"). Additionally, getters and setters are now built for each existing field so that access to the db can be controlled and the serialization/deserialization process can be hidden from the caller:

    ...\npub(crate) fn set_name(&mut self, name: &str) -> Result<(), BpfmanError> {\n    self.insert(\"name\", name.as_bytes())\n}\n\npub(crate) fn get_name(&self) -> Result<String, BpfmanError> {\n    self.get(\"name\").map(|v| bytes_to_string(&v))\n}\n...\n

    Therefore, ProgramData is now less of a container for program data and more of a wrapper for accessing program data. The getters/setters act as a bridge between standard Rust types and the raw bytes stored in the database, i.e., the sled::IVec type.
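
    The private insert and get helpers that those accessors call into might look something like the following sketch (hypothetical signatures, reusing the BpfmanError::DatabaseError variant that appears in the snippets below):

    // Hypothetical sketch; the real helpers in bpfman may differ.
    impl ProgramData {
        fn insert(&mut self, key: &str, value: &[u8]) -> Result<(), BpfmanError> {
            self.db_tree.insert(key, value).map(|_| ()).map_err(|e| {
                BpfmanError::DatabaseError(format!("unable to insert {key}"), e.to_string())
            })
        }

        fn get(&self, key: &str) -> Result<sled::IVec, BpfmanError> {
            self.db_tree
                .get(key)
                .map_err(|e| {
                    BpfmanError::DatabaseError(format!("unable to get {key}"), e.to_string())
                })?
                .ok_or_else(|| {
                    BpfmanError::DatabaseError(format!("{key} not found"), String::new())
                })
        }
    }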

    Once this was completed for all the relevant fields on all the relevant types (see pull request #874), the data bpfman needed for its managed eBPF programs was now automatically synced to disk :partying_face:

    "},{"location":"blog/2024/01/15/bpfmans-shift-towards-a-daemonless-design-and-using-sled-a-high-performance-embedded-database/#tradeoffs","title":"Tradeoffs","text":"

    All design changes come with some tradeoffs: for bpfman's conversion to sled, the main negative ended up being the complexity introduced by the sled::IVec type. It is basically just a thread-safe reference-counting pointer to a raw byte slice, and the only type raw database operations can be performed with. Previously, when using serde_json, all serialization/deserialization was handled automatically; with sled, the conversion is handled manually. Therefore, instead of a library handling the conversion of a Rust string (std::string::String) to raw bytes (&[u8]), bpfman has to handle it internally, using std::string::String::as_bytes and bpfman::utils::bytes_to_string:

    pub(crate) fn bytes_to_string(bytes: &[u8]) -> String {\n    String::from_utf8(bytes.to_vec()).expect(\"failed to convert &[u8] to string\")\n}\n

    For strings, conversion was simple enough, but when working with more complex Rust data types like HashMaps and Vectors, this became a bit more of an issue. For Vectors, we simply flatten the structure into a group of key/value pairs with the indexes encoded into the keys:

        pub(crate) fn set_kernel_map_ids(&mut self, map_ids: Vec<u32>) -> Result<(), BpfmanError> {\n        let map_ids = map_ids.iter().map(|i| i.to_ne_bytes()).collect::<Vec<_>>();\n\n        map_ids.iter().enumerate().try_for_each(|(i, v)| {\n            sled_insert(&self.db_tree, format!(\"kernel_map_ids_{i}\").as_str(), v)\n        })\n    }\n

    The sled scan_prefix(<K>) API then allows for easy fetching and rebuilding of the vector:

        pub(crate) fn get_kernel_map_ids(&self) -> Result<Vec<u32>, BpfmanError> {\n        self.db_tree\n            .scan_prefix(\"kernel_map_ids_\".as_bytes())\n            .map(|n| n.map(|(_, v)| bytes_to_u32(v.to_vec())))\n            .map(|n| {\n                n.map_err(|e| {\n                    BpfmanError::DatabaseError(\"Failed to get map ids\".to_string(), e.to_string())\n                })\n            })\n            .collect()\n    }\n
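
    The bytes_to_u32 helper used above is the numeric counterpart of bytes_to_string; a plausible sketch, assuming the same native-endian encoding as the to_ne_bytes() call in the setter:

    // Plausible sketch of bpfman::utils::bytes_to_u32 (assumed, not
    // copied from the source); panics if the slice is not 4 bytes.
    pub(crate) fn bytes_to_u32(bytes: Vec<u8>) -> u32 {
        u32::from_ne_bytes(
            bytes
                .try_into()
                .expect("failed to convert bytes to u32"),
        )
    }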

    For HashMaps, we follow a similar paradigm, except the map key is encoded in the database key:

        pub(crate) fn set_metadata(\n        &mut self,\n        data: HashMap<String, String>,\n    ) -> Result<(), BpfmanError> {\n        data.iter().try_for_each(|(k, v)| {\n            sled_insert(\n                &self.db_tree,\n                format!(\"metadata_{k}\").as_str(),\n                v.as_bytes(),\n            )\n        })\n    }\n\n    pub(crate) fn get_metadata(&self) -> Result<HashMap<String, String>, BpfmanError> {\n    self.db_tree\n        .scan_prefix(\"metadata_\")\n        .map(|n| {\n            n.map(|(k, v)| {\n                (\n                    bytes_to_string(&k)\n                        .strip_prefix(\"metadata_\")\n                        .unwrap()\n                        .to_string(),\n                    bytes_to_string(&v).to_string(),\n                )\n            })\n        })\n        .map(|n| {\n            n.map_err(|e| {\n                BpfmanError::DatabaseError(\"Failed to get metadata\".to_string(), e.to_string())\n            })\n        })\n        .collect()\n    }\n

    The same result could be achieved by creating individual database trees for each Vector/HashMap instance; however, our goal was to keep the layout as flat as possible. Although this resulted in some extra complexity within the data layer, the overall benefits still outweighed the extra code once the conversion was complete.

    "},{"location":"blog/2024/01/15/bpfmans-shift-towards-a-daemonless-design-and-using-sled-a-high-performance-embedded-database/#moving-forward-and-getting-involved","title":"Moving forward and Getting Involved","text":"

    Once the conversion to sled is fully complete, see issue #860, the project will be able to completely transition to becoming a library without having to worry about data and state management.

    If you are interested in in-memory databases, eBPF, Rust, or any of the technologies discussed today, please don't hesitate to reach out on Kubernetes Slack in the #bpfman channel or join one of the community meetings to get involved.

    "},{"location":"blog/2024/01/04/community-meeting-january-4-2024/","title":"Community Meeting: January 4, 2024","text":""},{"location":"blog/2024/01/04/community-meeting-january-4-2024/#welcome-to-2024","title":"Welcome to 2024!","text":"

    Welcome to the first bpfman Community Meeting of 2024. We are happy to start off a new year and excited for all the changes in store for bpfman in 2024!

    Below are some of the discussion points from this week's Community Meeting.

    • bpfman-csi Needs To Become Its Own Binary
    • Kubernetes Support For Attaching uprobes In Containers
    • Building The Community
    "},{"location":"blog/2024/01/04/community-meeting-january-4-2024/#bpfman-csi-needs-to-become-its-own-binary","title":"bpfman-csi Needs To Become Its Own Binary","text":"

    Some of the next work items for bpfman revolve around removing the async code from the code base, making bpfman-core a Rust library, and removing all the gRPC logic. Dave (@dave-tucker) is currently investigating this. One area to help out is to take the bpfman-csi thread and make it its own binary. This may require making bpfman a bin and lib crate (which is fine, it just needs a lib.rs and to be very careful about what we\u2019re exporting). Andrew (@astoycos) is starting to take a look at this.

    "},{"location":"blog/2024/01/04/community-meeting-january-4-2024/#kubernetes-support-for-attaching-uprobes-in-containers","title":"Kubernetes Support For Attaching uprobes In Containers","text":"

    Base support for attaching uprobes in containers is currently merged. Andre (@anfredette) pushed PR#875 for the integration with Kubernetes. The hard problems, like getting the container PID, are solved, but the current PR has some shortcuts that were taken to get the functionality working before the holiday break. So PR#875 is not ready for review, but Dave (@dave-tucker) and Andre (@anfredette) may do a quick review to verify the design principles.

    "},{"location":"blog/2024/01/04/community-meeting-january-4-2024/#building-the-community","title":"Building The Community","text":"

    Short discussion on building the Community. In a previous meeting, Dave (@dave-tucker) suggested capturing the meeting minutes in blogs. By placing the minutes in a blog, they become searchable from search engines. Billy (@billy99) re-raised this topic and volunteered to start capturing the content. In future meetings, we may use the transcript feature from Google Meet to capture the content and try generating the blog via ChatGPT.

    "},{"location":"blog/2024/01/04/community-meeting-january-4-2024/#light-hearted-moments-and-casual-conversations","title":"Light-hearted Moments and Casual Conversations","text":"

    Amidst the technical discussions, the community members took a moment to share some light-hearted moments and casual conversations. Topics ranged from the challenges of post-holiday credit card bills to the complexities of managing family schedules during exam week. The discussion touched on the quirks of public school rules and the unique challenges of parenting during exam periods.

    The meeting ended on a friendly note, with plans for further collaboration and individual tasks assigned for the upcoming days. Participants expressed their commitment to pushing updates and improvements, with a promise to reconvene in the near future.

    "},{"location":"blog/2024/01/04/community-meeting-january-4-2024/#attendees","title":"Attendees","text":"
    • Andre Fredette (Red Hat)
    • Andrew Stoycos (Red Hat)
    • Billy McFall (Red Hat)
    • Dave Tucker (Red Hat)
    "},{"location":"blog/2024/01/04/community-meeting-january-4-2024/#bpfman-community-info","title":"bpfman Community Info","text":"

    A friendly reminder that the Community Meetings are every Thursday 10am-11am Eastern US Time and all are welcome!

    Google Meet joining info:

    • Google Meet
    • Or dial: (US) +1 984-221-0859 PIN: 613 588 790#
    • Agenda Document
    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/","title":"Community Meeting: January 11 and 18, 2024","text":""},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#hit-the-ground-running","title":"Hit the Ground Running","text":"

    Another set of bpfman Community Meetings for 2024. There is a lot going on with bpfman in Q1 of 2024. Spending a lot of time making bpfman daemonless. I bailed for a ski trip after the Jan 11 meeting, so the notes didn't get written up. So this summary will include two weeks of meetings.

    Below are some of the discussion points from the last two weeks' Community Meetings.

    • Manpage/CLI TAB Completion Questions (Jan 11)
    • Kubernetes Support for Attaching uprobes in Containers (Jan 11)
    • netify Preview in Github Removed (Jan 11)
    • RPM Builds and Socket Activation (Jan 18)
    • KubeCon EU Discussion (Jan 18)
    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#january-11-2024","title":"January 11, 2024","text":""},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#manpagecli-tab-completion-questions-jan-11","title":"Manpage/CLI TAB Completion Questions (Jan 11)","text":"

    The bpfman CLI now has TAB Completion and man pages. However, a couple of nits need to be cleaned up (Issue#913), and Billy (@billy99) wanted to clarify a few issues encountered. The current implementation for both features uses an environment variable to set the destination directory for the generated files. Other features don't work this way, and there was a discussion on the proper location for the generated files. The decision was to use .output/..

    There was another discussion around clap (the Rust CLI crate) and passing variables to clap from the Cargo.toml file. In the CLI code, #[command(author, version, about, long_about = None)] implies that the values are pulled from the Cargo.toml file, but we aren\u2019t setting any of those variables. Also, cargo xtask build-man-page and cargo xtask build-completion pull from the xtask Cargo.toml file. The decision was to set the variables explicitly in code and not pull from Cargo.toml.

    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#kubernetes-support-for-attaching-uprobes-in-containers-jan-11","title":"Kubernetes Support for Attaching uprobes in Containers (Jan 11)","text":"

    Andre (@anfredette) is working on a feature to enable attaching uprobes in other containers. Currently, bpfman only supports attaching uprobes within the bpfman container. There was a discussion on the proper way to format a query to the KubeAPI server to match on NodeName in a Pod list. The discussion included a code walkthrough. Andrew (@astoycos) found a possible solution in client-go:Issue#410 and Dave (@dave-tucker) suggested kubernetes-api:podspec-v1-core.

    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#netify-preview-in-github-removed-jan-11","title":"netify Preview in Github Removed (Jan 11)","text":"

    Lastly, there was a discussion on the netify preview being removed from GitHub and a reminder why. Dave (@dave-tucker) explained that with the docs release history now in place, \"current\" is built from a branch and is not easy to preview. So for now, documentation developers need to run mkdocs locally (see generate-documention).

    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#attendees-jan-11","title":"Attendees (Jan 11)","text":"
    • Andre Fredette (Red Hat)
    • Andrew Stoycos (Red Hat)
    • Billy McFall (Red Hat)
    • Dave Tucker (Red Hat)
    • Shane Utt (Kong)
    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#january-18-2024","title":"January 18, 2024","text":""},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#rpm-builds-and-socket-activation-jan-18","title":"RPM Builds and Socket Activation (Jan 18)","text":"

    RPM builds for bpfman went in fairly recently, and Billy (@billy99) had some questions around their implementation. RPM builds and socket activation were developed and merged around the same time, and the RPM builds are not installing socket activation properly. Billy was just verifying that the RPMs should be installing the bpfman.socket file, and they should. There were also some questions on how to build RPMs locally. It was verified that packit build locally is the way forward.

    Note: Socket activation was added to RPM Builds along with documentation on building and using RPMs in PR#922

    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#kubecon-eu-discussion-jan-18","title":"KubeCon EU Discussion (Jan 18)","text":"

    With KubeCon EU just around the corner (March 19-22, 2024 in Paris), there was discussion around bpfman talks and who is attending. Dave (@dave-tucker) is probably attending and Shane (@shaneutt) might attend. So if you are planning on attending KubeCon EU and are interested in bpfman, or just eBPF, keep an eye out for these guys for some lively discussions!

    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#attendees-jan-18","title":"Attendees (Jan 18)","text":"
    • Billy McFall (Red Hat)
    • Dave Tucker (Red Hat)
    • Shane Utt (Kong)
    "},{"location":"blog/2024/01/19/community-meeting-january-11-and-18-2024/#bpfman-community-info","title":"bpfman Community Info","text":"

    A friendly reminder that the Community Meetings are every Thursday 10am-11am Eastern US Time and all are welcome!

    Google Meet joining info:

    • Google Meet
    • Or dial: (US) +1 984-221-0859 PIN: 613 588 790#
    • Agenda Document
    "},{"location":"design/clusterVsNamespaceScoped/","title":"bpfman CRDs - Cluster vs Namespace Scoped","text":""},{"location":"design/clusterVsNamespaceScoped/#status","title":"Status","text":"

    This design was implemented with bpfman-operator pull request #344. The feature was first released in the bpfman-operator v0.5.5 release.

    "},{"location":"design/clusterVsNamespaceScoped/#introduction","title":"Introduction","text":"

    For security reasons, cluster admins may want to limit certain applications to only loading eBPF programs within a given namespace. Currently, all bpfman Custom Resource Definitions (CRDs) are Cluster scoped. To provide cluster admins with tighter controls on eBPF program loading, some of the bpfman CRDs also need to be Namespace scoped.

    It does not make sense for all eBPF programs to be namespace scoped. Some eBPF program types, like kprobe, cannot be constrained to a namespace. The following program types will have a namespace scoped variant:

    • Uprobe
    • TC
    • TCX
    • XDP

    There will also be a namespace scoped BpfApplication variant that is limited to the namespace scoped eBPF program types listed above.

    "},{"location":"design/clusterVsNamespaceScoped/#current-implementation","title":"Current Implementation","text":"

    Currently, the reconciler code is broken into two layers (for both the bpfman-operator and the bpfman-agent). First is the *Program layer, where there is a reconciler for each program type (Fentry, Fexit, Kprobe, etc.). At this layer, program specific code handles creating the program specific structure. The *Program layer then calls the Common layer to handle the processing that is common across all programs.

    The implementation consists of a set of structures, plus an interface that defines the set of methods each structure needs to support.

    "},{"location":"design/clusterVsNamespaceScoped/#struct","title":"struct","text":"

    There is a set of structures (one for the BPF Program CRD and one for each *Program CRD) that define the contents of the CRDs (bpfman-operator/apis/v1alpha). Each object (BPF Program CRD and *Program CRD) also has a List object.

    type BpfProgram struct {\n    metav1.TypeMeta   `json:\",inline\"`\n    metav1.ObjectMeta `json:\"metadata,omitempty\"`\n\n    Spec BpfProgramSpec `json:\"spec\"`\n    // +optional\n    Status BpfProgramStatus `json:\"status,omitempty\"`\n}\ntype BpfProgramList struct {\n    metav1.TypeMeta `json:\",inline\"`\n    metav1.ListMeta `json:\"metadata,omitempty\"`\n    Items           []BpfProgram `json:\"items\"`\n}\n\ntype FentryProgram struct {\n    metav1.TypeMeta   `json:\",inline\"`\n    metav1.ObjectMeta `json:\"metadata,omitempty\"`\n\n    Spec FentryProgramSpec `json:\"spec\"`\n    // +optional\n    Status FentryProgramStatus `json:\"status,omitempty\"`\n}\ntype FentryProgramList struct {\n    metav1.TypeMeta `json:\",inline\"`\n    metav1.ListMeta `json:\"metadata,omitempty\"`\n    Items           []FentryProgram `json:\"items\"`\n}\n\n:\n

    There is a reconciler for each *Program. For the implementation, there is a common set of data used by each *Program reconciler, which is contained in the base struct ReconcilerCommon. Then there is a reconciler struct for each *Program, which embeds that *Program's CRD struct along with the base struct ReconcilerCommon. Below are the bpfman-agent structures, but the bpfman-operator follows the same pattern.

    type ReconcilerCommon struct {\n    client.Client\n    Scheme       *runtime.Scheme\n    GrpcConn     *grpc.ClientConn\n    BpfmanClient gobpfman.BpfmanClient\n    Logger       logr.Logger\n    NodeName     string\n    progId       *uint32\n    finalizer    string\n    recType      string\n    appOwner     metav1.Object // Set if the owner is an application\n}\n\ntype FentryProgramReconciler struct {\n    ReconcilerCommon\n    currentFentryProgram *bpfmaniov1alpha1.FentryProgram\n    ourNode              *v1.Node\n}\n\ntype FexitProgramReconciler struct {\n    ReconcilerCommon\n    currentFexitProgram *bpfmaniov1alpha1.FexitProgram\n    ourNode             *v1.Node\n}\n\n:\n
    "},{"location":"design/clusterVsNamespaceScoped/#interface","title":"interface","text":"

    The bpfmanReconciler interface defines the set of methods the *Program structs must implement to use the common reconciler code. Below are the bpfman-agent structures, but the bpfman-operator uses a ProgramReconciler, which follows the same pattern.

    type bpfmanReconciler interface {\n    SetupWithManager(mgr ctrl.Manager) error\n    Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error)\n    getFinalizer() string\n    getOwner() metav1.Object\n    getRecType() string\n    getProgType() internal.ProgramType\n    getName() string\n    getExpectedBpfPrograms(ctx context.Context) (*bpfmaniov1alpha1.BpfProgramList, error)\n    getLoadRequest(bpfProgram *bpfmaniov1alpha1.BpfProgram,\n        mapOwnerId *uint32) (*gobpfman.LoadRequest, error)\n    getNode() *v1.Node\n    getBpfProgramCommon() *bpfmaniov1alpha1.BpfProgramCommon\n    setCurrentProgram(program client.Object) error\n    getNodeSelector() *metav1.LabelSelector\n    getBpfGlobalData() map[string][]byte\n    getAppProgramId() string\n}\n

    There are also some common reconciler functions that implement the shared processing logic.

    func (r *ReconcilerCommon) reconcileCommon(ctx context.Context,\n    rec bpfmanReconciler,\n    programs []client.Object) (bool, ctrl.Result, error) {\n:\n}\n\nfunc (r *ReconcilerCommon) reconcileBpfProgram(ctx context.Context,\n    rec bpfmanReconciler,\n    loadedBpfPrograms map[string]*gobpfman.ListResponse_ListResult,\n    bpfProgram *bpfmaniov1alpha1.BpfProgram,\n    isNodeSelected bool,\n    isBeingDeleted bool,\n    mapOwnerStatus *MapOwnerParamStatus)\n(bpfmaniov1alpha1.BpfProgramConditionType, error) {\n:\n}\n\nfunc (r *ReconcilerCommon) reconcileBpfProgramSuccessCondition(\n    isLoaded bool,\n    shouldBeLoaded bool,\n    isNodeSelected bool,\n    isBeingDeleted bool,\n    noContainersOnNode bool,\n    mapOwnerStatus *MapOwnerParamStatus) bpfmaniov1alpha1.BpfProgramConditionType {\n:\n}\n

    The resulting hierarchy looks something like this:

                         --- FentryProgramReconciler\n                     |     func (r *FentryProgramReconciler) getFinalizer() string {}\n                     |\nbpfmanReconciler   ----- FexitProgramReconciler\n  ReconcilerCommon   |     func (r *FexitProgramReconciler) getFinalizer() string {}\n                     |\n                     --- \u2026\n
    "},{"location":"design/clusterVsNamespaceScoped/#adding-namespaced-scoped-crds","title":"Adding Namespaced Scoped CRDs","text":"

    While the contents of the namespace scoped and cluster scoped CRDs are mostly the same, Kubernetes requires a separate CRD for each scope.

    "},{"location":"design/clusterVsNamespaceScoped/#struct_1","title":"struct","text":"

    The set of CRD structures will need to be duplicated for each namespace scoped CRD (bpfman-operator/apis/v1alpha). Note that the data is similar; it is just a new object. The primary change is that the existing ContainerSelector struct will be replaced with a ContainerNsSelector. For namespace scoped CRDs, the namespace in the ContainerSelector is removed; the Namespace field for the object is instead embedded in the metav1.ObjectMeta structure. Not all program types will have a namespaced version, only those that can be contained by a namespace:

    • TC
    • TCX
    • Uprobe
    • XDP

    The Application Program will also have a namespaced version, but it will only allow the Program Types that are namespaced.

    type BpfProgram struct {\n    metav1.TypeMeta   `json:\",inline\"`\n    metav1.ObjectMeta `json:\"metadata,omitempty\"`\n\n    Spec BpfProgramSpec `json:\"spec\"`\n    // +optional\n    Status BpfProgramStatus `json:\"status,omitempty\"`\n}\ntype BpfProgramList struct {\n    metav1.TypeMeta `json:\",inline\"`\n    metav1.ListMeta `json:\"metadata,omitempty\"`\n    Items           []BpfProgram `json:\"items\"`\n}\n\ntype BpfNsProgram struct {\n    metav1.TypeMeta   `json:\",inline\"`\n    metav1.ObjectMeta `json:\"metadata,omitempty\"`\n\n    Spec BpfProgramSpec `json:\"spec\"`\n    // +optional\n    Status BpfProgramStatus `json:\"status,omitempty\"`\n}\ntype BpfNsProgramList struct {\n    metav1.TypeMeta `json:\",inline\"`\n    metav1.ListMeta `json:\"metadata,omitempty\"`\n    Items           []BpfNsProgram `json:\"items\"`\n}\n\ntype TcProgram struct {\n    metav1.TypeMeta   `json:\",inline\"`\n    metav1.ObjectMeta `json:\"metadata,omitempty\"`\n\n    Spec TcProgramSpec `json:\"spec\"`\n    // +optional\n    Status TcProgramStatus `json:\"status,omitempty\"`\n}\ntype TcProgramList struct {\n    metav1.TypeMeta `json:\",inline\"`\n    metav1.ListMeta `json:\"metadata,omitempty\"`\n    Items           []TcProgram `json:\"items\"`\n}\n\ntype TcNsProgram struct {\n    metav1.TypeMeta   `json:\",inline\"`\n    metav1.ObjectMeta `json:\"metadata,omitempty\"`\n\n    Spec TcNsProgramSpec `json:\"spec\"`\n    // +optional\n    Status TcProgramStatus `json:\"status,omitempty\"`\n}\ntype TcNsProgramList struct {\n    metav1.TypeMeta `json:\",inline\"`\n    metav1.ListMeta `json:\"metadata,omitempty\"`\n    Items           []TcNsProgram `json:\"items\"`\n}\n\n:\n
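
    For illustration, a sketch of the selector change described above; the field shapes are assumed from the API reference rather than copied from the implementation:

    type ContainerSelector struct {\n    // Target namespace; this field is removed in the namespaced variant.\n    Namespace string `json:\"namespace,omitempty\"`\n    // Target pods. This field must be specified.\n    Pods metav1.LabelSelector `json:\"pods\"`\n    // Name(s) of container(s). If none are specified, all containers\n    // in the pod are selected.\n    ContainerNames []string `json:\"containernames,omitempty\"`\n}\n\n// ContainerNsSelector drops Namespace; for namespace scoped CRDs the\n// namespace comes from the object's metav1.ObjectMeta instead.\ntype ContainerNsSelector struct {\n    Pods           metav1.LabelSelector `json:\"pods\"`\n    ContainerNames []string             `json:\"containernames,omitempty\"`\n}\n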
    "},{"location":"design/clusterVsNamespaceScoped/#interface_1","title":"interface","text":"

    The problem is that the bpfmanReconciler interface and the common functions use the types bpfmanReconciler, BpfProgram and BpfProgramList, which now need to be either cluster scoped or namespace scoped objects.

    To allow the common code to act on both cluster and namespace scoped objects, two new interfaces will be introduced. The first is BpfProg; both BpfProgram and BpfNsProgram need to implement its functions.

    type BpfProg interface {\n    GetName() string\n    GetUID() types.UID\n    GetAnnotations() map[string]string\n    GetLabels() map[string]string\n    GetStatus() *bpfmaniov1alpha1.BpfProgramStatus\n    GetClientObject() client.Object\n}\n

    The second interface is BpfProgList. Both BpfProgramList and BpfNsProgramList will need to implement its functions. Because the list objects hold lists of BpfProgram or BpfNsProgram objects, the base interface is a generic, where type T can be either BpfProgram or BpfNsProgram.

    type BpfProgList[T any] interface {\n    GetItems() []T\n}\n
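
    A sketch of how the two list types might satisfy this interface (assumed implementations, mirroring the struct definitions above):

    // Each list type simply exposes its Items slice, which lets the\n// generic reconciler code iterate over either kind of list.\nfunc (l *BpfProgramList) GetItems() []BpfProgram {\n    return l.Items\n}\n\nfunc (l *BpfNsProgramList) GetItems() []BpfNsProgram {\n    return l.Items\n}\n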

    The reconciler base struct ReconcilerCommon then becomes a generic as well, and all references to the types bpfmanReconciler, BpfProgram and BpfProgramList become the types bpfmanReconciler[T,TL], T and TL. Below are the bpfman-agent structures, but the bpfman-operator follows the same pattern.

    type ReconcilerCommon[T BpfProg, TL BpfProgList[T]] struct {\n    : // Data is the same\n}\n\nfunc (r *ReconcilerCommon[T, TL]) reconcileCommon(ctx context.Context,\n    rec bpfmanReconciler[T, TL],\n    programs []client.Object) (bool, ctrl.Result, error) {\n:\n}\n\nfunc (r *ReconcilerCommon[T, TL]) reconcileBpfProgram(ctx context.Context,\n    rec bpfmanReconciler[T, TL],\n    loadedBpfPrograms map[string]*gobpfman.ListResponse_ListResult,\n    bpfProgram *T,\n    isNodeSelected bool,\n    isBeingDeleted bool,\n    mapOwnerStatus *MapOwnerParamStatus)\n(bpfmaniov1alpha1.BpfProgramConditionType, error) {\n:\n}\n\nfunc (r *ReconcilerCommon[T, TL]) reconcileBpfProgramSuccessCondition(\n    isLoaded bool,\n    shouldBeLoaded bool,\n    isNodeSelected bool,\n    isBeingDeleted bool,\n    noContainersOnNode bool,\n    mapOwnerStatus *MapOwnerParamStatus) bpfmaniov1alpha1.BpfProgramConditionType {\n:\n}\n

    Same for the bpfmanReconciler interface.

    type bpfmanReconciler[T BpfProg, TL BpfProgList[T]] interface {\n    SetupWithManager(mgr ctrl.Manager) error\n    Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error)\n    getFinalizer() string\n    getOwner() metav1.Object\n    getRecType() string\n    getProgType() internal.ProgramType\n    getName() string\n    getExpectedBpfPrograms(ctx context.Context) (*TL, error)\n    getLoadRequest(bpfProgram *T,\n        mapOwnerId *uint32) (*gobpfman.LoadRequest, error)\n    getNode() *v1.Node\n    getBpfProgramCommon() *bpfmaniov1alpha1.BpfProgramCommon\n    setCurrentProgram(program client.Object) error\n    getNodeSelector() *metav1.LabelSelector\n    getBpfGlobalData() map[string][]byte\n    getAppProgramId() string\n}\n

    Issues arose when ReconcilerCommon functions needed to modify the BpfProgram or BpfNsProgram data. For the modifications to be applied, the types need to be pointers bpfmanReconciler[*T, *TL], *T and *TL. However, the compiler would not allow this:

    cannot use type BpfProgList[*T] outside a type constraint: interface contains type constraints\n
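
    A minimal sketch of the failure mode (hypothetical names): once an interface mixes a method set with type-union elements, Go treats it as a pure constraint, so it can no longer be used as an ordinary type for a parameter or field.

    // Hypothetical reduction of the problem:\ntype ProgList[T any] interface {\n    *BpfProgramList | *BpfNsProgramList // type constraint elements\n    GetItems() []T\n}\n\n// Fine: ProgList is used as a constraint.\nfunc forEach[T any, TL ProgList[T]](list TL) {}\n\n// Compile error: ProgList is used as a type.\n// var l ProgList[BpfProgram]\n// cannot use type ProgList[BpfProgram] outside a type constraint:\n// interface contains type constraints\n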

    To work around this, a new layer was added: a struct for the cluster scoped code and one for the namespaced code. The resulting hierarchy now looks something like this:

                         +--- ClusterProgramReconciler\n                     |     |\n                     |     +--- FentryProgramReconciler\n                     |     |     func (r *FentryProgramReconciler) getFinalizer() string {}\n                     |     |     :\n                     |     |\n                     |     +--- FexitProgramReconciler\n                     |     |     func (r *FexitProgramReconciler) getFinalizer() string {}\n                     |     |     :\n                     |     :\nbpfmanReconciler   --+\n  ReconcilerCommon   |\n                     +--- NamespaceProgramReconciler\n                           |\n                           +--- FentryNsProgramReconciler\n                           |     func (r *FentryNsProgramReconciler) getFinalizer() string {}\n                           |     :\n                           |\n                           +--- FexitNsProgramReconciler\n                           |     func (r *FexitNsProgramReconciler) getFinalizer() string {}\n                           |     :\n                           :\n
    type ClusterProgramReconciler struct {\n    ReconcilerCommon[BpfProgram, BpfProgramList]\n}\n\ntype NamespaceProgramReconciler struct {\n    ReconcilerCommon[BpfNsProgram, BpfNsProgramList]\n}\n

    Several functions were added to the bpfmanReconciler interface that are implemented by these structures.

    type bpfmanReconciler[T BpfProg, TL BpfProgList[T]] interface {\n    // BPF Cluster or Namespaced Reconciler\n    getBpfList(ctx context.Context, opts []client.ListOption) (*TL, error)\n    updateBpfStatus(ctx context.Context, bpfProgram *T, condition metav1.Condition) error\n    createBpfProgram(\n        attachPoint string,\n        rec bpfmanReconciler[T, TL],\n        annotations map[string]string,\n    ) (*T, error)\n\n    // *Program Reconciler\n    SetupWithManager(mgr ctrl.Manager) error\n    :\n}\n

    And the *Programs use the ClusterProgramReconciler or NamespaceProgramReconciler structs instead of the ReconcilerCommon struct.

    type TcProgramReconciler struct {\n    ClusterProgramReconciler\n    currentTcProgram *bpfmaniov1alpha1.TcProgram\n    interfaces       []string\n    ourNode          *v1.Node\n}\n\ntype TcNsProgramReconciler struct {\n    NamespaceProgramReconciler\n    currentTcNsProgram *bpfmaniov1alpha1.TcNsProgram\n    interfaces         []string\n    ourNode            *v1.Node\n}\n\n:\n
    "},{"location":"design/clusterVsNamespaceScoped/#naming","title":"Naming","text":"

    In the existing codebase, all the CRDs are cluster scoped:

    • BpfApplicationProgram
    • BpfProgram
    • FentryProgram
    • FexitProgram
    • KprobeProgram
    • ...

    Common practice is for cluster scoped objects to include \"Cluster\" in the name and for namespaced objects to not have an identifier. So the current CRDs SHOULD have been named:

    • ClusterBpfApplicationProgram
    • ClusterBpfProgram
    • ClusterFentryProgram
    • ClusterFexitProgram
    • ClusterKprobeProgram
    • ...

    Around the same time this feature was being developed, another feature was in progress that will split the loading and attaching of eBPF programs in bpfman into two steps. As part of that work, all the CRDs will be completely reworked. With this in mind, the plan for adding namespace scoped CRDs is to have the namespaced CRDs carry the identifier. After the load/attach split work is complete, the CRDs will be renamed to follow the common convention, in which the cluster scoped CRD names are prefixed with \"Cluster\".

    The current plan is for the namespace scoped CRDs to use the \"NsProgram\" identifier and for the cluster scoped CRDs to use the \"Program\" identifier. With the new namespace scope feature, below is the list of CRDs supported by bpfman-operator:

    • BpfNsApplicationProgram
    • BpfApplicationProgram
    • BpfNsProgram
    • BpfProgram
    • FentryProgram
    • FexitProgram
    • KprobeProgram
    • TcNsProgram
    • TcProgram
    • TcxNsProgram
    • TcxProgram
    • TracepointProgram
    • UprobeNsProgram
    • UprobeProgram
    • XdpNsProgram
    • XdpProgram
    "},{"location":"design/daemonless/","title":"Daemonless bpfd","text":""},{"location":"design/daemonless/#introduction","title":"Introduction","text":"

    The bpfd daemon is a userspace daemon that runs on the host and responds to gRPC API requests over a unix socket to load, unload and list the eBPF programs on a host.

    The rationale behind running as a daemon was that something needs to be listening on the unix socket for API requests, and that we also maintain some in-memory state about the programs that have been loaded. However, since this daemon requires root privileges to load and unload eBPF programs, it is a security risk for it to be a long-running process - even with the mitigations we have in place to drop privileges and run as a non-root user. This risk is equivalent to that of something like Docker.

    This document describes the design of a daemonless bpfd: a bpfd that only runs when required, for example, to load or unload an eBPF program.

    "},{"location":"design/daemonless/#design","title":"Design","text":"

    The daemonless bpfd is a single binary that collects some of the functionality from both bpfd and bpfctl.

    :note: Daemonless, not rootless. Since CAP_BPF (and more) is required to load and unload eBPF programs, we will still need to run as root. But at least we can run as root for a shorter period of time.

    "},{"location":"design/daemonless/#command-bpfd-system-service","title":"Command: bpfd system service","text":"

    This command will run the bpfd gRPC API server - for one or more of the gRPC API services we support.

    It will listen on a unix socket (or tcp socket) for API requests - provided via a positional argument, defaulting to unix:///var/run/bpfd.sock. It will shut down after a timeout of inactivity - provided by a --timeout flag, defaulting to 5 seconds.

    It will support being run as a systemd service, via socket activation, which will allow it to be started on demand when a request is made to the unix socket. When in this mode it will not create the unix socket itself, but will instead use LISTEN_FDS to determine the file descriptor of the unix socket to use.
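
    For reference, a minimal sketch of the LISTEN_FDS convention (bpfd itself is written in Rust; this Go fragment only illustrates the systemd protocol, which is language-agnostic):

    import (\n    \"net\"\n    \"os\"\n    \"strconv\"\n)\n\n// Sketch of systemd socket activation: check LISTEN_FDS and adopt the\n// inherited socket instead of creating one. Real implementations should\n// also validate LISTEN_PID against the current process.\nfunc listener() (net.Listener, error) {\n    // systemd sets LISTEN_FDS to the number of inherited sockets;\n    // the first inherited descriptor is always fd 3.\n    if n, err := strconv.Atoi(os.Getenv(\"LISTEN_FDS\")); err == nil && n >= 1 {\n        return net.FileListener(os.NewFile(uintptr(3), \"bpfd.sock\"))\n    }\n    // Not socket-activated: create the unix socket ourselves.\n    return net.Listen(\"unix\", \"/var/run/bpfd.sock\")\n}\n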

    Usage in local development (or packaged in a container) is still possible by running as follows:

    sudo bpfd --timeout=0 unix:///var/run/bpfd.sock\n

    :note: The bpfd user and group will be deprecated. We will also remove some of the unit-file complexity (i.e. directories) and handle this in bpfd itself.

    "},{"location":"design/daemonless/#command-bpfd-load-file","title":"Command: bpfd load file","text":"

    As the name suggests, this command will load an eBPF program from a file. This was formerly bpfctl load-from-file.

    "},{"location":"design/daemonless/#command-bpfd-load-image","title":"Command: bpfd load image","text":"

    As the name suggests, this command will load an eBPF program from a container image. This was formerly bpfctl load-from-image.

    "},{"location":"design/daemonless/#command-bpfd-unload","title":"Command: bpfd unload","text":"

    This command will unload an eBPF program. This was formerly bpfctl unload.

    "},{"location":"design/daemonless/#command-bpfd-list","title":"Command: bpfd list","text":"

    This command will list the eBPF programs that are currently loaded. This was formerly bpfctl list.

    "},{"location":"design/daemonless/#command-bpfd-pull","title":"Command: bpfd pull","text":"

    This command will pull the bpfd container image from a registry. This was formerly bpfctl pull.

    "},{"location":"design/daemonless/#command-bpfd-images","title":"Command: bpfd images","text":"

    This command will list the bpfd container images that are available. This command didn't exist, but makes sense to add.

    "},{"location":"design/daemonless/#command-bpfd-version","title":"Command: bpfd version","text":"

    This command will print the version of bpfd. This command didn't exist, but makes sense to add.

    "},{"location":"design/daemonless/#state-management","title":"State Management","text":"

    This is perhaps the most significant change from how bpfd currently works.

    Currently bpfd maintains state in-memory about the programs that have been loaded (by bpfd, and the kernel). Some of this state is flushed to disk, so if bpfd is restarted it can reconstruct it.

    Flushing to disk and state reconstruction are cumbersome at present, and having to move all state management out of in-memory stores is a forcing function to improve this. We will replace the existing state management with sled, which gives us a familiar API to work with while also being fast, reliable and persistent.

    "},{"location":"design/daemonless/#metrics-and-monitoring","title":"Metrics and Monitoring","text":"

    While adding metrics and monitoring is not a goal of this design, it should nevertheless be a consideration. In order to provide metrics to Prometheus or OpenTelemetry we will require an additional exporter process.

    We can either:

    1. Use the bpfd socket and retrieve metrics via the gRPC API
    2. Place state access + metrics gathering functions in a library, such that they could be used directly by the exporter process without requiring the bpfd socket.

    The latter would be more in line with how podman-prometheus-exporter works. The benefit here is that the metrics exporter process can be long-running with fewer privileges - whereas if it were to hit the API over the socket, it would effectively negate the point of being daemonless in the first place, since collection will likely occur more frequently than the timeout on the socket.

    "},{"location":"design/daemonless/#benefits","title":"Benefits","text":"

    The benefits of this design are:

    • No long-running daemon with root privileges
    • No need to run as a non-root user; this is important since the number of capabilities required is only getting larger.
    • We only need to ship a single binary.
    • We can use systemd socket activation to start bpfd on demand + timeout after a period of inactivity.
    • Forces us to fix state management, since we can never rely on in-memory state.
    • Bpfd becomes more modular - if we wish to add programs for runtime enforcement, metrics, or any other purpose, its design is decoupled from that of bpfd. It could be another binary, or a subcommand on the CLI, etc.
    "},{"location":"design/daemonless/#drawbacks","title":"Drawbacks","text":"

    None yet.

    "},{"location":"design/daemonless/#backwards-compatibility","title":"Backwards Compatibility","text":"
    • The bpfctl command will be removed and all functionality folded into bpfd
    • The bpfd command will be renamed to bpfd system service
    "},{"location":"developer-guide/api-spec/","title":"API Specification","text":"

    Packages:

    • bpfman.io/v1alpha1
    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1","title":"bpfman.io/v1alpha1","text":"

    Package v1alpha1 contains API Schema definitions for the bpfman.io API group.

    Resource Types:

    • BpfApplication
    • BpfProgram
    • FentryProgram
    • FexitProgram
    • KprobeProgram
    • TcProgram
    • TracepointProgram
    • UprobeProgram
    • XdpProgram
    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfApplication","title":"BpfApplication","text":"

    BpfApplication is the Schema for the bpfapplications API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string BpfApplication metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec BpfApplicationSpec BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    programs []BpfApplicationProgram

    Programs is a list of bpf programs supported for a specific application. It\u2019s possible that the application can selectively choose which program(s) to run from this list.

    status BpfApplicationStatus"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfProgram","title":"BpfProgram","text":"

    BpfProgram is the Schema for the Bpfprograms API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string BpfProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec BpfProgramSpec type string (Optional)

    Type specifies the bpf program type

    status BpfProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FentryProgram","title":"FentryProgram","text":"

    FentryProgram is the Schema for the FentryPrograms API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string FentryProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec FentryProgramSpec FentryProgramInfo FentryProgramInfo

    (Members of FentryProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    status FentryProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FexitProgram","title":"FexitProgram","text":"

    FexitProgram is the Schema for the FexitPrograms API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string FexitProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec FexitProgramSpec FexitProgramInfo FexitProgramInfo

    (Members of FexitProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    status FexitProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.KprobeProgram","title":"KprobeProgram","text":"

    KprobeProgram is the Schema for the KprobePrograms API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string KprobeProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec KprobeProgramSpec KprobeProgramInfo KprobeProgramInfo

    (Members of KprobeProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    status KprobeProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TcProgram","title":"TcProgram","text":"

    TcProgram is the Schema for the TcProgram API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string TcProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec TcProgramSpec TcProgramInfo TcProgramInfo

    (Members of TcProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    status TcProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TracepointProgram","title":"TracepointProgram","text":"

    TracepointProgram is the Schema for the TracepointPrograms API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string TracepointProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec TracepointProgramSpec TracepointProgramInfo TracepointProgramInfo

    (Members of TracepointProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    status TracepointProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.UprobeProgram","title":"UprobeProgram","text":"

    UprobeProgram is the Schema for the UprobePrograms API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string UprobeProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec UprobeProgramSpec UprobeProgramInfo UprobeProgramInfo

    (Members of UprobeProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    status UprobeProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.XdpProgram","title":"XdpProgram","text":"

    XdpProgram is the Schema for the XdpPrograms API

    Field Description apiVersion string bpfman.io/v1alpha1 kind string XdpProgram metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec XdpProgramSpec XdpProgramInfo XdpProgramInfo

    (Members of XdpProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    status XdpProgramStatus (Optional)"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfAppCommon","title":"BpfAppCommon","text":"

    (Appears on: BpfApplicationSpec, FentryProgramSpec, FexitProgramSpec, KprobeProgramSpec, TcProgramSpec, TracepointProgramSpec, UprobeProgramSpec, XdpProgramSpec)

    BpfAppCommon defines the common attributes for all BpfApp programs

    Field Description nodeselector Kubernetes meta/v1.LabelSelector

    NodeSelector allows the user to specify which nodes to deploy the bpf program to. This field must be specified; to select all nodes, use standard metav1.LabelSelector semantics and make it empty.

    globaldata map[string][]byte (Optional)

    GlobalData allows the user to set global variables when the program is loaded with an array of raw bytes. This is a very low level primitive. The caller is responsible for formatting the byte string appropriately considering such things as size, endianness, alignment and packing of data structures.

    bytecode BytecodeSelector

    Bytecode configures where the bpf program\u2019s bytecode should be loaded from.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfApplicationProgram","title":"BpfApplicationProgram","text":"

    (Appears on: BpfApplicationSpec)

    BpfApplicationProgram defines the desired state of BpfApplication

    Field Description type EBPFProgType

    Type specifies the bpf program type

    xdp XdpProgramInfo (Optional)

    xdp defines the desired state of the application\u2019s XdpPrograms.

    tc TcProgramInfo (Optional)

    tc defines the desired state of the application\u2019s TcPrograms.

    tcx TcProgramInfo (Optional)

    tcx defines the desired state of the application\u2019s TcPrograms.

    fentry FentryProgramInfo (Optional)

    fentry defines the desired state of the application\u2019s FentryPrograms.

    fexit FexitProgramInfo (Optional)

    fexit defines the desired state of the application\u2019s FexitPrograms.

    kprobe KprobeProgramInfo (Optional)

    kprobe defines the desired state of the application\u2019s KprobePrograms.

    kretprobe KprobeProgramInfo (Optional)

    kretprobe defines the desired state of the application\u2019s KretprobePrograms.

    uprobe UprobeProgramInfo (Optional)

    uprobe defines the desired state of the application\u2019s UprobePrograms.

    uretprobe UprobeProgramInfo (Optional)

    uretprobe defines the desired state of the application\u2019s UretprobePrograms.

    tracepoint TracepointProgramInfo (Optional)

    tracepoint defines the desired state of the application\u2019s TracepointPrograms.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfApplicationSpec","title":"BpfApplicationSpec","text":"

    (Appears on: BpfApplication)

    BpfApplicationSpec defines the desired state of BpfApplication

    Field Description BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    programs []BpfApplicationProgram

    Programs is a list of bpf programs supported for a specific application. It\u2019s possible that the application can selectively choose which program(s) to run from this list.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfApplicationStatus","title":"BpfApplicationStatus","text":"

    (Appears on: BpfApplication)

    BpfApplicationStatus defines the observed state of BpfApplication

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfProgramCommon","title":"BpfProgramCommon","text":"

    (Appears on: FentryProgramInfo, FexitProgramInfo, KprobeProgramInfo, TcProgramInfo, TracepointProgramInfo, UprobeProgramInfo, XdpProgramInfo)

    BpfProgramCommon defines the common attributes for all BPF programs

    Field Description bpffunctionname string

    BpfFunctionName is the name of the function that is the entry point for the BPF program

    mapownerselector Kubernetes meta/v1.LabelSelector (Optional)

    MapOwnerSelector is used to select the loaded eBPF program this eBPF program will share a map with. The value is a label applied to the BpfProgram to select. The selector must resolve to exactly one instance of a BpfProgram on a given node or the eBPF program will not load.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfProgramConditionType","title":"BpfProgramConditionType (string alias)","text":"

    BpfProgramConditionType is a condition type to indicate the status of a BPF program at the individual node level.

    Value Description

    \"BytecodeSelectorError\"

    BpfProgCondBytecodeSelectorError indicates that an error occurred when trying to process the bytecode selector.

    \"Loaded\"

    BpfProgCondLoaded indicates that the eBPF program was successfully loaded into the kernel on a specific node.

    \"MapOwnerNotFound\"

    BpfProgCondMapOwnerNotFound indicates that the eBPF program is sharing a map with another eBPF program, and that program does not exist.

    \"MapOwnerNotLoaded\"

    BpfProgCondMapOwnerNotLoaded indicates that the eBPF program is sharing a map with another eBPF program, and that program is not loaded.

    \"NoContainersOnNode\"

    BpfProgCondNoContainersOnNode indicates that there are no containers on the node that match the container selector.

    \"None\"

    None of the above conditions apply

    \"NotLoaded\"

    BpfProgCondNotLoaded indicates that the eBPF program has not yet been loaded into the kernel on a specific node.

    \"NotSelected\"

    BpfProgCondNotSelected indicates that the eBPF program is not scheduled to be loaded on a specific node.

    \"NotUnLoaded\"

    BpfProgCondNotUnloaded indicates that, in the midst of trying to remove the eBPF program from the kernel on the node, the program has not yet been removed.

    \"Unloaded\"

    BpfProgCondUnloaded indicates that the eBPF program has been unloaded from the kernel on a specific node.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfProgramSpec","title":"BpfProgramSpec","text":"

    (Appears on: BpfProgram)

    BpfProgramSpec defines the desired state of BpfProgram

    Field Description type string (Optional)

    Type specifies the bpf program type

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfProgramStatus","title":"BpfProgramStatus","text":"

    (Appears on: BpfProgram)

    BpfProgramStatus defines the observed state of BpfProgram TODO Make these a fixed set of metav1.Condition.types and metav1.Condition.reasons

    Field Description conditions []Kubernetes meta/v1.Condition

    Conditions houses the updates regarding the actual implementation of the bpf program on the node. Known .status.conditions.type are: \u201cAvailable\u201d, \u201cProgressing\u201d, and \u201cDegraded\u201d.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BpfProgramStatusCommon","title":"BpfProgramStatusCommon","text":"

    (Appears on: BpfApplicationStatus, FentryProgramStatus, FexitProgramStatus, KprobeProgramStatus, TcProgramStatus, TracepointProgramStatus, UprobeProgramStatus, XdpProgramStatus)

    BpfProgramStatusCommon defines the BpfProgram status

    Field Description conditions []Kubernetes meta/v1.Condition

    Conditions houses the global cluster state for the eBPFProgram. The explicit condition types are defined internally.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BytecodeImage","title":"BytecodeImage","text":"

    (Appears on: BytecodeSelector)

    BytecodeImage defines how to specify a bytecode container image.

    Field Description url string

    Valid container image URL used to reference a remote bytecode image.

    imagepullpolicy PullPolicy (Optional)

    PullPolicy describes a policy for if/when to pull a bytecode image. Defaults to IfNotPresent.

    imagepullsecret ImagePullSecretSelector (Optional)

    ImagePullSecret is the name of the secret bpfman should use to get remote image repository secrets.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.BytecodeSelector","title":"BytecodeSelector","text":"

    (Appears on: BpfAppCommon)

    BytecodeSelector defines the various ways to reference bpf bytecode objects.

    Field Description image BytecodeImage

    Image used to specify a bytecode container image.

    path string

    Path is used to specify a bytecode object via filepath.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.ContainerSelector","title":"ContainerSelector","text":"

    (Appears on: UprobeProgramInfo)

    ContainerSelector identifies a set of containers. For example, this can be used to identify a set of containers in which to attach uprobes.

    Field Description namespace string (Optional)

    Target namespaces.

    pods Kubernetes meta/v1.LabelSelector

    Target pods. This field must be specified; to select all pods, use standard metav1.LabelSelector semantics and make it empty.

    containernames []string (Optional)

    Name(s) of container(s). If none are specified, all containers in the pod are selected.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.EBPFProgType","title":"EBPFProgType (string alias)","text":"

    (Appears on: BpfApplicationProgram)

    EBPFProgType defines the supported eBPF program types

    Value Description

    \"Fentry\"

    ProgTypeFentry refers to the Fentry program type.

    \"Fexit\"

    ProgTypeFexit refers to the Fexit program type.

    \"Kprobe\"

    ProgTypeKprobe refers to the Kprobe program type.

    \"Kretprobe\"

    ProgTypeKretprobe refers to the Kretprobe program type.

    \"TC\"

    ProgTypeTC refers to the TC program type.

    \"TCX\"

    ProgTypeTCX refers to the TCx program type.

    \"Tracepoint\"

    ProgTypeTracepoint refers to the Tracepoint program type.

    \"Uprobe\"

    ProgTypeUprobe refers to the Uprobe program type.

    \"Uretprobe\"

    ProgTypeUretprobe refers to the Uretprobe program type.

    \"XDP\"

    ProgTypeXDP refers to the XDP program type.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FentryProgramInfo","title":"FentryProgramInfo","text":"

    (Appears on: BpfApplicationProgram, FentryProgramSpec)

    FentryProgramInfo defines the Fentry program details

    Field Description BpfProgramCommon BpfProgramCommon

    (Members of BpfProgramCommon are embedded into this type.)

    func_name string

    Function to attach the fentry to.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FentryProgramSpec","title":"FentryProgramSpec","text":"

    (Appears on: FentryProgram)

    FentryProgramSpec defines the desired state of FentryProgram

    Field Description FentryProgramInfo FentryProgramInfo

    (Members of FentryProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FentryProgramStatus","title":"FentryProgramStatus","text":"

    (Appears on: FentryProgram)

    FentryProgramStatus defines the observed state of FentryProgram

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FexitProgramInfo","title":"FexitProgramInfo","text":"

    (Appears on: BpfApplicationProgram, FexitProgramSpec)

    FexitProgramInfo defines the Fexit program details

    Field Description BpfProgramCommon BpfProgramCommon

    (Members of BpfProgramCommon are embedded into this type.)

    func_name string

    Function to attach the fexit to.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FexitProgramSpec","title":"FexitProgramSpec","text":"

    (Appears on: FexitProgram)

    FexitProgramSpec defines the desired state of FexitProgram

    Field Description FexitProgramInfo FexitProgramInfo

    (Members of FexitProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.FexitProgramStatus","title":"FexitProgramStatus","text":"

    (Appears on: FexitProgram)

    FexitProgramStatus defines the observed state of FexitProgram

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.ImagePullSecretSelector","title":"ImagePullSecretSelector","text":"

    (Appears on: BytecodeImage)

    ImagePullSecretSelector defines the name and namespace of an image pull secret.

    Field Description name string

    Name of the secret which contains the credentials to access the image repository.

    namespace string

    Namespace of the secret which contains the credentials to access the image repository.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.InterfaceSelector","title":"InterfaceSelector","text":"

    (Appears on: TcProgramInfo, XdpProgramInfo)

    InterfaceSelector defines interface to attach to.

    Field Description interfaces []string (Optional)

    Interfaces refers to a list of network interfaces to attach the BPF program to.

    primarynodeinterface bool (Optional)

    Attach BPF program to the primary interface on the node. Only \u2018true\u2019 accepted.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.KprobeProgramInfo","title":"KprobeProgramInfo","text":"

    (Appears on: BpfApplicationProgram, KprobeProgramSpec)

    KprobeProgramInfo defines the common fields for KprobeProgram

    Field Description BpfProgramCommon BpfProgramCommon

    (Members of BpfProgramCommon are embedded into this type.)

    func_name string

    Functions to attach the kprobe to.

    offset uint64 (Optional)

    Offset added to the address of the function for kprobe. Not allowed for kretprobes.

    retprobe bool (Optional)

    Whether the program is a kretprobe. Default is false

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.KprobeProgramSpec","title":"KprobeProgramSpec","text":"

    (Appears on: KprobeProgram)

    KprobeProgramSpec defines the desired state of KprobeProgram

    Field Description KprobeProgramInfo KprobeProgramInfo

    (Members of KprobeProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.KprobeProgramStatus","title":"KprobeProgramStatus","text":"

    (Appears on: KprobeProgram)

    KprobeProgramStatus defines the observed state of KprobeProgram

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.ProgramConditionType","title":"ProgramConditionType (string alias)","text":"

    ProgramConditionType is a condition type to indicate the status of a BPF program at the cluster level.

    Value Description

    \"DeleteError\"

    ProgramDeleteError indicates that the BPF program was marked for deletion, but deletion was unsuccessful.

    \"NotYetLoaded\"

    ProgramNotYetLoaded indicates that the program in question has not yet been loaded on all nodes in the cluster.

    \"ReconcileError\"

    ProgramReconcileError indicates that an unforeseen situation has occurred in the controller logic, and the controller will retry.

    \"ReconcileSuccess\"

    BpfmanProgConfigReconcileSuccess indicates that the BPF program has been successfully reconciled.

    TODO: we should consider removing \u201creconciled\u201d type logic from the public API as it\u2019s an implementation detail of our use of controller runtime, but not necessarily relevant to human users or integrations.

    See: https://github.com/bpfman/bpfman/issues/430

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.PullPolicy","title":"PullPolicy (string alias)","text":"

    (Appears on: BytecodeImage)

    PullPolicy describes a policy for if/when to pull a container image

    Value Description

    \"Always\"

    PullAlways means that bpfman always attempts to pull the latest bytecode image. Container will fail if the pull fails.

    \"IfNotPresent\"

    PullIfNotPresent means that bpfman pulls if the image isn\u2019t present on disk. Container will fail if the image isn\u2019t present and the pull fails.

    \"Never\"

    PullNever means that bpfman never pulls an image, but only uses a local image. Container will fail if the image isn\u2019t present.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TcProceedOnValue","title":"TcProceedOnValue (string alias)","text":"

    (Appears on: TcProgramInfo)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TcProgramInfo","title":"TcProgramInfo","text":"

    (Appears on: BpfApplicationProgram, TcProgramSpec)

    TcProgramInfo defines the tc program details

    Field Description BpfProgramCommon BpfProgramCommon

    (Members of BpfProgramCommon are embedded into this type.)

    interfaceselector InterfaceSelector

    Selector to determine the network interface (or interfaces)

    priority int32

    Priority specifies the priority of the tc program in relation to other programs of the same type with the same attach point. It is a value from 0 to 1000 where lower values have higher precedence.

    direction string

    Direction specifies the direction of traffic the tc program should attach to for a given network device.

    proceedon []TcProceedOnValue (Optional)

    ProceedOn allows the user to call other tc programs in chain on this exit code. Multiple values are supported by repeating the parameter.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TcProgramSpec","title":"TcProgramSpec","text":"

    (Appears on: TcProgram)

    TcProgramSpec defines the desired state of TcProgram

    Field Description TcProgramInfo TcProgramInfo

    (Members of TcProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TcProgramStatus","title":"TcProgramStatus","text":"

    (Appears on: TcProgram)

    TcProgramStatus defines the observed state of TcProgram

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TracepointProgramInfo","title":"TracepointProgramInfo","text":"

    (Appears on: BpfApplicationProgram, TracepointProgramSpec)

    TracepointProgramInfo defines the Tracepoint program details

    Field Description BpfProgramCommon BpfProgramCommon

    (Members of BpfProgramCommon are embedded into this type.)

    names []string

    Names refers to the names of kernel tracepoints to attach the bpf program to.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TracepointProgramSpec","title":"TracepointProgramSpec","text":"

    (Appears on: TracepointProgram)

    TracepointProgramSpec defines the desired state of TracepointProgram

    Field Description TracepointProgramInfo TracepointProgramInfo

    (Members of TracepointProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.TracepointProgramStatus","title":"TracepointProgramStatus","text":"

    (Appears on: TracepointProgram)

    TracepointProgramStatus defines the observed state of TracepointProgram

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.UprobeProgramInfo","title":"UprobeProgramInfo","text":"

    (Appears on: BpfApplicationProgram, UprobeProgramSpec)

    UprobeProgramInfo contains the information about the uprobe program

    Field Description BpfProgramCommon BpfProgramCommon

    (Members of BpfProgramCommon are embedded into this type.)

    func_name string (Optional)

    Function to attach the uprobe to.

    offset uint64 (Optional)

    Offset added to the address of the function for uprobe.

    target string

    Library name or the absolute path to a binary or library.

    retprobe bool (Optional)

    Whether the program is a uretprobe. Default is false

    pid int32 (Optional)

    Only execute uprobe for given process identification number (PID). If PID is not provided, uprobe executes for all PIDs.

    containers ContainerSelector (Optional)

    Containers identifies the set of containers in which to attach the uprobe. If Containers is not specified, the uprobe will be attached in the bpfman-agent container. The ContainerSelector is very flexible and even allows the selection of all containers in a cluster. If an attempt is made to attach uprobes to too many containers, it can have a negative impact on the cluster.

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.UprobeProgramSpec","title":"UprobeProgramSpec","text":"

    (Appears on: UprobeProgram)

    UprobeProgramSpec defines the desired state of UprobeProgram

    Field Description UprobeProgramInfo UprobeProgramInfo

    (Members of UprobeProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.UprobeProgramStatus","title":"UprobeProgramStatus","text":"

    (Appears on: UprobeProgram)

    UprobeProgramStatus defines the observed state of UprobeProgram

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.XdpProceedOnValue","title":"XdpProceedOnValue (string alias)","text":"

    (Appears on: XdpProgramInfo)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.XdpProgramInfo","title":"XdpProgramInfo","text":"

    (Appears on: BpfApplicationProgram, XdpProgramSpec)

    XdpProgramInfo defines the common fields for all XdpProgram types

    Field Description BpfProgramCommon BpfProgramCommon

    (Members of BpfProgramCommon are embedded into this type.)

    interfaceselector InterfaceSelector

    Selector to determine the network interface (or interfaces)

    priority int32

    Priority specifies the priority of the bpf program in relation to other programs of the same type with the same attach point. It is a value from 0 to 1000 where lower values have higher precedence.

    proceedon []XdpProceedOnValue"},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.XdpProgramSpec","title":"XdpProgramSpec","text":"

    (Appears on: XdpProgram)

    XdpProgramSpec defines the desired state of XdpProgram

    Field Description XdpProgramInfo XdpProgramInfo

    (Members of XdpProgramInfo are embedded into this type.)

    BpfAppCommon BpfAppCommon

    (Members of BpfAppCommon are embedded into this type.)

    "},{"location":"developer-guide/api-spec/#bpfman.io/v1alpha1.XdpProgramStatus","title":"XdpProgramStatus","text":"

    (Appears on: XdpProgram)

    XdpProgramStatus defines the observed state of XdpProgram

    Field Description BpfProgramStatusCommon BpfProgramStatusCommon

    (Members of BpfProgramStatusCommon are embedded into this type.)

    Generated with gen-crd-api-reference-docs.

    "},{"location":"developer-guide/configuration/","title":"Configuration","text":""},{"location":"developer-guide/configuration/#bpfman-configuration-file","title":"bpfman Configuration File","text":"

    bpfman looks for a configuration file to be present at /etc/bpfman/bpfman.toml. If no file is found, defaults are assumed. There is an example at scripts/bpfman.toml, similar to:

    [interfaces]\n  [interfaces.eth0]\n  xdp_mode = \"drv\" # Valid xdp modes are \"hw\", \"skb\" and \"drv\". Default: \"drv\", but will fall back to \"skb\" on failure.\n\n[signing]\nallow_unsigned = true\nverify_enabled = true\n\n[database]\nmax_retries = 10\nmillisec_delay = 1000\n
    "},{"location":"developer-guide/configuration/#config-section-interfaces","title":"Config Section: [interfaces]","text":"

    This section of the configuration file allows the XDP Mode for a given interface to be set. If not set, the default value of skb will be used. Multiple interfaces can be configured.

    [interfaces]\n  [interfaces.eth0]\n  xdp_mode = \"drv\"\n  [interfaces.eth1]\n  xdp_mode = \"hw\"\n  [interfaces.eth2]\n  xdp_mode = \"skb\"\n

    Valid fields:

    • xdp_mode: XDP Mode for a given interface. Valid values: [\"drv\"|\"hw\"|\"skb\"]
    "},{"location":"developer-guide/configuration/#config-section-signing","title":"Config Section: [signing]","text":"

    This section of the configuration file allows control over whether signatures on OCI packaged eBPF bytecode as container images are verified, and whether they are required to be signed via cosign.

    By default, images are verified, and unsigned images are allowed. See eBPF Bytecode Image Specifications for more details on building and shipping bytecode in a container image.

    Valid fields:

    • allow_unsigned: Flag indicating whether unsigned images are allowed. Valid values: [\"true\"|\"false\"]

    • verify_enabled: Flag indicating whether signatures should be verified. Valid values: [\"true\"|\"false\"]

    "},{"location":"developer-guide/configuration/#config-section-database","title":"Config Section: [database]","text":"

    bpfman uses an embedded database to store state and persistent data on disk, which can only be accessed synchronously by a single process at a time. To avoid returning database lock errors and enhance the user experience, bpfman performs retries when opening the database. The number of retries and the time between retries are configurable.

    Valid fields:

    • max_retries: The number of times to retry opening the database on a given request.
    • millisec_delay: Time in milliseconds to wait between retry attempts.
    "},{"location":"developer-guide/configuration/#config-section-registry","title":"Config Section: [registry]","text":"

    bpfman uses the latest public container images for the xdp and tc dispatchers by default, but the image locations are user-configurable. For example, it may be desirable in certain enterprise environments to source the xdp and tc dispatcher images from a self-hosted OCI image registry. In this case, the default values for the xdp and tc dispatcher images can be overridden as shown below.

    Valid fields:

    • xdp_dispatcher_image: The locator of the xdp dispatcher image in the format quay.io/bpfman/xdp-dispatcher:latest
    • tc_dispatcher_image: The locator of the tc dispatcher image in the format quay.io/bpfman/tc-dispatcher:latest
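
    For example, a sketch of a [registry] section pointing at a self-hosted mirror might look like the following, where registry.example.com is a placeholder for an internal registry:

    [registry]\nxdp_dispatcher_image = \"registry.example.com/bpfman/xdp-dispatcher:latest\"\ntc_dispatcher_image = \"registry.example.com/bpfman/tc-dispatcher:latest\"\n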
    "},{"location":"developer-guide/debugging/","title":"Debugging using VSCode and lldb on a remote machine or VM","text":"
    1. Install the CodeLLDB VSCode extension
    2. Add a configuration to .vscode/launch.json like the following (customizing for a given system using the comment in the configuration file):

          {\n        \"name\": \"Remote debug bpfman\",\n        \"type\": \"lldb\",\n        \"request\": \"launch\",\n        \"program\": \"<ABSOLUTE_PATH>/github.com/bpfman/bpfman/target/debug/bpfman\", // Local path to latest debug binary.\n        \"initCommands\": [\n            \"platform select remote-linux\", // Execute `platform list` for a list of available remote platform plugins.\n            \"platform connect connect://<IP_ADDRESS_OF_VM>:8175\", // replace <IP_ADDRESS_OF_VM>\n            \"settings set target.inherit-env false\",\n        ],\n        \"env\": {\n            \"RUST_LOG\": \"debug\"\n        },\n        \"cargo\": {\n            \"args\": [\n                \"build\",\n                \"--bin=bpfman\",\n                \"--package=bpfman\"\n            ],\n            \"filter\": {\n                \"name\": \"bpfman\",\n                \"kind\": \"bin\"\n            }\n        },\n        \"cwd\": \"${workspaceFolder}\",\n    },\n
    3. On the VM or Server install lldb-server:

      dnf based OS:

          sudo dnf install lldb\n

      apt based OS:

          sudo apt install lldb\n
    4. Start lldb-server on the VM or Server (make sure to do this from your home directory):

          cd ~\n    sudo lldb-server platform --server --listen 0.0.0.0:8175\n
    5. Add breakpoints as needed via the vscode GUI and then hit F5 to start debugging!
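
    If the debugger fails to connect, a quick sanity check is to confirm from another shell on the VM or Server that lldb-server is listening on the expected port (which must match the port used in launch.json):

    ss -tlnp | grep 8175\n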

    "},{"location":"developer-guide/develop-operator/","title":"Developing the bpfman-operator","text":"

    This section is intended to give developer-level details regarding the layout and design of the bpfman-operator. At its core, the operator was implemented using the operator-sdk framework, which makes those docs another good resource if anything is missed here.

    "},{"location":"developer-guide/develop-operator/#high-level-design-overview","title":"High level design overview","text":"

    This repository houses two main processes, the bpfman-agent and the bpfman-operator, along with CRD API definitions for the BpfProgram and *Program objects. The following diagram depicts how all these components work together to create a functioning operator.

    "},{"location":"developer-guide/develop-operator/#building-and-deploying","title":"Building and Deploying","text":"

    To build and deploy the bpfman-operator, see the make help output below.

    $ make help\n\nUsage:\n  make <target>\n\nGeneral\n  help             Display this help.\n\nLocal Dependencies\n  kustomize        Download kustomize locally if necessary.\n  controller-gen   Download controller-gen locally if necessary.\n  register-gen     Download register-gen locally if necessary.\n  informer-gen     Download informer-gen locally if necessary.\n  lister-gen       Download lister-gen locally if necessary.\n  client-gen       Download client-gen locally if necessary.\n  envtest          Download envtest-setup locally if necessary.\n  opm              Download opm locally if necessary.\n\nDevelopment\n  manifests        Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.\n  generate         Generate ALL auto-generated code.\n  generate-register  Generate register code see all `zz_generated.register.go` files.\n  generate-deepcopy  Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations see all `zz_generated.register.go` files.\n  generate-typed-clients  Generate typed client code\n  generate-typed-listers  Generate typed listers code\n  generate-typed-informers  Generate typed informers code\n  vendors          Refresh vendors directory.\n  fmt              Run go fmt against code.\n  verify           Verify all the autogenerated code\n  lint             Run linter (golangci-lint).\n  test             Run Unit tests.\n  test-integration  Run Integration tests.\n  bundle           Generate bundle manifests and metadata, then validate generated files.\n  build-release-yamls  Generate the crd install bundle for a specific release version.\n\nBuild\n  build            Build bpfman-operator and bpfman-agent binaries.\n  build-images     Build bpfman-agent and bpfman-operator images.\n  build-operator-image  Build bpfman-operator image.\n  build-agent-image  Build bpfman-agent image.\n  push-images      Push bpfman-agent and bpfman-operator images.\n  load-images-kind  Load bpfman-agent, and bpfman-operator images into the running local kind devel cluster.\n  bundle-build     Build the bundle image.\n  bundle-push      Push the bundle image.\n  catalog-update   Generate catalog yaml file.\n  catalog-build    Build a catalog image.\n  catalog-push     Push a catalog image.\n\nCRD Deployment\n  install          Install CRDs into the K8s cluster specified in ~/.kube/config.\n  uninstall        Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.\n\nVanilla K8s Deployment\n  setup-kind       Setup Kind cluster\n  destroy-kind     Destroy Kind cluster\n  deploy           Deploy bpfman-operator to the K8s cluster specified in ~/.kube/config with the csi driver initialized.\n  undeploy         Undeploy bpfman-operator from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.\n  kind-reload-images  Reload locally build images into a kind cluster and restart the ds and deployment so they're picked up.\n  run-on-kind      Kind Deploy runs the bpfman-operator on a local kind cluster using local builds of bpfman, bpfman-agent, and bpfman-operator\n\nOpenshift Deployment\n  deploy-openshift  Deploy bpfman-operator to the Openshift cluster specified in ~/.kube/config.\n  undeploy-openshift  Undeploy bpfman-operator from the Openshift cluster specified in ~/.kube/config. 
Call with ignore-not-found=true to ignore resource not found errors during deletion.\n  catalog-deploy   Deploy a catalog image.\n  catalog-undeploy  Undeploy a catalog image.\n
    "},{"location":"developer-guide/develop-operator/#project-layout","title":"Project Layout","text":"

    The bpfman-operator project layout is guided by the recommendations from both the operator-sdk framework and the standard golang project-layout. The following is a brief description of the main directories under bpfman-operator/ and their contents.

    NOTE: Bolded directories contain auto-generated code

    • apis/v1alpha1/*_types.go: Contains the K8s CRD api definitions (*_types.go) for each version.
    • apis/v1alpha1/zz_generated.*.go: Contains the auto-generated register (zz_generated.register.go) and deepcopy (zz_generated.deepcopy.go) methods.
    • bundle/: Contains the OLM bundle manifests and metadata for the operator. More details can be found in the operator-sdk documentation.
    • cmd/: Contains the main entry-points for the bpfman-operator and bpfman-agent processes.
    • config/: Contains the configuration files for launching the bpfman-operator on a cluster.
      • bpfman-deployment/: Contains static deployment yamls for the bpfman-daemon, which includes two containers: one for bpfman and the other for the bpfman-agent. This DaemonSet yaml is NOT deployed statically by kustomize; instead, it's statically copied into the operator image, which is then responsible for deploying and configuring the bpfman-daemon DaemonSet. Lastly, this directory also contains the default config used to configure the bpfman-daemon, along with the cert-manager certificates used to encrypt communication between the bpfman-agent and bpfman.
      • bpfman-operator-deployment/: Contains the static deployment yaml for the bpfman-operator. This is deployed statically by kustomize.
      • crd/: Contains the CRD manifests for all of the bpfman-operator APIs.
        • bases/: Is where the actual CRD definitions are stored. These definitions are auto-generated by controller-gen.
        • patches/: Contains kustomize patch files for each Program Type, which enables a conversion webhook for the CRD and adds a directive for certmanager to inject CA into the CRD.
      • default/: Contains the default deployment configuration for the bpfman-operator.
      • manifests/: Contains the bases for generating OLM manifests.
      • openshift/: Contains the Openshift specific deployment configuration for the bpfman-operator.
      • prometheus/: Contains the prometheus manifests used to deploy Prometheus to a cluster. At the time of writing, the bpfman-operator is NOT exposing any metrics to Prometheus, but this is a future goal.
      • rbac/: Contains rbac yamls for getting bpfman and the bpfman-operator up and running on Kubernetes.
        • bpfman-agent/: Contains the rbac yamls for the bpfman-agent. They are automatically generated by kubebuilder via build tags in the bpfman-agent controller code.
        • bpfman-operator/: Contains the rbac yamls for the bpfman-operator. They are automatically generated by kubebuilder via build tags in the bpfman-operator controller code.
      • samples/: Contains sample CR definitions that can be deployed by users for each of our supported APIs.
      • scorecard/: Contains the scorecard manifests used to deploy scorecard to a cluster. At the time of writing, the bpfman-operator is NOT running any scorecard tests.
      • test/: Contains the test manifests used to deploy the bpfman-operator to a kind cluster for integration testing.
    • controllers/: Contains the controller implementations for all of the bpfman-operator APIs. Each controller is responsible for reconciling the state of the cluster with the desired state defined by the user. This is where the source of truth for the auto-generated RBAC can be found; keep an eye out for //+kubebuilder:rbac:groups=bpfman.io comment tags.
      • bpfmanagent/: Contains the controller implementations which reconcile user created *Program types to multiple BpfProgram objects.
      • bpfmanoperator/: Contains the controller implementations which reconcile global BpfProgram object state back to the user by ensuring the user created *Program objects are reporting the correct status.
    • hack/: Contains any scripts+static files used by the bpfman-operator to facilitate development.
    • internal/: Contains all private library code and is used by the bpfman-operator and bpfman-agent controllers.
    • pkg/: Contains all public library code that is consumed both externally and internally.
      • client/: Contains the autogenerated clientset, informers and listers for all of the bpfman-operator APIs. These are autogenerated by the k8s.io/code-generator project, and can be consumed by users wishing to programmatically interact with bpfman specific APIs.
      • helpers/: Contains helper functions which can be consumed by users wishing to programmatically interact with bpfman specific APIs.
    • test/integration/: Contains integration tests for the bpfman-operator. These tests are run against a kind cluster and are responsible for testing the bpfman-operator in a real cluster environment. It uses the kubernetes-testing-framework project to programmatically spin up all of the required infrastructure for our unit tests.
    • Makefile: Contains all of the make targets used to build, test, and generate code used by the bpfman-operator.
    "},{"location":"developer-guide/develop-operator/#rpc-protobuf-generation","title":"RPC Protobuf Generation","text":"

    Technically part of the bpfman API, the RPC Protobufs are usually not coded until a bpfman feature is integrated into the bpfman-operator and bpfman-agent code. To modify the RPC Protobuf definition, edit proto/bpfman.proto. Then to generate the protobufs from the updated RPC Protobuf definitions:

    cd bpfman/\ncargo xtask build-proto\n

    This will generate:

    • bpfman-api/src/bpfman.v1.rs: Generated Rust Protobuf source code.
    • clients/gobpfman/v1/: Directory that contains the generated Go Client code for interacting with bpfman over RPC from a Go application.

    When editing proto/bpfman.proto, follow the best practices described in Proto Best Practices.

    Note

    cargo xtask build-proto also pulls in proto/csi.proto (which is in the same directory as proto/bpfman.proto). proto/csi.proto is taken from container-storage-interface/spec/csi.proto. See container-storage-interface/spec/spec.md for more details.

    "},{"location":"developer-guide/develop-operator/#generated-files","title":"Generated Files","text":"

    The operator-sdk framework will generate multiple categories of files (Custom Resource Definitions (CRD), RBAC ClusterRole, Webhook Configuration, typed client, lister, and informer code, etc). If any of the bpfman-operator/apis/v1alpha1/*Program_types.go files are modified, then regenerate these files using:

    cd bpfman-operator/\nmake generate\n

    This command will generate all auto-generated code. There are commands to generate each sub-category if needed. See make help to list all the generate commands.

    "},{"location":"developer-guide/develop-operator/#building","title":"Building","text":"

    To run in Kubernetes, bpfman components need to be containerized. However, building container images can take longer than just building the code. During development, it may be quicker to find and fix build errors by just building the code. To build the code:

    cd bpfman-operator/\nmake build\n

    To build the container images, run the following command:

    cd bpfman-operator/\nmake build-images\n

    If the make build command is skipped above, the code will be built in the build-images command. If the make build command is run, the built code will be leveraged in this step. This command generates the following images:

    docker images\nREPOSITORY                       TAG      IMAGE ID       CREATED          SIZE\nquay.io/bpfman/bpfman            latest   69df038ccea3   43 seconds ago   515MB\nquay.io/bpfman/bpfman-agent      latest   f6af33c5925b   2 minutes ago    464MB\nquay.io/bpfman/bpfman-operator   latest   4fe444b7abf1   2 minutes ago    141MB\n:\n
    "},{"location":"developer-guide/develop-operator/#running-locally-in-kind","title":"Running Locally in KIND","text":"

    Deploying the bpfman-operator goes into more detail on ways to launch bpfman in a Kubernetes cluster. To run locally in a Kind cluster with an up-to-date build, simply run:

    cd bpfman-operator/\nmake run-on-kind\n

    The make run-on-kind target will run make build-images if the images do not exist or need updating.

    To rebuild and load a fresh build, run:

    cd bpfman-operator/\nmake build-images\nmake kind-reload-images\n

    This will rebuild the bpfman-operator and bpfman-agent images and load them into the kind cluster.

    By default, the make run-on-kind target uses the local images described above. The container images used for bpfman, bpfman-agent, and bpfman-operator can also be manually configured:

    BPFMAN_IMG=<your/image/url> BPFMAN_AGENT_IMG=<your/image/url> BPFMAN_OPERATOR_IMG=<your/image/url> make run-on-kind\n
    "},{"location":"developer-guide/develop-operator/#testing-locally","title":"Testing Locally","text":"

    See Kubernetes Operator Tests.

    "},{"location":"developer-guide/develop-operator/#troubleshooting","title":"Troubleshooting","text":""},{"location":"developer-guide/develop-operator/#metricshealth-port-issues","title":"Metrics/Health port issues","text":"

    In some scenarios, the health and metric ports may already be in use by other services on the system. When this happens, the bpfman-agent container fails to deploy. The ports currently default to 8175 and 8174.

    The ports are passed in through daemonset.yaml for the bpfman-daemon, and through deployment.yaml and manager_auth_proxy_patch.yaml for the bpfman-operator. The easiest way to change which ports are used is to update these yaml files and rebuild the container images. The container images need to be rebuilt because the bpfman-daemon is deployed from the bpfman-operator, and the associated yaml files are copied into the bpfman-operator image.

    If rebuilding the container images is not desirable, the ports can be changed on the fly. For the bpfman-operator, the ports can be updated by editing the bpfman-operator Deployment.

    kubectl edit deployment -n bpfman bpfman-operator\n\napiVersion: apps/v1\nkind: Deployment\n:\nspec:\n  template:\n  :\n  spec:\n    containers:\n    -args:\n      - --secure-listen-address=0.0.0.0:8443\n      - --upstream=http://127.0.0.1:8174/        <-- UPDATE\n      - --logtostderr=true\n      - --v=0\n      name: kube-rbac-proxy\n      :\n    - args:\n      - --health-probe-bind-address=:8175        <-- UPDATE\n      - --metrics-bind-address=127.0.0.1:8174    <-- UPDATE\n      - --leader-elect\n      :\n      livenessProbe:\n          failureThreshold: 3\n          httpGet:\n            path: /healthz\n            port: 8175                           <-- UPDATE\n            scheme: HTTP\n            :\n      name: bpfman-operator\n      readinessProbe:\n          failureThreshold: 3\n          httpGet:\n            path: /readyz\n            port: 8175                           <-- UPDATE\n            scheme: HTTP\n      :\n

    For the bpfman-daemon, the ports can be updated by editing the bpfman-daemon DaemonSet. However, if the bpfman-daemon is restarted for any reason by the bpfman-operator, the changes will be lost. So it is recommended to update the ports for the bpfman-daemon via the bpfman-config ConfigMap in the bpfman namespace.

    kubectl edit configmap -n bpfman bpfman-config\n\napiVersion: v1\ndata:\n  bpfman.agent.healthprobe.addr: :8175                    <-- UPDATE\n  bpfman.agent.image: quay.io/bpfman/bpfman-agent:latest\n  bpfman.agent.log.level: info\n  bpfman.agent.metric.addr: 127.0.0.1:8174                <-- UPDATE\n  bpfman.image: quay.io/bpfman/bpfman:latest\n  bpfman.log.level: debug\nkind: ConfigMap\n:\n
    "},{"location":"developer-guide/documentation/","title":"Documentation","text":"

    This section describes how to modify bpfman's documentation. All of bpfman's documentation is written in Markdown, and leverages mkdocs to generate a static site, which is hosted on netlify.

    If this is the first time building using mkdocs, jump to the Development Environment Setup section for help installing the tooling.

    "},{"location":"developer-guide/documentation/#documentation-notes","title":"Documentation Notes","text":"

    This section provides some notes on the dos and don'ts of writing documentation.

    "},{"location":"developer-guide/documentation/#website-management","title":"Website Management","text":"

    The headings and layout of the website, as well as other configuration settings, are managed from the mkdocs.yml file in the project root directory.

    "},{"location":"developer-guide/documentation/#markdown-style","title":"Markdown Style","text":"

    When writing documentation via a Markdown file, the following format has been followed:

    • Text on a given line should not exceed 100 characters, unless it's example syntax or a link that should be broken up.
    • Each new sentence should start on a new line (see the example below). That way, if text needs to be inserted, whole paragraphs don't need to be adjusted.
    • Links to other markdown files are relative to the file the link is placed in.
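
    For example, a paragraph written in the sentence-per-line style looks like the following in the Markdown source (a sketch; both lines render as a single paragraph):

    This is the first sentence of a paragraph.\nThis is the second sentence, which renders on the same line as the first.\n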
    "},{"location":"developer-guide/documentation/#governance-files","title":"Governance Files","text":"

    There are a set of well known governance files that are typically placed in the root directory of most projects, like README.md, MAINTAINERS.md, CONTRIBUTING.md, etc. mkdocs expects all files used in the static website to be located under a common directory, docs/ for bpfman. To reference the governance files from the static website, a directory (docs/governance/) was created with a file for each governance file, each of which only contains --8<-- and the file name. This tells mkdocs to pull the referenced file in from the project root directory.

    For example: docs/governance/MEETINGS.md
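
    Assuming the standard mkdocs snippets syntax, the entire contents of docs/governance/MEETINGS.md would be a single line similar to:

    --8<-- \"MEETINGS.md\"\n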

    Note

    This works for the website generation, but if a Markdown file is viewed through Github (not the website), the link is broken. So these files should only be linked from docs/index.md and mkdocs.yml.

    "},{"location":"developer-guide/documentation/#docsdeveloper-guideapi-specmd","title":"docs/developer-guide/api-spec.md","text":"

    The file docs/developer-guide/api-spec.md documents the CRDs used in a Kubernetes deployment. The contents are auto-generated when PRs are pushed to Github.

    The contents can be generated locally by running the command ./scripts/api-docs/generate.sh apidocs.html from the root bpfman directory.

    "},{"location":"developer-guide/documentation/#generate-documentation","title":"Generate Documentation","text":"

    To build and preview the generated documentation locally, use mkdocs from the bpfman root directory:

    cd bpfman/\nmkdocs build\n

    Note

    If mkdocs build gives you an error, make sure you have the mkdocs packages listed below installed.

    To preview from a build on a local machine, start the mkdocs dev-server with the command below, then open up http://127.0.0.1:8000/ in your browser, and you'll see the default home page being displayed:

    mkdocs serve\n

    To preview from a build on a remote machine, start the mkdocs dev-server with the command below, then open up http://<ServerIP>:8000/ in your browser, and you'll see the default home page being displayed:

    mkdocs serve -a 0.0.0.0:8000\n
    "},{"location":"developer-guide/documentation/#development-environment-setup","title":"Development Environment Setup","text":"

    The recommended installation method is using pip.

    pip install -r requirements.txt \n

    Once installed, ensure mkdocs is in your PATH:

    mkdocs -V\nmkdocs, version 1.4.3 from /home/$USER/.local/lib/python3.11/site-packages/mkdocs (Python 3.11)\n

    Note

    If you have an older version of mkdocs installed, you may need to use the --upgrade option (e.g., pip install --upgrade mkdocs) to get it to work.

    "},{"location":"developer-guide/documentation/#document-images","title":"Document Images","text":"

    The source for images used in the example documentation can be found in bpfman Upstream Images. Request access if required.

    "},{"location":"developer-guide/image-build/","title":"bpfman Container Images","text":"

    Container images for bpfman are automatically built and pushed to quay.io/ under the :latest tag whenever code is merged into the main branch of the bpfman and bpfman-operator repositories.

    • quay.io/bpfman: This repository contains images needed to run bpfman. It contains the xdp-dispatcher and tc-dispatcher eBPF container images, which are used by bpfman to allow multiple XDP or TC programs to be loaded on a given interface. It also includes the container images which are used to deploy bpfman in a Kubernetes deployment:
      • bpfman: Packages all the bpfman binaries, including bpfman CLI, bpfman-ns and bpfman-rpc.
      • bpfman-agent: Agent that listens to KubeAPI Server and makes calls to bpfman to load or unload eBPF programs based on user intent.
      • bpfman-operator: Operator for deploying bpfman.
      • tc-dispatcher: eBPF container image containing the TC Dispatcher, which is used by bpfman to manage and allow multiple TC based programs to be loaded on a given TC hook point.
      • xdp-dispatcher: eBPF container image containing the XDP Dispatcher, which is used by bpfman to manage and allow multiple XDP based programs to be loaded on a given XDP hook point.
      • csi-node-driver-registrar: CSI Driver used by bpfman.
      • bpfman-operator-bundle: Image containing all the CRDs (Custom-Resource-Definitions) used by bpfman-agent to define Kubernetes objects used to manage eBPF programs.
    • quay.io/bpfman-bytecode: This repository contains eBPF container images for all of the generated bytecode from examples/ and integration-test/.
    • quay.io/bpfman-userspace: This repository contains userspace container images for all of the example programs in examples/.
    "},{"location":"developer-guide/image-build/#multiple-architecture-support","title":"Multiple Architecture Support","text":"

    All bpfman related container images that are automatically built and pushed to quay.io/ contain a manifest file and images built for the following architectures:

    • x86_64
    • arm64
    • ppc64le
    • s390x
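
    To confirm which architectures a published image provides, the manifest can be inspected (a sketch using the docker CLI; podman manifest inspect works similarly):

    docker manifest inspect quay.io/bpfman/bpfman:latest | grep architecture\n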
    "},{"location":"developer-guide/image-build/#locally-build-bpfman-operator-and-bpfman-agent-container-images","title":"Locally Build bpfman-operator and bpfman-agent Container Images","text":"

    When testing or developing in bpfman-operator, it may be necessary to run with updated changes to the bpfman-operator or bpfman-agent container images. The local Makefile will build and load both images based on the current changes:

    cd bpfman-operator/\n\nmake build-images\nmake run-on-kind\n
    "},{"location":"developer-guide/image-build/#locally-build-bpfman-container-image","title":"Locally Build bpfman Container Image","text":"

    When testing or developing in bpfman-operator, it may be necessary to run with updated changes to bpfman. By default, bpfman-agent uses quay.io/bpfman/bpfman:latest. To build the bpfman binaries in a container image, run:

    cd bpfman/\n\ndocker build -f ./Containerfile.bpfman.local . -t quay.io/$QUAY_USER/bpfman:test\n

    Use any registry, image name, and tag; the above is just an example. Next, build and deploy the bpfman-operator and bpfman-agent with the locally built bpfman container image.

    cd bpfman-operator/\n\nBPFMAN_IMG=quay.io/$QUAY_USER/bpfman:test make build-images\nBPFMAN_IMG=quay.io/$QUAY_USER/bpfman:test make run-on-kind\n

    To use the image, the Kind cluster must have access to it. Either push the image to a registry and make it public (via the repo GUI after the push) before executing the make run-on-kind command shown above:

    docker push quay.io/$QUAY_USER/bpfman:test\n

    OR it can be loaded into the kind cluster after the cluster is running:

    kind load docker-image quay.io/$QUAY_USER/bpfman:test --name bpfman-deployment\n

    Now the image should be running in the Kind cluster:

    kubectl get pods -A\n NAMESPACE   NAME                               READY   STATUS    RESTARTS   AGE\n bpfman      bpfman-daemon-87fqg                3/3     Running   0          16m\n bpfman      bpfman-operator-7f67bc7c57-bc6lk   2/2     Running   0          16m\n :\n\nkubectl describe pod -n bpfman bpfman-daemon-87fqg\n Name:             bpfman-daemon-87fqg\n Namespace:        bpfman\n :\n Containers:\n  bpfman:\n    Container ID:  containerd://1777d1810f3648f43df775e9d9af79406eaffc5694aa712da04c3f4e578093b3\n    Image:         quay.io/$QUAY_USER/bpfman:test\n    Image ID:      quay.io/$QUAY_USER/bpfman@sha256:f2c94b7acff6b463fc55232a1896816283521dd1ba5560b0d0779af99f811cd0\n:\n
    "},{"location":"developer-guide/image-build/#locally-build-tc-or-xdp-dispatcher-container-image","title":"Locally Build TC or XDP Dispatcher Container Image","text":"

    The TC and XDP Dispatcher images are automatically built and pushed to quay.io/ under the :latest tag whenever code is merged into the main branch of github.com/bpfman/bpfman. If a dispatcher container image needs to be built locally, use the following steps.

    Build the object files:

    cargo xtask build-ebpf --libbpf-dir ~/src/libbpf/\n\n$ ls .output/tc_dispatcher.bpf/\nbpf_arm64_bpfel.o  bpf_powerpc_bpfel.o  bpf_s390_bpfeb.o  bpf_x86_bpfel.o\n\n$ ls .output/xdp_dispatcher_v2.bpf/\nbpf_arm64_bpfel.o  bpf_powerpc_bpfel.o  bpf_s390_bpfeb.o  bpf_x86_bpfel.o\n

    Then build the bytecode image files:

    bpfman image build -f Containerfile.bytecode -t quay.io/$QUAY_USER/tc-dispatcher:test -b .output/tc_dispatcher.bpf/bpf_x86_bpfel.o\nbpfman image build -f Containerfile.bytecode -t quay.io/$QUAY_USER/xdp-dispatcher:test -b .output/xdp_dispatcher_v2.bpf/bpf_x86_bpfel.o\n

    If a multi-arch image is needed, use:

    bpfman image build -f Containerfile.bytecode.multi.arch -t quay.io/$QUAY_USER/tc-dispatcher:test -c .output/tc_dispatcher.bpf/\nbpfman image build -f Containerfile.bytecode.multi.arch -t quay.io/$QUAY_USER/xdp-dispatcher:test -c .output/xdp_dispatcher_v2.bpf/\n

    Note

    To build images for multiple architectures on a local system, docker (or podman) may need additional configuration settings to allow for caching of non-native images. See https://docs.docker.com/build/building/multi-platform/ for more details.
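
    As an example, one common way to enable cross-architecture builds with docker is to install the QEMU binfmt handlers (a sketch based on the multi-platform documentation referenced above; verify against the docker or podman version in use):

    docker run --privileged --rm tonistiigi/binfmt --install all\n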

    "},{"location":"developer-guide/image-build/#locally-build-example-container-images","title":"Locally Build Example Container Images","text":"

    The example images are automatically built and pushed to quay.io/ under the :latest tag whenever code is merged into the main branch of github.com/bpfman/bpfman. For each example, there is a bytecode and a userspace image. For official bpfman images, bytecode images are pushed to quay.io/bpfman-bytecode and userspace images are pushed to quay.io/bpfman-userspace. For example:

    • quay.io/bpfman-bytecode/go-kprobe-counter
    • quay.io/bpfman-bytecode/go-tc-counter
    • quay.io/bpfman-bytecode/go-tracepoint-counter
    • ...

    • quay.io/bpfman-userspace/go-kprobe-counter

    • quay.io/bpfman-userspace/go-tc-counter
    • quay.io/bpfman-userspace/go-tracepoint-counter
    • ...

    The Makefile in the examples directory has commands to build both sets of images. Image names and tags can be controlled using environment variables. If private images are being generated, both bytecode and userspace images will probably be pushed to the same account, so bytecode and userspace images will need to be distinguished by either fully qualified image names (using IMAGE_TC_BC, IMAGE_TC_US, IMAGE_XDP_BC, IMAGE_XDP_US, etc) or unique tags for each (TAG_BC, TAG_US). See make help in the examples directory and the samples below.
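
    For example, to build the XDP bytecode and userspace images with fully qualified names pushed to a single account (a sketch; the image names shown are hypothetical):

    cd bpfman/examples/\nmake build-bc-xdp IMAGE_XDP_BC=quay.io/$QUAY_USER/go-xdp-counter-bytecode:test\nmake build-us-xdp IMAGE_XDP_US=quay.io/$QUAY_USER/go-xdp-counter-userspace:test\n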

    "},{"location":"developer-guide/image-build/#example-bytecode-container-images","title":"Example Bytecode Container Images","text":"

    If an example bytecode container image needs to be built locally, use the following to build the bytecode container image (optionally passing USER_BC and TAG_BC for the image):

    # Build images for all eBPF program types\n$ make build-bc-images USER_BC=$QUAY_USER TAG_BC=test-bc\n:\n => pushing quay.io/$QUAY_USER/go-kprobe-counter:test-bc with docker\n:\n => pushing quay.io/$QUAY_USER/go-tc-counter:test-bc with docker\n:\n => pushing quay.io/$QUAY_USER/go-tracepoint-counter:test-bc with docker\n:\n\n-- OR --\n\n# Build image for a single eBPF program type, XDP in this example\n$ make build-bc-xdp USER_BC=$QUAY_USER TAG_BC=test-bc\n:\n => pushing quay.io/$QUAY_USER/go-xdp-counter:test-bc with docker\n

    If a multi-arch image is needed, use (appending PLATFORM):

    $ make build-bc-xdp USER_BC=$QUAY_USER TAG_BC=test-bc PLATFORM=linux/amd64,linux/arm64,linux/ppc64le,linux/s390x\n:\n => pushing quay.io/$QUAY_USER/go-xdp-counter:test-bc with docker\n

    Note

    To build images for multiple architectures on a local system, docker (or podman) may need additional configuration settings to allow for caching of non-native images. See https://docs.docker.com/build/building/multi-platform/ for more details.

    "},{"location":"developer-guide/image-build/#example-userspace-container-images","title":"Example Userspace Container Images","text":"

    If an example userspace container image needs to be built locally, use the following to build the userspace container images (optionally passing USER_US and TAG_US for the image):

    cd bpfman/examples/\n\n# Build all images\n$ make build-us-images USER_US=$QUAY_USER TAG_US=test-us\n:\n => pushing quay.io/$QUAY_USER/go-kprobe-counter:test-us with docker\n:\n => pushing quay.io/$QUAY_USER/go-tc-counter:test-us with docker\n:\n => pushing quay.io/$QUAY_USER/go-tracepoint-counter:test-us with docker\n:\n\n-- OR --\n\n# Build a single image\n$ make build-us-xdp USER_US=$QUAY_USER TAG_US=test-us\n:\n => pushing quay.io/$QUAY_USER/go-xdp-counter:test-us with docker\n

    If a multi-arch image is needed, use (appending PLATFORM):

    $ make build-us-xdp USER_US=$QUAY_USER TAG_US=test-us PLATFORM=linux/amd64,linux/arm64,linux/ppc64le,linux/s390x\n:\n => pushing quay.io/$QUAY_USER/go-xdp-counter:test-us with docker\n

    Note

    To build images for multiple architectures on a local system, docker (or podman) may need additional configuration settings to allow for caching of non-native images. See https://docs.docker.com/build/building/multi-platform/ for more details.

    "},{"location":"developer-guide/image-build/#adding-additional-container-images","title":"Adding Additional Container Images","text":"

    When adding a new container image to one of the bpfman repositories, whether it be via the examples or integration tests, several steps need to be performed.

    • One of the maintainers of the bpfman quay.io repositories must:
      • Add the image to the quay.io repository.
      • Make the new image public.
      • On the image, provide Write access to the bpfman+github_actions robot account.
    • Add the new image to the bpfman/.github/workflows/image-build.yml so the image is built and pushed on each PR merge.
    • For examples, update the examples/Makefile to build the new images.
    "},{"location":"developer-guide/image-build/#signing-container-images","title":"Signing Container Images","text":"

    Signing eBPF container images is encouraged and can be easily done using cosign. Below is a summary of the steps needed to sign an image.

    First, install cosign:

    go install github.com/sigstore/cosign/v2/cmd/cosign@latest\n

    Then sign the image. The cosign command will generate a URL. Follow the sigstore URL and log in with GitHub, Google, or Microsoft. That will generate a verification code that will complete the cosign command.

    cosign sign -y quay.io/$QUAY_USER/test-image@sha256:55fe3cfe46409939876be27f7ed4d2948842918145f6cda167d0c31fdea2046f\nGenerating ephemeral keys...\nRetrieving signed certificate...\n:\nhttps://oauth2.sigstore.dev/auth/auth?access_type=online&client_id=sigstore&code_challenge=EwHYBahRxlbli-oEXxS9DoEzEWcyuS_f1lLBhntCVFI&code_challenge_method=S256&nonce=2kR9mJbP0eUxFBAQI9Nhs6LyS4l&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&response_type=code&scope=openid+email&state=2kR9mIqOn6IgmAw46BxVrnEEi0M\nEnter verification code: wq3g58qhw6y25wwibcz2kgzfx\n\nSuccessfully verified SCT...\ntlog entry created with index: 120018072\nPushing signature to: quay.io/$QUAY_USER/test-image\n
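
    Once signed, the signature can be checked with cosign verify. For keyless signatures, cosign v2 expects the certificate identity and OIDC issuer used during signing (a sketch; substitute the email and issuer actually used to sign):

    cosign verify \\\n  --certificate-identity=<EMAIL_USED_TO_SIGN> \\\n  --certificate-oidc-issuer=https://github.com/login/oauth \\\n  quay.io/$QUAY_USER/test-image@sha256:55fe3cfe46409939876be27f7ed4d2948842918145f6cda167d0c31fdea2046f\n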
    "},{"location":"developer-guide/image-build/#containerfiles","title":"Containerfiles","text":"

    There are multiple Containerfiles in the bpfman repositories. Below is a summary of the files and their purpose.

    "},{"location":"developer-guide/image-build/#userspace-containerfiles","title":"Userspace Containerfiles","text":"
    • bpfman/Containerfile.bpfman.local: This file is used to create a userspace container image with bpfman binaries (bpfman CLI, bpfman-rpc and bpfman-ns). It can be used to run local bpfman code in a Kubernetes cluster with the bpfman-operator and bpfman-agent.
    • bpfman/Containerfile.bpfman.multi.arch: This file is used to create a userspace container image with bpfman binaries (bpfman CLI, bpfman-rpc and bpfman-ns), but for multiple architectures. It is used by the bpfman/.github/workflows/image-build.yaml file to build bpfman multi-arch images on every github Pull Request merge. The resulting images are stored in quay.io.
    • bpfman/Containerfile.bpfman.openshift: This file is used to create a userspace container image with bpfman binaries (bpfman CLI, bpfman-rpc and bpfman-ns). It is used by internal OpenShift build processes.
    • bpfman/examples/go-*-counter/container-deployment/Containerfile.go-*-counter: Where '*' is one of the bpfman supported program types (tc, tcx, tracepoint, etc.). These files are used to create the userspace container images associated with the examples.
    • bpfman-operator/Containerfile.bpfman-agent: This file is used to create a userspace container image with bpfman-agent.
    • bpfman-operator/Containerfile.bpfman-agent.openshift: This file is used to create a userspace container image with bpfman-agent. It is used by internal OpenShift build processes.
    • bpfman-operator/Containerfile.bpfman-operator: This file is used to create a userspace container image with bpfman-operator.
    • bpfman-operator/Containerfile.bpfman-operator.openshift: This file is used to create a userspace container image with bpfman-operator. It is used by internal OpenShift build processes.
    • bpfman-operator/Containerfile.bundle: This file is used to create a container image with all the Kubernetes object definitions (ConfigMaps, Custom Resource Definitions (CRDs), Roles, Role Bindings, Service, Service Accounts, etc) bpfman needs to be deployed in a Kubernetes cluster.
    "},{"location":"developer-guide/image-build/#bytecode-containerfiles","title":"Bytecode Containerfiles","text":"
    • bpfman/Containerfile.bytecode: This file is used to create a container image with eBPF bytecode packaged inside. The Containerfile applies labels to the container image describing the bytecode for consumers of the image. See eBPF Bytecode Image Specifications for more details.
    • bpfman/Containerfile.bytecode.multi.arch: This file is used to create a container image with eBPF bytecode packaged inside, but packages eBPF bytecode for multiple architectures. The Containerfile applies labels to the container image describing the bytecode for consumers of the image. See eBPF Bytecode Image Specifications for more details.
    "},{"location":"developer-guide/k8s-selinux-distros/","title":"Running the Examples as Non-Root on SELinux Distributions","text":"

    Developer instances of Kubernetes such as kind often set SELinux to permissive mode, ensuring the security subsystem does not interfere with the local cluster operations. However, in production distributions such as Openshift, EKS, GKE and AWS, where security is paramount, SELinux and other security subsystems are often enabled by default. This, among other things, presents unique challenges when determining how to deploy unprivileged applications with bpfman.

    To deploy the provided examples on SELinux distributions, users must first install the security-profiles-operator. This allows bpfman to deploy custom SELinux policies which grant container users access to bpf maps (i.e., map_read and map_write actions).

    It can easily be installed via operatorhub.io from here.

    Once the security-profiles-operator and bpfman are installed, simply deploy the desired examples:

    cd examples/\nmake deploy-tc-selinux\nmake deploy-xdp-selinux\n:\nmake undeploy-tc-selinux\nmake undeploy-xdp-selinux\n
    "},{"location":"developer-guide/linux-capabilities/","title":"Linux Capabilities","text":"

    Linux divides the privileges traditionally associated with superuser into distinct units, known as capabilities, which can be independently enabled and disabled. Capabilities are a per-thread attribute. See capabilities man-page.

    When bpfman is run as a systemd service, the set of Linux capabilities is restricted to only the required capabilities via the bpfman.service file using the AmbientCapabilities and CapabilityBoundingSet fields (see bpfman.service). All spawned threads are stripped of all capabilities, removing all sudo privileges (see drop_linux_capabilities() usage), leaving only the main thread with the needed set of capabilities.

    "},{"location":"developer-guide/linux-capabilities/#current-bpfman-linux-capabilities","title":"Current bpfman Linux Capabilities","text":"

    Below are the current set of Linux capabilities required by bpfman to operate:

    • CAP_BPF:
      • Required to load BPF programs and create BPF maps.
    • CAP_DAC_READ_SEARCH:
      • Required by Tracepoint programs, needed by aya to check the tracefs mount point. For example, trying to read \"/sys/kernel/tracing\" and \"/sys/kernel/debug/tracing\".
    • CAP_NET_ADMIN:
      • Required for TC programs to attach/detach to/from a qdisc.
    • CAP_SETPCAP:
      • Required to allow bpfman to drop Linux Capabilities on spawned threads.
    • CAP_SYS_ADMIN:
      • Kprobe (Kprobe and Uprobe) and Tracepoint programs are considered perfmon programs and require CAP_PERFMON and CAP_SYS_ADMIN to load.
      • TC and XDP programs are considered admin programs and require CAP_NET_ADMIN and CAP_SYS_ADMIN to load.
    • CAP_SYS_RESOURCE:
      • Required by bpfman to call setrlimit() on RLIMIT_MEMLOCK.
    "},{"location":"developer-guide/linux-capabilities/#debugging-linux-capabilities","title":"Debugging Linux Capabilities","text":"

    As new features are added, the set of Linux capabilities required by bpfman may change over time. The following describes the steps to determine the set of capabilities required by bpfman. If there are any Permission denied (os error 13) type errors when starting or running bpfman as a systemd service, adjusting the Linux capabilities is a good place to start.

    "},{"location":"developer-guide/linux-capabilities/#determine-required-capabilities","title":"Determine Required Capabilities","text":"

    The first step is to turn all capabilities on and see if that fixes the problem. This can be done without recompiling the code by editing bpfman.service. Comment out the finite list of granted capabilities and set both fields to ~, which indicates all capabilities.

    sudo vi /usr/lib/systemd/system/bpfman.service\n:\n[Service]\n:\nAmbientCapabilities=~\nCapabilityBoundingSet=~\n#AmbientCapabilities=CAP_BPF CAP_DAC_OVERRIDE CAP_DAC_READ_SEARCH CAP_NET_ADMIN CAP_PERFMON CAP_SETPCAP CAP_SYS_ADMIN CAP_SYS_RESOURCE\n#CapabilityBoundingSet=CAP_BPF CAP_DAC_OVERRIDE CAP_DAC_READ_SEARCH CAP_NET_ADMIN CAP_PERFMON CAP_SETPCAP CAP_SYS_ADMIN CAP_SYS_RESOURCE\n

    Reload the service file, start/restart bpfman, and watch the bpfman logs to see if the problem is resolved:

    sudo systemctl daemon-reload\nsudo systemctl start bpfman\n

    If so, then the next step is to watch the set of capabilities being requested by bpfman. Run the bcc capable tool to watch capabilities being requested in real time, and restart bpfman:

    $ sudo /usr/share/bcc/tools/capable\nTIME      UID    PID    COMM             CAP  NAME                 AUDIT\n:\n16:36:00  979    75553  tokio-runtime-w  8    CAP_SETPCAP          1\n16:36:00  979    75553  tokio-runtime-w  8    CAP_SETPCAP          1\n16:36:00  979    75553  tokio-runtime-w  8    CAP_SETPCAP          1\n16:36:00  0      616    systemd-journal  19   CAP_SYS_PTRACE       1\n16:36:00  0      616    systemd-journal  19   CAP_SYS_PTRACE       1\n16:36:00  979    75550  bpfman             24   CAP_SYS_RESOURCE     1\n16:36:00  979    75550  bpfman             1    CAP_DAC_OVERRIDE     1\n16:36:00  979    75550  bpfman             21   CAP_SYS_ADMIN        1\n16:36:00  979    75550  bpfman             21   CAP_SYS_ADMIN        1\n16:36:00  0      75555  modprobe         16   CAP_SYS_MODULE       1\n16:36:00  0      628    systemd-udevd    2    CAP_DAC_READ_SEARCH  1\n16:36:00  0      75556  bpf_preload      24   CAP_SYS_RESOURCE     1\n16:36:00  0      75556  bpf_preload      39   CAP_BPF              1\n16:36:00  0      75556  bpf_preload      39   CAP_BPF              1\n16:36:00  0      75556  bpf_preload      39   CAP_BPF              1\n16:36:00  0      75556  bpf_preload      38   CAP_PERFMON          1\n16:36:00  0      75556  bpf_preload      38   CAP_PERFMON          1\n16:36:00  0      75556  bpf_preload      38   CAP_PERFMON          1\n:\n

    Compare the output to the list in bpfman.service and determine the delta.

    "},{"location":"developer-guide/linux-capabilities/#determine-capabilities-per-thread","title":"Determine Capabilities Per Thread","text":"

    For additional debugging, it may be helpful to know the granted capabilities on a per thread basis. As mentioned above, all spawned threads are stripped of all Linux capabilities, so if a thread is requesting a capability, that functionality should be moved off the spawned thread and onto the main thread.

    First, determine the bpfman process id, then determine the set of threads:

    $ ps -ef | grep bpfman\n:\nbpfman       75550       1  0 16:36 ?        00:00:00 /usr/sbin/bpfman\n:\n\n$ ps -T -p 75550\n    PID    SPID TTY          TIME CMD\n  75550   75550 ?        00:00:00 bpfman\n  75550   75551 ?        00:00:00 tokio-runtime-w\n  75550   75552 ?        00:00:00 tokio-runtime-w\n  75550   75553 ?        00:00:00 tokio-runtime-w\n  75550   75554 ?        00:00:00 tokio-runtime-w\n

    Then dump the capabilities of each thread:

    $ grep Cap /proc/75550/status\nCapInh: 000000c001201106\nCapPrm: 000000c001201106\nCapEff: 000000c001201106\nCapBnd: 000000c001201106\nCapAmb: 000000c001201106\n\n$ grep Cap /proc/75551/status\nCapInh: 0000000000000000\nCapPrm: 0000000000000000\nCapEff: 0000000000000000\nCapBnd: 0000000000000000\nCapAmb: 0000000000000000\n\n$ grep Cap /proc/75552/status\nCapInh: 0000000000000000\nCapPrm: 0000000000000000\nCapEff: 0000000000000000\nCapBnd: 0000000000000000\nCapAmb: 0000000000000000\n\n:\n\n$ capsh --decode=000000c001201106\n0x000000c001201106=cap_dac_override,cap_dac_read_search,cap_setpcap,cap_net_admin,cap_sys_admin,cap_sys_resource,cap_perfmon,cap_bpf\n
    "},{"location":"developer-guide/linux-capabilities/#removing-cap_bpf-from-bpfman-clients","title":"Removing CAP_BPF from bpfman Clients","text":"

    One of the advantages of using bpfman is that it is doing all the loading and unloading of eBPF programs, so it requires CAP_BPF, but clients of bpfman are just making gRPC calls to bpfman, so they do not need to be privileged or require CAP_BPF. It must be noted that this is only true for kernels 5.19 or higher. Prior to kernel 5.19, all eBPF syscalls required CAP_BPF, including those used to access maps shared between the BPF program and the userspace program. In kernel 5.19, a change went in that only requires CAP_BPF for map creation (BPF_MAP_CREATE) and loading programs (BPF_PROG_LOAD). See bpf: refine kernel.unprivileged_bpf_disabled behaviour.

    "},{"location":"developer-guide/logging/","title":"Logging","text":"

    This section describes how to enable logging in different bpfman deployments.

    "},{"location":"developer-guide/logging/#local-privileged-bpfman-process","title":"Local Privileged Bpfman Process","text":"

    bpfman uses the env_logger crate to log messages to the terminal. By default, only error messages are logged, but that can be overwritten by setting the RUST_LOG environment variable. Valid values:

    • error
    • warn
    • info
    • debug
    • trace

    Example:

    $ sudo RUST_LOG=info /usr/local/bin/bpfman\n[2022-08-08T20:29:31Z INFO  bpfman::server] Loading static programs from /etc/bpfman/programs.d\n[2022-08-08T20:29:31Z INFO  bpfman::server::bpf] Map veth12fa8e3 to 13\n[2022-08-08T20:29:31Z INFO  bpfman::server] Listening on [::1]:50051\n[2022-08-08T20:29:31Z INFO  bpfman::server::bpf] Program added: 1 programs attached to veth12fa8e3\n[2022-08-08T20:29:31Z INFO  bpfman::server] Loaded static program pass with UUID d9fd88df-d039-4e64-9f63-19f3e08915ce\n
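
    env_logger also supports per-module filter directives, which can cut down on noise when only bpfman's own logs are of interest (a sketch; target=level is standard env_logger syntax):

    sudo RUST_LOG=bpfman=debug /usr/local/bin/bpfman\n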
    "},{"location":"developer-guide/logging/#systemd-service","title":"Systemd Service","text":"

    If bpfman is running as a systemd service, then bpfman will log to journald. By default, info and higher messages are logged, but as with env_logger, that can be overwritten by setting the RUST_LOG environment variable.

    Example:

    sudo vi /usr/lib/systemd/system/bpfman.service\n[Unit]\nDescription=Run bpfman as a service\nDefaultDependencies=no\nAfter=network.target\n\n[Service]\nEnvironment=\"RUST_LOG=Info\"    <==== Set Log Level Here\nExecStart=/usr/sbin/bpfman system service\nAmbientCapabilities=CAP_BPF CAP_DAC_READ_SEARCH CAP_NET_ADMIN CAP_PERFMON CAP_SYS_ADMIN CAP_SYS_RESOURCE\nCapabilityBoundingSet=CAP_BPF CAP_DAC_READ_SEARCH CAP_NET_ADMIN CAP_PERFMON CAP_SYS_ADMIN CAP_SYS_RESOURCE\n

    Start the service:

    sudo systemctl daemon-reload\nsudo systemctl start bpfman.service\n

    Check the logs:

    $ sudo journalctl -f -u bpfman\nAug 08 16:25:04 ebpf03 systemd[1]: Started bpfman.service - Run bpfman as a service.\nAug 08 16:25:04 ebpf03 bpfman[180118]: Loading static programs from /etc/bpfman/programs.d\nAug 08 16:25:04 ebpf03 bpfman[180118]: Map veth12fa8e3 to 13\nAug 08 16:25:04 ebpf03 bpfman[180118]: Listening on [::1]:50051\nAug 08 16:25:04 ebpf03 bpfman[180118]: Program added: 1 programs attached to veth12fa8e3\nAug 08 16:25:04 ebpf03 bpfman[180118]: Loaded static program pass with UUID a3ffa14a-786d-48ad-b0cd-a4802f0f10b6\n

    Stop the service:

    sudo systemctl stop bpfman.service\n
    "},{"location":"developer-guide/logging/#kubernetes-deployment","title":"Kubernetes Deployment","text":"

    When bpfman is run in a Kubernetes deployment, there is the bpfman Daemonset that runs on every node and the bpfman Operator that runs on the control plane:

    kubectl get pods -A\nNAMESPACE            NAME                                                    READY   STATUS    RESTARTS   AGE\nbpfman                 bpfman-daemon-dgqzw                                       2/2     Running   0          3d22h\nbpfman                 bpfman-daemon-gqsgd                                       2/2     Running   0          3d22h\nbpfman                 bpfman-daemon-zx9xr                                       2/2     Running   0          3d22h\nbpfman                 bpfman-operator-7fbf4888c4-z8w76                          2/2     Running   0          3d22h\n:\n
    "},{"location":"developer-guide/logging/#bpfman-daemonset","title":"bpfman Daemonset","text":"

    bpfman and bpfman-agent are running in the bpfman daemonset.

    "},{"location":"developer-guide/logging/#view-logs","title":"View Logs","text":"

    To view the bpfman logs:

    kubectl logs -n bpfman bpfman-daemon-dgqzw -c bpfman\n[2023-05-05T14:41:26Z INFO  bpfman] Has CAP_BPF: false\n[2023-05-05T14:41:26Z INFO  bpfman] Has CAP_SYS_ADMIN: true\n:\n

    To view the bpfman-agent logs:

    kubectl logs -n bpfman bpfman-daemon-dgqzw -c bpfman-agent\n{\"level\":\"info\",\"ts\":\"2023-12-20T20:15:34Z\",\"logger\":\"controller-runtime.metrics\",\"msg\":\"Metrics server is starting to listen\",\"addr\":\":8174\"}\n{\"level\":\"info\",\"ts\":\"2023-12-20T20:15:34Z\",\"logger\":\"setup\",\"msg\":\"Waiting for active connection to bpfman\"}\n{\"level\":\"info\",\"ts\":\"2023-12-20T20:15:34Z\",\"logger\":\"setup\",\"msg\":\"starting Bpfman-Agent\"}\n:\n
    "},{"location":"developer-guide/logging/#change-log-level","title":"Change Log Level","text":"

    To change the log level of the agent or daemon, edit the bpfman-config ConfigMap. The bpfman-operator will detect the change and restart the bpfman daemonset with the updated values.

    kubectl edit configmaps -n bpfman bpfman-config\napiVersion: v1\ndata:\n  bpfman.agent.image: quay.io/bpfman/bpfman-agent:latest\n  bpfman.image: quay.io/bpfman/bpfman:latest\n  bpfman.log.level: info                     <==== Set bpfman Log Level Here\n  bpfman.agent.log.level: info               <==== Set bpfman agent Log Level Here\nkind: ConfigMap\nmetadata:\n  creationTimestamp: \"2023-05-05T14:41:19Z\"\n  name: bpfman-config\n  namespace: bpfman\n  resourceVersion: \"700803\"\n  uid: 0cc04af4-032c-4712-b824-748b321d319b\n

    Valid values for the daemon (bpfman.log.level) are:

    • error
    • warn
    • info
    • debug
    • trace

    trace can be very verbose. More information regarding Rust's env_logger can be found here.

    Valid values for the agent (bpfman.agent.log.level) are:

    • info
    • debug
    • trace
    "},{"location":"developer-guide/logging/#bpfman-operator","title":"bpfman Operator","text":"

    The bpfman Operator is running as a Deployment with a ReplicaSet of one. It runs with the containers bpfman-operator and kube-rbac-proxy.

    "},{"location":"developer-guide/logging/#view-logs_1","title":"View Logs","text":"

    To view the bpfman-operator logs:

    kubectl logs -n bpfman bpfman-operator-7fbf4888c4-z8w76 -c bpfman-operator\n{\"level\":\"info\",\"ts\":\"2023-05-09T18:37:11Z\",\"logger\":\"controller-runtime.metrics\",\"msg\":\"Metrics server is starting to listen\",\"addr\":\"127.0.0.1:8174\"}\n{\"level\":\"info\",\"ts\":\"2023-05-09T18:37:11Z\",\"logger\":\"setup\",\"msg\":\"starting manager\"}\n{\"level\":\"info\",\"ts\":\"2023-05-09T18:37:11Z\",\"msg\":\"Starting server\",\"kind\":\"health probe\",\"addr\":\"[::]:8175\"}\n{\"level\":\"info\",\"ts\":\"2023-05-09T18:37:11Z\",\"msg\":\"Starting server\",\"path\":\"/metrics\",\"kind\":\"metrics\",\"addr\":\"127.0.0.1:8174\"}\nI0509 18:37:11.262885       1 leaderelection.go:248] attempting to acquire leader lease bpfman/8730d955.bpfman.io...\nI0509 18:37:11.268918       1 leaderelection.go:258] successfully acquired lease bpfman/8730d955.bpfman.io\n{\"level\":\"info\",\"ts\":\"2023-05-09T18:37:11Z\",\"msg\":\"Starting EventSource\",\"controller\":\"configmap\",\"controllerGroup\":\"\",\"controllerKind\":\"ConfigMap\",\"source\":\"kind source: *v1.ConfigMap\"}\n:\n

    To view the kube-rbac-proxy logs:

    kubectl logs -n bpfman bpfman-operator-7fbf4888c4-z8w76 -c kube-rbac-proxy\nI0509 18:37:11.063386       1 main.go:186] Valid token audiences: \nI0509 18:37:11.063485       1 main.go:316] Generating self signed cert as no cert is provided\nI0509 18:37:11.955256       1 main.go:366] Starting TCP socket on 0.0.0.0:8443\nI0509 18:37:11.955849       1 main.go:373] Listening securely on 0.0.0.0:8443\n
    "},{"location":"developer-guide/logging/#change-log-level_1","title":"Change Log Level","text":"

    To change the log level, edit the bpfman-operator Deployment. The change will be detected, and the bpfman-operator pod will be restarted with the updated log level.

    kubectl edit deployment -n bpfman bpfman-operator\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  annotations:\n    deployment.kubernetes.io/revision: \"1\"\n    kubectl.kubernetes.io/last-applied-configuration: |\n      {\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/component\":\"manager\",\"app.kubernetes.io/create>\n  creationTimestamp: \"2023-05-09T18:37:08Z\"\n  generation: 1\n:\nspec:\n:\n  template:\n    metadata:\n:\n    spec:\n      containers:\n      - args:\n:\n      - args:\n        - --health-probe-bind-address=:8175\n        - --metrics-bind-address=127.0.0.1:8174\n        - --leader-elect\n        command:\n        - /bpfman-operator\n        env:\n        - name: GO_LOG\n          value: info                   <==== Set Log Level Here\n        image: quay.io/bpfman/bpfman-operator:latest\n        imagePullPolicy: IfNotPresent\n:\n

    Valid values are:

    • error
    • info
    • debug
    • trace
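
    As an alternative to editing the Deployment interactively, the environment variable can be set directly with kubectl set env, which also triggers a pod restart (a sketch; pick the desired level):

    kubectl set env -n bpfman deployment/bpfman-operator GO_LOG=debug\n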
    "},{"location":"developer-guide/observability/","title":"Observability","text":""},{"location":"developer-guide/observability/#ebpf-metrics-exporter","title":"eBPF Metrics Exporter","text":"

    The eBPF Metrics Exporter (bpf-metrics-exporter) exports metrics from the kernel's BPF subsystem to OpenTelemetry.

    Note

    An initial set of metrics has been added as a proof of concept. The metrics can be enriched with other metrics from the system as use cases are identified. For example, a possible improvement could be to correlate process IDs -> containers -> k8s pods.

    "},{"location":"developer-guide/observability/#metrics","title":"Metrics","text":"

    The following metrics are currently exported; this list will continue to expand:

    "},{"location":"developer-guide/observability/#gauges","title":"Gauges","text":"
    • bpf_program_info: Information on each loaded BPF Program
      • Labels:
        • id: The ID of the BPF program
        • name: The name of the BPF program
        • type: The type of the BPF program as a readable string
        • tag: The tag of the BPF program
        • gpl_compatible: Whether the BPF program is GPL compatible
        • map_ids: List of associated maps, if any
        • load_time: The time the BPF program was loaded
    • bpf_map_info: Information on each loaded BPF map
      • Labels:
        • id: The ID of the BPF map
        • name: The name of the BPF map
        • type: The type of the BPF map as a u32, which corresponds to the following kernel enumeration
        • key_size: The key size in bytes for the BPF map
        • value_size: The value size for the BPF map
        • max_entries: The maximum number of entries for the BPF map
        • flags: Load-time specific flags for the BPF map
    • bpf_link_info: Information on each loaded BPF link
      • Labels:
        • id: The ID of the BPF link
        • prog_id: The program ID of the BPF program that is using the link
        • type: The BPF link type as a u32, which corresponds to the following kernel enumeration
    • bpf_program_load_time: The UTC time the program was loaded, in seconds
      • Labels:
        • id: The ID of the BPF program
        • name: The name of the BPF program
        • type: The type of the BPF program as a readable string
    "},{"location":"developer-guide/observability/#counters","title":"Counters","text":"
    • bpf_program_size_jitted_bytes: The size in bytes of the program's JIT-compiled machine code.
      • Labels:
        • id: The ID of the BPF program
        • name: The name of the BPF program
        • type: The type of the BPF program as a readable string
    • bpf_program_size_translated_bytes: The size in bytes of the program's translated bytecode.
      • Labels:
        • id: The ID of the BPF program
        • name: The name of the BPF program
        • type: The type of the BPF program as a readable string
    • bpf_program_mem_bytes: The amount of memory used by the BPF program in bytes.
      • Labels:
        • id: The ID of the BPF program
        • name: The name of the BPF program
        • type: The type of the BPF program as a readable string
    • bpf_program_verified_instructions: The number of verified instructions in the BPF program.
      • Labels:
        • id: The ID of the BPF program
        • name: The name of the BPF program
        • type: The type of the BPF program as a readable string
    • bpf_map_key_size: The size of the BPF map key
      • Labels:
        • id: The ID of the BPF map
        • name: The name of the BPF map
        • type: The type of the BPF map as a u32, which corresponds to the following kernel enumeration
    • bpf_map_value_size: The size of the BPF map value
      • Labels:
        • id: The ID of the BPF map
        • name: The name of the BPF map
        • type: The type of the BPF map as a u32, which corresponds to the following kernel enumeration
    • bpf_map_max_entries: The maximum number of entries allowed for the BPF map
      • Labels:
        • id: The ID of the BPF map
        • name: The name of the BPF map
        • type: The type of the BPF map as a u32, which corresponds to the following kernel enumeration

    Note

    All counters will need to have the suffix _total appended when exposed as a sample metric (For an example, search for _total in bpf-metrics-exporter/metrics-stack.yaml).
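
    For example, assuming the metrics stack deployed in the Try it Out section below exposes Prometheus on its default port (an assumption; check metrics-stack.yaml for the actual port), a suffixed counter can be queried directly:

    curl -s 'http://localhost:9090/api/v1/query?query=sum(bpf_program_mem_bytes_total)'\n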

    "},{"location":"developer-guide/observability/#try-it-out","title":"Try it Out","text":"

    Grafana Stack:

    You'll need a Grafana stack set up. You can quickly deploy one using:

    podman play kube metrics-stack.yaml\n

    Installation:

    bpf-metrics-exporter can be installed using the installation script:

    cd bpfman/\nsudo ./scripts/setup.sh install\n

    Run:

    Then, you can deploy the exporter:

    sudo bpf-metrics-exporter\n

    Verify:

    You can log into Grafana at http://localhost:3000/ using the default credentials admin:admin.

    From there, simply select the default dashboard titled eBPF Subsystem Metrics.

    Cleanup:

    To clean everything up, simply exit the bpf-metrics-exporter process with <CTRL>C and run:

    podman kube down metrics-stack.yaml\n
    "},{"location":"developer-guide/observability/#ebpf-log-exporter","title":"eBPF Log Exporter","text":"

    The eBPF Log Exporter (bpf-log-exporter) is a utility that registers with the kernel auditing service to receive audit events and filters for eBPF-related events. Currently, these events are printed to the terminal. Long term, they will be forwarded as logs to OpenTelemetry.

    Note

    eBPF Log Exporter is a work in progress. Currently, audit events are just printed to a terminal, but the long-term plan is for these events to be forwarded as logs to OpenTelemetry, similar to how bpf-metrics-exporter is implemented.

    Prerequisites:

    • Auditing must be enabled in the kernel.
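
    One way to check whether auditing is enabled, assuming the audit userspace tools are installed, is to query the audit status; enabled 1 or 2 in the output indicates auditing is on:

    sudo auditctl -s\n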

    Installation:

    bpf-log-exporter can be installed using the installation script:

    cd bpfman/\nsudo ./scripts/setup.sh install\n

    Run:

    bpf-log-exporter needs root privileges to run. To see the logs, run with at least info-level logging enabled.

    $ sudo RUST_LOG=info bpf-log-exporter\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301213.084\", prog_id: 326, op: \"LOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301213.095\", prog_id: 327, op: \"LOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301213.109\", prog_id: 326, op: \"UNLOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301213.109\", prog_id: 327, op: \"UNLOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301228.487\", prog_id: 328, op: \"LOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301228.488\", prog_id: 328, op: \"UNLOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301228.488\", prog_id: 329, op: \"LOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n[INFO  bpf_log_exporter] AUDIT_BPF: LogMessage { timestamp: \"1727301228.488\", prog_id: 329, op: \"UNLOAD\", syscall_op: 0, pid: 0, uid: 0, gid: 0, comm: \"\", cmdline: \"\" }\n:\n

    Then use <CTRL>C to stop.

    "},{"location":"developer-guide/release/","title":"Release Process","text":"

    This document describes the process for making a release for the bpfman project.

    "},{"location":"developer-guide/release/#overview","title":"Overview","text":"

    The bpfman project includes both the bpfman and bpfman-operator repositories. When a release is made for the project, a release is created for each repository with the same version number.

    Each bpfman project release comprises the following major components:

    • bpfman (Core library) and bpfman-api (Core gRPC API protobuf definitions) library crates
    • bpfman (CLI) and bpfman-rpc (gRPC server) binary crates
    • bpf-metrics-exporter and bpf-log-exporter binary crates
    • bpfman RPMs stored in the bpfman COPR repository.
    • Kubernetes User Facing Custom Resource Definitions (CRDs)
      • BpfApplication
      • FentryProgram
      • FexitProgram
      • KprobeProgram
      • TcProgram
      • TcxProgram
      • TracepointProgram
      • UprobeProgram
      • XdpProgram
    • Corresponding go pkgs in the form of github.com/bpfman/bpfman which includes the following:
      • github.com/bpfman/bpfman/clients/gobpfman/v1: The go client and helpers for the bpfman gRPC API.
    • Corresponding go pkgs in the form of github.com/bpfman/bpfman-operator which includes the following:
      • github.com/bpfman/bpfman-operator/apis: The go bindings for the bpfman CRD API
      • github.com/bpfman/bpfman-operator/pkg/client: The autogenerated clientset for the bpfman CRD API
      • github.com/bpfman/bpfman-operator/pkg/helpers: The provided bpfman CRD API helpers.
    • The following core component container images with tag <RELEASE_VERSION>:
      • quay.io/bpfman/bpfman-agent
      • quay.io/bpfman/bpfman-operator-bundle
      • quay.io/bpfman/bpfman-operator
      • quay.io/bpfman/bpfman
      • quay.io/bpfman/tc-dispatcher
      • quay.io/bpfman/xdp-dispatcher
    • The relevant example bytecode container images with tag <RELEASE_VERSION> from source code located in the bpfman project:
      • quay.io/bpfman-bytecode/fentry
      • quay.io/bpfman-bytecode/fexit
      • quay.io/bpfman-bytecode/go-app-counter
      • quay.io/bpfman-bytecode/go-kprobe-counter
      • quay.io/bpfman-bytecode/go-tc-counter
      • quay.io/bpfman-bytecode/go-tracepoint-counter
      • quay.io/bpfman-bytecode/go-uprobe-counter
      • quay.io/bpfman-bytecode/go-xdp-counter
      • quay.io/bpfman-bytecode/kprobe
      • quay.io/bpfman-bytecode/kretprobe
      • quay.io/bpfman-bytecode/tc-pass
      • quay.io/bpfman-bytecode/tcx-test
      • quay.io/bpfman-bytecode/tracepoint
      • quay.io/bpfman-bytecode/uprobe
      • quay.io/bpfman-bytecode/uretprobe
      • quay.io/bpfman-bytecode/xdp-pass-private
      • quay.io/bpfman-bytecode/xdp-pass
    • The relevant example userspace container images with tag <RELEASE_VERSION> from source code located in the bpfman project:
      • quay.io/bpfman-userspace/go-app-counter
      • quay.io/bpfman-userspace/go-kprobe-counter
      • quay.io/bpfman-userspace/go-target
      • quay.io/bpfman-userspace/go-tc-counter
      • quay.io/bpfman-userspace/go-tcx-counter
      • quay.io/bpfman-userspace/go-tracepoint-counter
      • quay.io/bpfman-userspace/go-uprobe-counter
      • quay.io/bpfman-userspace/go-xdp-counter
    • The OLM (Operator Lifecycle Manager) for the Kubernetes Operator.
      • This includes a bundle directory on disk as well as the quay.io/bpfman/bpfman-operator-bundle image with the tag <RELEASE_VERSION>.
    "},{"location":"developer-guide/release/#versioning-strategy","title":"Versioning strategy","text":""},{"location":"developer-guide/release/#release-version-number","title":"Release Version Number","text":"

    bpfman uses the MAJOR.MINOR.PATCH scheme defined by SemVer for version numbers in which the components are defined as follows:

    • MAJOR: Incremented for incompatible API changes.
    • MINOR: Incremented for adding functionality in a backward-compatible manner.
    • PATCH: Incremented for backward-compatible bug fixes.

    Major version zero (0.y.z) is for initial development. If the MAJOR version is 0, anything MAY change at any time, and the public API SHOULD NOT be considered stable.

    Releases are tagged in git with the version number prefixed by \"v\". For example, release version 0.5.2 is tagged as v0.5.2.

    "},{"location":"developer-guide/release/#kubernetes-api-versions-eg-v1alpha2-v1beta1","title":"Kubernetes API Versions (e.g. v1alpha2, v1beta1)","text":"

    Within the bpfman-operator, API versions are primarily used to indicate the stability of a resource. For example, if a resource has not yet graduated to beta, it is still possible that it could either be removed from the API or changed in backward incompatible ways. For more information on API versions, refer to the Kubernetes API versioning documentation.

    "},{"location":"developer-guide/release/#releasing-a-new-version","title":"Releasing a new version","text":""},{"location":"developer-guide/release/#release-process-overview","title":"Release Process Overview","text":"

    Since bpfman and bpfman-operator are maintained in separate repositories, each requires an independent release. However, to ensure version consistency, we plan to synchronize the release versions of both projects. Therefore, whenever a release is needed for either bpfman or bpfman-operator, both repositories will be released with the same version number.

    As bpfman-operator depends on bpfman, it is essential to release bpfman first, followed by bpfman-operator.

    Whenever possible, releases are made on the main branch of each repository and should follow the Standard Release from Main Branch process. However, it is sometimes necessary to \"patch\" a previous release with some but not all of the changes that exist on the main branch. In those cases, a patch branch is created from the tag of the release being patched and the release is done on that branch as described in the Patch Branch Release section. Finally, if it is necessary to test the release automation, the simplified process described in the Release Candidate Release section can be used.

    "},{"location":"developer-guide/release/#generating-release-notes","title":"Generating Release Notes","text":"

    The release notes are contained in CHANGELOG files stored in the changelogs directory of each repository. The change log name must contain the release version (e.g., CHANGELOG-v0.5.2.md).

    To simplify the generation of the release notes details, we are using the GitHub release page as described below. Note that we only use the release page to generate a starting point for the release notes, and don't actually create a tag or do a release from it.

    1. Go to the bpfman releases page.
    2. Push the \"Draft a new release\" button.
    3. Enter the new release number in the \"Choose a tag\" pull-down.
    4. Choose the most recent release in the \"Previous tag\" pull-down.
    5. Push the \"Generate release notes\" button.

    The automatically generated output will likely need to be reorganized and cleaned up a bit, but it provides a good starting point.

    The format for the CHANGELOG file is as follows:

    1. Summary of the major changes and highlights. For example: \"The v0.5.2 release is a patch release that introduced...\"
    2. What's Changed (minor changes may be removed from the list generated by GitHub)
    3. Full Changelog
    4. New Contributors
    5. Known Issues

    Notes on generating the changelog

    • Empty sections should be omitted.
    • Sections 2-3 may be copied and pasted from the text generated with the GitHub releases page process described above.
    • The CHANGELOG for a given release is used by GitHub to generate the initial content for that release on the bpfman releases page. However, after the release has been generated, updates to the CHANGELOG file are not automatically reflected on the GitHub releases page, so the GitHub releases page must be manually edited using the GitHub GUI.
    • Unlike most markdown, the generated output on the GitHub releases page renders each newline in the CHANGELOG file. So each paragraph should be on a single line, or it will not flow as intended.
    "},{"location":"developer-guide/release/#standard-release-from-main-branch","title":"Standard Release from Main Branch","text":"

    This section describes the standard release process used when making a release from the main branch and may be used for major, minor, or patch releases. As mentioned above, we first complete the release for bpfman and then follow that up with a release for bpfman-operator.

    "},{"location":"developer-guide/release/#bpfman-release","title":"bpfman Release","text":"
    • Create a new branch in your bpfman fork, for example <githubuser>/release-x.y.z, and use the new branch in the upcoming steps.
    • Make the following changes
      • Add a new changelog for the release using the process described in Generating Release Notes.
      • Update the Cargo.toml version for the workspace:
        • version = \"x.y.z\"
        • bpfman = { version = \"x.y.z\", path = \"./bpfman\" }
        • bpfman-api = { version = \"x.y.z\", path = \"./bpfman-api\" }
        • Note: bpfman-csi does not need to be updated.
      • Run cargo generate-lockfile
      • Update the bpfman version in the bpfman/examples/Makefile:
        • VERSION ?= x.y.z
      • Add new bpfman/examples/config/v0.x.y/ and bpfman/examples/config/v0.x.y-selinux/ config directories for the release version by copying the latest release directories and replacing the current release version with the new one.
      • Add new example config directories for any new examples added since the last release.
      • Update dispatcher tags.
        • Modify the tag for XDP_DISPATCHER_IMAGE and TC_DISPATCHER_IMAGE in bpfman/src/lib.rs from latest to the new release tag.
        • Manually add the new release tag to the latest version of the following dispatcher images:
          • https://quay.io/repository/bpfman/xdp-dispatcher
          • https://quay.io/repository/bpfman/tc-dispatcher
      • Search the code and docs for the current version number without the \"v\" (e.g., 0.5.1) and replace it with the new version number where it makes sense. (Be careful, though, because not all occurrences should be replaced.)
    • Commit the changes, push them to your repo, and open a PR against the bpfman repo.
    • After the PR is reviewed, merged, and all GitHub actions have completed successfully, tag the release with the version number (e.g., v0.5.2).
      • Tag the release using the commit on main where the changelog update merged.
      • A maintainer or someone with write permission on the repo must create the tag.
      • This can be done using the git CLI (see the sketch after this list) or Github's release page.
    • The Release will be automatically created by GitHub actions when the tag is applied.
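
    For example, tagging with the git CLI might look like the following (a sketch; it assumes the upstream remote points at the bpfman repo and <COMMIT_SHA> is the commit on main where the changelog update merged):

    git fetch upstream\ngit tag -a vx.y.z -m \"bpfman vx.y.z\" <COMMIT_SHA>\ngit push upstream vx.y.z\n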

    After these steps are completed, the following should occur:

    • All GitHub actions should complete successfully.
    • The release appears on the GitHub Releases Page.
    • Images are built and updated with the new version tag at:
      • quay.io/bpfman
      • quay.io/bpfman-bytecode
      • quay.io/bpfman-userspace
    • The new version appears at crates.io
    • New RPMs are built and pushed to the bpfman COPR repository.

    After the release is complete, do the following:

    • Run make build-release-yamls from the bpfman/examples directory, and then add the generated yaml files to the release as assets from the GitHub release page.
      • The yaml files generated include:
        • bpfman-crds-install.yaml
        • bpfman-operator-install.yaml
        • go-app-counter-install-selinux.yaml
        • go-app-counter-install.yaml
        • go-kprobe-counter-install-selinux.yaml
        • go-kprobe-counter-install.yaml
        • go-tc-counter-install-selinux.yaml
        • go-tc-counter-install.yaml
        • go-tcx-counter-install-selinux.yaml
        • go-tcx-counter-install.yaml
        • go-tracepoint-counter-install-selinux.yaml
        • go-tracepoint-counter-install.yaml
        • go-uprobe-counter-install-selinux.yaml
        • go-uprobe-counter-install.yaml
        • go-uretprobe-counter-install-selinux.yaml
        • go-uretprobe-counter-install.yaml
        • go-xdp-counter-install-selinux.yaml
        • go-xdp-counter-install.yaml
    • Open another PR that changes the tag for XDP_DISPATCHER_IMAGE and TC_DISPATCHER_IMAGE in bpfman/src/lib.rs back to latest.
    "},{"location":"developer-guide/release/#bpfman-operator-release","title":"bpfman-operator Release","text":"
    • Create a new branch in your bpfman-operator fork, for example <githubuser>/release-x.y.z, and use the new branch in the upcoming steps.
    • Make the following changes
      • Add a new changelog for the release using the process described in Generating Release Notes.
      • Update the bpfman version in go.mod
      • Run the following commands from the bpfman-operator directory:
        go mod vendor\ngo mod tidy\n
      • Update the bpfman-operator version in the Makefile:
        • VERSION ?= x.y.z
      • Run make bundle from the bpfman-operator directory to update the bundle version.
      • Update the version in the links in README.md
      • Update the version in the OpenShift Containerfiles.
    • Commit the changes, push them to your repo, and open a PR against the bpfman-operator repo.
    • After the PR is reviewed, merged, and all GitHub actions have completed successfully, tag the release with the version number (e.g., v0.5.4).
      • Tag the release using the commit on main where the changelog update merged.
      • A maintainer or someone with write permission on the repo must create the tag.
      • This can be done using the git CLI or Github's release page.
    • The Release will be automatically created by GitHub actions when the tag is applied.

    After these steps are completed, the following should occur:

    • All GitHub actions should complete successfully.
    • The release appears on the GitHub Releases Page.
    • Images are built and updated with the new version tag at:
      • quay.io/bpfman/bpfman-operator
      • quay.io/bpfman/bpfman-agent

    After the release completes:

    • Update the community-operator and community-operators-prod repositories with the latest bundle manifests.
      • Run IMAGE_TAG=vx.y.z make bundle from bpfman-operator.
      • Manually update the following tags in bundle/manifests/bpfman-operator.clusterserviceversion.yaml (TODO: automate this step).
        • Change :latest to :vx.y.z for the example image URLs.
        • Change \"containerImage: quay.io/bpfman/bpfman-operator:latest\" to \"containerImage: quay.io/bpfman/bpfman-operator:vx.y.z\".
      • Open a PR in each of the community operator repos with the following:
        • Create a new release directory named x.y.z under operator/bpfman-operator/ in each repo.
        • Copy bpfman-operator/{manifests, metadata, tests} and bpfman-operator/Containerfile.bundle.openshift to the new release directory.
      • Lessons learned about updating the community operators:
        • These PRs usually auto-merge as soon as all checks pass, and once a bundle for a release is merged, it cannot be modified. If any errors are found in the bundle files after merging, the only solution is to create a new release and open a new PR in each community operator repository.
        • If you start a PR in the community-operator repository as a draft and later mark it as ready for review, it will still auto-merge. However, this auto-merge behavior doesn't apply in the community-operators-prod repository, where a maintainer must manually merge the PR if it was started as a draft.
        • To streamline the process, it's recommended that you begin with a draft PR in the community-operator repository to allow for review. Once the PR is reviewed and all checks pass, mark it as ready for review. After it merges, submit a PR with the same bundle to the community-operators-prod repository.
    "},{"location":"developer-guide/release/#patch-branch-release","title":"Patch Branch Release","text":"

    The patch branch release process is essentially the same as that for the standard release with the following exceptions.

    Do the following for each repo:

    • If this is the first patch release for a given release, someone with write permissions on the repo (e.g., one of the maintainers) must create a branch from the git tag of the release you want to patch.
      • If patching vx.y.z, the patch branch should be named release-vx.y.z-patch.
    • Create a branch for your changes from the upstream branch.
    • Cherry pick the relevant commits.
    • Do other fixups if necessary.

    Then, follow the steps from Standard Release from Main Branch section, except open your PRs against the release branch.

    "},{"location":"developer-guide/release/#release-candidate-release","title":"Release Candidate Release","text":"

    Cutting a release candidate is often a great way to test any changes to the release infrastructure before cutting an official release. Make sure release candidate versions contain an rc suffix (e.g., 0.4.0-rc1). This is a lighter-weight process, meaning many of the versioned manifests do not need to be created.

    As in the other releases, first complete the release for bpfman and then follow that up with a release for bpfman-operator.

    "},{"location":"developer-guide/release/#bpfman-release_1","title":"bpfman Release","text":"
    • Create a new branch in your bpfman fork based on the upstream patch branch, for example <githubuser>/release-x.y.z-rc1, and use the new branch in the upcoming steps.
    • Make the following changes
      • Add a new changelog for the release. A full set of release notes is not required. A single line that says something like \"Pre-release 1 for v0.5.2\" is sufficient.
      • Update the Cargo.toml version for the workspace:
        • version = \"x.y.z-rc1\"
        • bpfman = { version = \"x.y.z-rc1\", path = \"./bpfman\" }
        • bpfman-api = { version = \"x.y.z-rc1\", path = \"./bpfman-api\" }
        • Note: bpfman-csi does not need to be updated.
      • Run cargo generate-lockfile
    • Commit the changes, push them to your repo, and open a PR against the bpfman repo.
    • After the PR is reviewed, merged, and all GitHub actions have completed successfully, tag the release with the version number (e.g., vx.y.z-rc1).
      • Tag the release using the commit on main where the changelog update merged.
      • A maintainer or someone with write permission on the repo must create the tag.
      • This can be done using the git CLI or Github's release page.
    • The Release will be automatically created by GitHub actions when the tag is applied.

    After these steps are completed, the following should occur:

    • All GitHub actions should complete successfully.
    • The release appears on the GitHub Releases Page.
    • Images are built and updated with the new version tag at:
      • quay.io/bpfman
      • quay.io/bpfman-bytecode
      • quay.io/bpfman-userspace
    • The new version appears at crates.io
    • A new RPM is built and pushed to the bpfman COPR repository.
    "},{"location":"developer-guide/release/#bpfman-operator-release_1","title":"bpfman-operator Release","text":"
    • Create a new branch in your bpfman-operator fork based on the upstream patch branch, for example <githubuser>/release-x.y.z-rc1, and use the new branch in the upcoming steps.
    • Make the following changes
      • Add a new changelog for the release. A full set of release notes is not required. A single line that says something like \"Pre-release 1 for v0.5.2\" is sufficient.
      • Update the bpfman-operator version in the Makefile:
        • VERSION ?= x.y.z-rc1
    • Commit the changes, push them to your repo, and open a PR against the bpfman-operator repo.
    • After the PR is reviewed, merged, and all GitHub actions have completed successfully, tag the release with the version number (e.g., vx.y.z-rc1).
      • Tag the release using the commit on main where the changelog update merged.
      • A maintainer or someone with write permission on the repo must create the tag.
      • This can be done using the git CLI or Github's release page.
    • The Release will be automatically created by GitHub actions when the tag is applied.

    After these steps are completed, the following should occur:

    • All GitHub actions should complete successfully.
    • The release appears on the GitHub Releases Page.
    • Images are built and updated with the new version tag at:
      • quay.io/bpfman/bpfman-operator
      • quay.io/bpfman/bpfman-agent
    "},{"location":"developer-guide/shipping-bytecode/","title":"eBPF Bytecode Image Specifications","text":""},{"location":"developer-guide/shipping-bytecode/#introduction","title":"Introduction","text":"

    The eBPF Bytecode Image specification defines how to package eBPF bytecode as container images. The initial primary use case focuses on the containerization and deployment of eBPF programs within container orchestration systems such as Kubernetes, where it is necessary to provide a portable way to distribute bytecode to all nodes which need it.

    "},{"location":"developer-guide/shipping-bytecode/#specifications","title":"Specifications","text":"

    We provide two distinct spec variants here to ensure interoperability with existing registries and packages which do not support the new custom media types defined here.

    • custom-data-type-spec
    • backwards-compatible-spec
    "},{"location":"developer-guide/shipping-bytecode/#backwards-compatible-oci-compliant-spec","title":"Backwards compatible OCI compliant spec","text":"

    This variant makes use of existing OCI conventions to represent eBPF Bytecode as container images.

    "},{"location":"developer-guide/shipping-bytecode/#image-layers","title":"Image Layers","text":"

    The container images following this variant must contain exactly one layer whose media type is one of the following:

    • application/vnd.oci.image.layer.v1.tar+gzip or the compatible application/vnd.docker.image.rootfs.diff.tar.gzip

    Additionally, the image layer must contain a valid eBPF object file (generally with a .o extension) placed at the root of the layer (./).

    "},{"location":"developer-guide/shipping-bytecode/#image-labels","title":"Image Labels","text":"

    To provide relevant metadata regarding the bytecode to any consumers, the following labels MUST be defined on the image.

    These labels are dynamic and defined as follows:

    • io.ebpf.programs: A label which defines the eBPF programs stored in the bytecode image. The value of the label must be a valid JSON object with keys specifying the program names and values specifying the program types, e.g., \"{ \"pass\" : \"xdp\" , \"counter\" : \"tc\", ...}\".

    • io.ebpf.maps: A label which defines the eBPF maps stored in the bytecode image. The value of the label must be a valid JSON object with keys specifying the map names and values specifying the map types, e.g., \"{ \"xdp_stats_map\" : \"per_cpu_array\", ...}\".
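
    As an illustration only (a hypothetical podman invocation, not the project's build tooling described below), the labels could be attached at build time like so:

    podman build -t quay.io/<USER>/xdp-pass --label 'io.ebpf.programs={\"pass\":\"xdp\"}' --label 'io.ebpf.maps={\"xdp_stats_map\":\"per_cpu_array\"}' .\n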

    "},{"location":"developer-guide/shipping-bytecode/#building-a-backwards-compatible-oci-compliant-image","title":"Building a Backwards compatible OCI compliant image","text":"

    bpfman does not provide wrappers around compilers like clang, since many eBPF libraries (e.g., aya, libbpf, cilium-ebpf) already do so, meaning users are expected to pass in the correct eBPF program bytecode for the appropriate platform. However, bpfman does provide a few image builder commands to make this whole process easier.

    Example Containerfiles for single-arch and multi-arch can be found at Containerfile.bytecode and Containerfile.bytecode.multi.arch.

    "},{"location":"developer-guide/shipping-bytecode/#host-platform-architecture-image-build","title":"Host Platform Architecture Image Build","text":"
    bpfman image build -b ./examples/go-xdp-counter/bpf_x86_bpfel.o -f Containerfile.bytecode --tag quay.io/<USER>/go-xdp-counter\n

    Where ./examples/go-xdp-counter/bpf_x86_bpfel.o is the path to the bytecode object file.

    Users can also use skopeo to ensure the image follows the backwards compatible version of the spec:

    • skopeo inspect will show the correctly configured labels stored in the configuration layer (application/vnd.oci.image.config.v1+json) of the image.
    skopeo inspect docker://quay.io/bpfman-bytecode/go-xdp-counter\n{\n    \"Name\": \"quay.io/bpfman-bytecode/go-xdp-counter\",\n    \"Digest\": \"sha256:e8377e94c56272937689af88a1a6231d4d594f83218b5cda839eaeeea70a30d3\",\n    \"RepoTags\": [\n        \"latest\"\n    ],\n    \"Created\": \"2024-05-30T09:17:15.327378016-04:00\",\n    \"DockerVersion\": \"\",\n    \"Labels\": {\n        \"io.ebpf.maps\": \"{\\\"xdp_stats_map\\\":\\\"per_cpu_array\\\"}\",\n        \"io.ebpf.programs\": \"{\\\"xdp_stats\\\":\\\"xdp\\\"}\"\n    },\n    \"Architecture\": \"amd64\",\n    \"Os\": \"linux\",\n    \"Layers\": [\n        \"sha256:c0d921d3f0d077da7cdfba8c0240fb513789e7698cdf326f80f30f388c084cff\"\n    ],\n    \"LayersData\": [\n        {\n            \"MIMEType\": \"application/vnd.docker.image.rootfs.diff.tar.gzip\",\n            \"Digest\": \"sha256:c0d921d3f0d077da7cdfba8c0240fb513789e7698cdf326f80f30f388c084cff\",\n            \"Size\": 2656,\n            \"Annotations\": null\n        }\n    ],\n    \"Env\": [\n        \"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"\n    ]\n}\n
    "},{"location":"developer-guide/shipping-bytecode/#multi-architecture-image-build","title":"Multi-Architecture Image build","text":"
    bpfman image build -t quay.io/bpfman-bytecode/go-xdp-counter-multi --container-file ./Containerfile.bytecode.multi.arch --bc-amd64-el ./examples/go-xdp-counter/bpf_x86_bpfel.o --bc-s390x-eb ./examples/go-xdp-counter/bpf_s390_bpfeb.o\n

    To better understand the available architectures, users can use podman manifest inspect:

    podman manifest inspect quay.io/bpfman-bytecode/go-xdp-counter:test-manual-build\n{\n    \"schemaVersion\": 2,\n    \"mediaType\": \"application/vnd.docker.distribution.manifest.list.v2+json\",\n    \"manifests\": [\n        {\n            \"mediaType\": \"application/vnd.oci.image.manifest.v1+json\",\n            \"size\": 478,\n            \"digest\": \"sha256:aed62d2e5867663fac66822422512a722003b40453325fd873bbb5840d78cba9\",\n            \"platform\": {\n                \"architecture\": \"amd64\",\n                \"os\": \"linux\"\n            }\n        },\n        {\n            \"mediaType\": \"application/vnd.oci.image.manifest.v1+json\",\n            \"size\": 478,\n            \"digest\": \"sha256:a348fe2f26dc0851518d8d82e1049d2c39cc2e4f37419fe9231c1967abc4828c\",\n            \"platform\": {\n                \"architecture\": \"arm64\",\n                \"os\": \"linux\"\n            }\n        },\n        {\n            \"mediaType\": \"application/vnd.oci.image.manifest.v1+json\",\n            \"size\": 478,\n            \"digest\": \"sha256:d5c5d41d2d21e0cb5fb79fe9f343e540942c9a1657cf0de96b8f63e43d369743\",\n            \"platform\": {\n                \"architecture\": \"ppc64le\",\n                \"os\": \"linux\"\n            }\n        },\n        {\n            \"mediaType\": \"application/vnd.oci.image.manifest.v1+json\",\n            \"size\": 478,\n            \"digest\": \"sha256:7915c83838d73268690381b313fb84b5509912aa351c98c78204584cced50efd\",\n            \"platform\": {\n                \"architecture\": \"s390x\",\n                \"os\": \"linux\"\n            }\n        },\n    ]\n}\n
    "},{"location":"developer-guide/shipping-bytecode/#custom-oci-compatible-spec","title":"Custom OCI compatible spec","text":"

    This variant of the eBPF bytecode image spec uses custom OCI media types to represent eBPF bytecode as container images. Many toolchains and registries may not support this yet.

    TODO https://github.com/bpfman/bpfman/issues/1162

    "},{"location":"developer-guide/testing/","title":"Testing","text":"

    This document describes the automated testing that is done for each pull request submitted to bpfman, and also provides instructions for running them locally when doing development.

    "},{"location":"developer-guide/testing/#unit-testing","title":"Unit Testing","text":"

    Unit testing is executed as part of the build job by running the following command in the top-level bpfman directory.

    cd bpfman/\ncargo test\n
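
    A subset of the unit tests can be run by passing a name filter to cargo (a sketch; substitute an actual test name or substring):

    cargo test -p bpfman <TEST_NAME>\n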
    "},{"location":"developer-guide/testing/#go-example-tests","title":"Go Example Tests","text":"

    Tests are run for each of the example programs found in the examples directory.

    Detailed description TBD

    "},{"location":"developer-guide/testing/#basic-integration-tests","title":"Basic Integration Tests","text":"

    The full set of basic integration tests are executed by running the following command in the top-level bpfman directory.

    cd bpfman/\ncargo xtask integration-test\n

    Optionally, a subset of the integration tests can be run by adding \"--\" and a list of one or more test names at the end of the command, as shown below.

    cargo xtask integration-test -- test_load_unload_xdp test_proceed_on_xdp\n

    The integration tests start a bpfman daemon process, and issue CLI commands to verify a range of functionality. For XDP and TC programs that are installed on network interfaces, the integration test code creates a test network namespace connected to the host by a veth pair on which the programs are attached. The test code uses the IP subnet 172.37.37.1/24 for the namespace. If that address conflicts with an existing network on the host, it can be changed by setting the BPFMAN_IP_PREFIX environment variable to one that is available as shown below.

    export BPFMAN_IP_PREFIX=\"192.168.50\"\n

    If bpfman logs are needed to help debug an integration test, set RUST_LOG either globally or for a given test.

    export RUST_LOG=info\n
    OR
    RUST_LOG=info cargo xtask integration-test -- test_load_unload_xdp test_proceed_on_xdp\n

    There are two categories of integration tests: basic and e2e. The basic tests verify basic CLI functionality such as loading, listing, and unloading programs. The e2e tests verify more advanced functionality such as the setting of global variables, priority, and proceed-on by installing the programs, creating traffic if needed, and examining logs to confirm that things are running as expected.

    Most eBPF test programs are loaded from container images stored on quay.io. The source code for the eBPF test programs can be found in the tests/integration-test/bpf directory. These programs are compiled by executing cargo xtask build-ebpf --libbpf-dir <libbpf dir>

    We also load some tests from local files to test the bpfman load file option.

    "},{"location":"developer-guide/testing/#kubernetes-operator-tests","title":"Kubernetes Operator Tests","text":""},{"location":"developer-guide/testing/#kubernetes-operator-unit-tests","title":"Kubernetes Operator Unit Tests","text":"

    To run all of the unit tests defined in the bpfman-operator controller code, run make test in the bpfman-operator directory.

    cd bpfman-operator/\nmake test\n
    "},{"location":"developer-guide/testing/#kubernetes-operator-integration-tests","title":"Kubernetes Operator Integration Tests","text":"

    To run the Kubernetes Operator integration tests locally:

    1. Build the example test code userspace images locally.

      cd bpfman/examples/\nmake build-us-images\n
    2. (optional) build the bytecode images

      To rebuild all of the bytecode images for a PR, ask a maintainer to do so; they will be built and generated by GitHub actions with the tag quay.io/bpfman-bytecode/<example>:<branch-name>

    3. Build the bpfman images locally with a unique tag, for example: int-test

      cd bpfman-operator/\nBPFMAN_AGENT_IMG=quay.io/bpfman/bpfman-agent:int-test BPFMAN_OPERATOR_IMG=quay.io/bpfman/bpfman-operator:int-test make build-images\n
    4. Run the integration test suite with the images from the previous step:

      cd bpfman-operator/\nBPFMAN_AGENT_IMG=quay.io/bpfman/bpfman-agent:int-test BPFMAN_OPERATOR_IMG=quay.io/bpfman/bpfman-operator:int-test make test-integration\n

      If an updated bpfman image is required, build it separately and pass it to make test-integration using BPFMAN_IMG. See Locally Build bpfman Container Image.

      Additionally, the integration tests can be configured with the following environment variables (see the example after this list):

      • KEEP_TEST_CLUSTER: If set to true, the test cluster will not be torn down after the integration test suite completes.
      • USE_EXISTING_KIND_CLUSTER: If set to the name of an existing kind cluster, the integration test suite will use that cluster instead of creating a new one.
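
      For example, to keep the kind cluster around for debugging after the suite finishes, reusing the image tags from the previous steps:

      cd bpfman-operator/\nKEEP_TEST_CLUSTER=true BPFMAN_AGENT_IMG=quay.io/bpfman/bpfman-agent:int-test BPFMAN_OPERATOR_IMG=quay.io/bpfman/bpfman-operator:int-test make test-integration\n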
    "},{"location":"developer-guide/xdp-overview/","title":"XDP Tutorial","text":"

    The XDP hook point is unique in that the associated eBPF program attaches to an interface and only one eBPF program is allowed to attach to the XDP hook point for a given interface. Due to this limitation, the libxdp protocol was written. The one program attached to the XDP hook point is an eBPF dispatcher program, which contains a list of 10 stub functions. When XDP programs are loaded, they are loaded as extension programs, which are then called in place of one of the stub functions.

    bpfman leverages the libxdp protocol to allow its users to load up to 10 XDP programs on a given interface. This tutorial will show you how to use bpfman to load multiple XDP programs on an interface.

    Note

    The TC hook point is also associated with an interface. Within bpfman, TC is implemented in a similar fashion to XDP in that it uses a dispatcher with stub functions. TCX is a fairly new kernel feature that improves how the kernel handles multiple TC programs on a given interface. bpfman is in the process of integrating TCX support, which will replace the dispatcher logic for TC. Until then, assume TC behaves in a similar fashion to XDP.

    See Launching bpfman for more detailed instructions on building and loading bpfman. This tutorial assumes bpfman has been built and the bpfman CLI is in $PATH.

    "},{"location":"developer-guide/xdp-overview/#load-xdp-program","title":"Load XDP program","text":"

    We will load the simple xdp-pass program, which permits all traffic to the attached interface, eno3 in this example. We will use a priority of 100. Find a deeper dive into CLI syntax in the CLI Guide.

    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest --name pass \\\n  xdp --iface eno3 --priority 100\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6213\n Map Owner ID:  None\n Map Used By:   6213\n Priority:      100\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6213\n Name:                             pass\n Type:                             xdp\n Loaded At:                        2023-07-17T17:48:10-0400\n Tag:                              4b9d1b2c140e87ce\n GPL Compatible:                   true\n Map IDs:                          [2724]\n BTF ID:                           2834\n Size Translated (bytes):          96\n JITed:                            true\n Size JITed (bytes):               67\n Kernel Allocated Memory (bytes):  4096\n Verified Instruction Count:       9\n

    bpfman load image returns the same data as a bpfman get command. From the output, the Program Id of 6213 can be found in the Kernel State section. This id can be used to perform a bpfman get to retrieve all relevant program data and a bpfman unload when the program needs to be unloaded.

    sudo bpfman list\n Program ID  Name  Type  Load Time\n 6213        pass  xdp   2023-07-17T17:48:10-0400\n

    We can recheck the details about the loaded program with the bpfman get command:

    sudo bpfman get 6213\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6213\n Map Owner ID:  None\n Map Used By:   6213\n Priority:      100\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6213\n Name:                             pass\n Type:                             xdp\n Loaded At:                        2023-07-17T17:48:10-0400\n Tag:                              4b9d1b2c140e87ce\n GPL Compatible:                   true\n Map IDs:                          [2724]\n BTF ID:                           2834\n Size Translated (bytes):          96\n JITed:                            true\n Size JITed (bytes):               67\n Kernel Allocated Memory (bytes):  4096\n Verified Instruction Count:       9\n

    From the output above you can see the program was loaded to position 0 on our interface and thus will be executed first.

    "},{"location":"developer-guide/xdp-overview/#loading-additional-xdp-programs","title":"Loading Additional XDP Programs","text":"

    We will now load 2 more programs with different priorities to demonstrate how bpfman will ensure they are ordered correctly:

    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest --name pass \\\n  xdp --iface eno3 --priority 50\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6215\n Map Owner ID:  None\n Map Used By:   6215\n Priority:      50\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6215\n Name:                             pass\n Type:                             xdp\n:\n
    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest --name pass \\\n  xdp --iface eno3 --priority 200\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6217\n Map Owner ID:  None\n Map Used By:   6217\n Priority:      200\n Iface:         eno3\n Position:      2\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6217\n Name:                             pass\n Type:                             xdp\n:\n

    Using bpfman list we can see all the programs that were loaded.

    sudo bpfman list\n Program ID  Name  Type  Load Time\n 6213        pass  xdp   2023-07-17T17:48:10-0400\n 6215        pass  xdp   2023-07-17T17:52:46-0400\n 6217        pass  xdp   2023-07-17T17:53:57-0400\n

    The lowest priority program is executed first, while the highest is executed last. As can be seen from the detailed output for each command below:

    • Program 6215 is at position 0 with a priority of 50
    • Program 6213 is at position 1 with a priority of 100
    • Program 6217 is at position 2 with a priority of 200
    sudo bpfman get 6213\n Bpfman State\n---------------\n Name:          pass\n:\n Priority:      100\n Iface:         eno3\n Position:      1\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6213\n Name:                             pass\n Type:                             xdp\n:\n
    sudo bpfman get 6215\n Bpfman State\n---------------\n Name:          pass\n:\n Priority:      50\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6215\n Name:                             pass\n Type:                             xdp\n:\n
    sudo bpfman get 6217\n Bpfman State\n---------------\n Name:          pass\n:\n Priority:      200\n Iface:         eno3\n Position:      2\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6217\n Name:                             pass\n Type:                             xdp\n:\n

    By default, the next program in the chain will only be executed if a given program returns pass (see proceed-on field in the bpfman get output above). If the next program in the chain should be called even if a different value is returned, then the program can be loaded with those additional return values using the proceed-on parameter (see bpfman load image xdp --help for list of valid values):

    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest --name pass \\\n  xdp --iface eno3 --priority 150 --proceed-on \"pass\" --proceed-on \"dispatcher_return\"\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6219\n Map Owner ID:  None\n Map Used By:   6219\n Priority:      150\n Iface:         eno3\n Position:      2\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6219\n Name:                             pass\n Type:                             xdp\n:\n

    This program lands at position 2 because its priority of 150 is lower than that of the previous program at that position, which had a priority of 200 and has now been moved to position 3.

    "},{"location":"developer-guide/xdp-overview/#delete-xdp-program","title":"Delete XDP Program","text":"

    Let's remove the program at position 1.

    sudo bpfman list\n Program ID  Name  Type  Load Time\n 6213        pass  xdp   2023-07-17T17:48:10-0400\n 6215        pass  xdp   2023-07-17T17:52:46-0400\n 6217        pass  xdp   2023-07-17T17:53:57-0400\n 6219        pass  xdp   2023-07-17T17:59:41-0400\n
    sudo bpfman unload 6213\n

    And we can verify that it has been removed and the other programs re-ordered:

    sudo bpfman list\n Program ID  Name  Type  Load Time\n 6215        pass  xdp   2023-07-17T17:52:46-0400\n 6217        pass  xdp   2023-07-17T17:53:57-0400\n 6219        pass  xdp   2023-07-17T17:59:41-0400\n
    bpfman get 6215\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6215\n Map Owner ID:  None\n Map Used By:   6215\n Priority:      50\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6215\n Name:                             pass\n Type:                             xdp\n:\n
    bpfman get 6217\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6217\n Map Owner ID:  None\n Map Used By:   6217\n Priority:      200\n Iface:         eno3\n Position:      2\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6217\n Name:                             pass\n Type:                             xdp\n:\n
    bpfman get 6219\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6219\n Map Owner ID:  None\n Map Used By:   6219\n Priority:      150\n Iface:         eno3\n Position:      1\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6219\n Name:                             pass\n Type:                             xdp\n:\n
    "},{"location":"getting-started/building-bpfman/","title":"Setup and Building bpfman","text":"

    This section describes how to build bpfman. If this is the first time building bpfman, the Development Environment Setup section describes all packages needed to build bpfman.

    There is also an option to run prebuilt images from a given release or from an RPM, as opposed to building locally. Jump to:

    • Run bpfman From Release Image for installing from a prebuilt fixed release.
    • Run bpfman From RPM for installing from a prebuilt RPM.
    "},{"location":"getting-started/building-bpfman/#kernel-versions","title":"Kernel Versions","text":"

    eBPF is still a relatively new technology that is being actively developed. To take advantage of this constantly evolving technology, it is best to use the newest kernel version possible. If bpfman needs to be run on an older kernel, this section describes some of the kernel features bpfman relies on to work and the kernel version in which each feature was first introduced.

    Major kernel features leveraged by bpfman:

    • Program Extensions: Program Extensions allow bpfman to load multiple XDP or TC eBPF programs on an interface, which is not natively supported in the kernel. A dispatcher program is loaded as the one program on a given interface, and the user's XDP or TC programs are loaded as extensions to the dispatcher program. Introduced in Kernel 5.6.
    • Pinning: Pinning allows the eBPF program to remain loaded when the loading process (bpfman) is stopped or restarted. Introduced in Kernel 4.11.
    • BPF Perf Link: Supports BPF perf links for tracing programs (Tracepoint, Uprobe and Kprobe), which enables pinning for these program types. Introduced in Kernel 5.15.
    • Relaxed CAP_BPF Requirement: Prior to Kernel 5.19, all eBPF system calls required CAP_BPF. This required userspace programs that wanted to access eBPF maps to have the CAP_BPF Linux capability. With the kernel 5.19 change, CAP_BPF is only required for load and unload requests.
    • TCX: TCX has performance improvements over TC and adds support in the kernel for multiple TCX programs to run on a given TC hook point. TCX support was added in Kernel 6.6.
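
    To check which kernel version a given host is running, and therefore which of the features above are available:

    uname -r\n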

    bpfman tested on older kernel versions:

    • Fedora 34: Kernel 5.17.6-100.fc34.x86_64
      • XDP, TC, Tracepoint, Uprobe and Kprobe programs all loaded with bpfman running on localhost and running as a systemd service.
    • Fedora 33: Kernel 5.14.18-100.fc33.x86_64
      • XDP and TC programs loaded with bpfman running on localhost and running as systemd service once SELinux was disabled (see https://github.com/fedora-selinux/selinux-policy/pull/806).
      • Tracepoint, Uprobe and Kprobe programs failed to load because they require the BPF Perf Link support.
    • Fedora 32: Kernel 5.11.22-100.fc32.x86_64
      • XDP and TC programs loaded with bpfman running on localhost once SELinux was disabled (see https://github.com/fedora-selinux/selinux-policy/pull/806).
      • bpfman fails to run as a systemd service because of some capabilities issues in the bpfman.service file.
      • Tracepoint, Uprobe and Kprobe programs failed to load because they require the BPF Perf Link support.
    • Fedora 31: Kernel 5.8.18-100.fc31.x86_64
      • bpfman was able to start on localhost, but XDP and TC programs wouldn't load because the BPF_LINK_CREATE call was updated in newer kernels.
      • bpfman fails to run as a systemd service because of some capabilities issues in the bpfman.service file.
    "},{"location":"getting-started/building-bpfman/#development-environment-setup","title":"Development Environment Setup","text":"

    To build bpfman, the following packages must be installed.

    "},{"location":"getting-started/building-bpfman/#install-rust-toolchain","title":"Install Rust Toolchain","text":"

    For further detailed instructions, see Rust Stable & Rust Nightly.

    curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh\nsource \"$HOME/.cargo/env\"\nrustup toolchain install nightly -c rustfmt,clippy,rust-src\n
    "},{"location":"getting-started/building-bpfman/#install-llvm","title":"Install LLVM","text":"

    LLVM 11 or later must be installed. Linux package managers should provide a recent enough release.

    dnf based OS:

    sudo dnf install llvm-devel clang-devel elfutils-libelf-devel\n

    apt based OS:

    sudo apt install clang lldb lld libelf-dev gcc-multilib\n
    "},{"location":"getting-started/building-bpfman/#install-ssl-library","title":"Install SSL Library","text":"

    dnf based OS:

    sudo dnf install openssl-devel\n

    apt based OS:

    sudo apt install libssl-dev\n
    "},{"location":"getting-started/building-bpfman/#install-bpf-helper-header-files","title":"Install bpf Helper Header Files","text":"

    apt based OS:

    sudo apt install libbpf-dev\n
    "},{"location":"getting-started/building-bpfman/#install-protobuf-compiler","title":"Install Protobuf Compiler","text":"

    If any of the Protobuf files need to be updated, then the protobuf-compiler will need to be installed. See RPC Protobuf Generation for bpfman use of protobufs and see protoc for more detailed installation instructions.

    dnf based OS:

    sudo dnf install protobuf-compiler\n

    apt based OS:

    sudo apt install protobuf-compiler\n
    "},{"location":"getting-started/building-bpfman/#install-go-protobuf-compiler-extensions","title":"Install GO protobuf Compiler Extensions","text":"

    See Quick Start Guide for gRPC in Go for installation instructions.

    "},{"location":"getting-started/building-bpfman/#local-libbpf","title":"Local libbpf","text":"

    Checkout a local copy of libbpf.

    git clone https://github.com/libbpf/libbpf --branch v0.8.0\n
    "},{"location":"getting-started/building-bpfman/#install-perl","title":"Install perl","text":"

    Install perl:

    dnf based OS:

    sudo dnf install perl\n

    apt based OS:

    sudo apt install perl\n
    "},{"location":"getting-started/building-bpfman/#install-docker-or-podman","title":"Install docker or podman","text":"

    To build the bpfman-agent and bpfman-operator using the provided Makefile and the make build-images command, docker or podman needs to be installed. There are several existing guides:

    • Fedora: https://developer.fedoraproject.org/tools/docker/docker-installation.html
    • Linux: https://docs.docker.com/engine/install/
    "},{"location":"getting-started/building-bpfman/#install-kind","title":"Install Kind","text":"

    Optionally, to test bpfman running in Kubernetes, the easiest method and the one documented throughout the bpfman documentation is to run a Kubernetes Kind cluster. See kind for documentation and installation instructions. kind also requires docker to be installed.

    Note

By default, bpfman-operator deploys bpfman with CSI enabled. CSI requires Kubernetes v1.26 because of a fix (kubernetes/kubernetes#112597) that addresses a gRPC Protocol Error seen in the CSI client code; the fix does not appear to have been backported to older releases. kind v0.20.0 or later is recommended.
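
To confirm the versions in use before debugging further, both tools provide version commands (standard kind and kubectl tooling; exact output format varies by release):

kind version\nkubectl version\n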

    If the following error is seen, it means there is an older version of Kubernetes running and it needs to be upgraded.

    kubectl get pods -A\nNAMESPACE   NAME                               READY   STATUS             RESTARTS      AGE\nbpfman      bpfman-daemon-2hnhx                2/3     CrashLoopBackOff   4 (38s ago)   2m20s\nbpfman      bpfman-operator-6b6cf97857-jbvv4   2/2     Running            0             2m22s\n:\n\nkubectl logs -n bpfman bpfman-daemon-2hnhx -c node-driver-registrar\n:\nE0202 15:33:12.342704       1 main.go:101] Received NotifyRegistrationStatus call: &RegistrationStatus{PluginRegistered:false,Error:RegisterPlugin error -- plugin registration failed with err: rpc error: code = Internal desc = stream terminated by RST_STREAM with error code: PROTOCOL_ERROR,}\nE0202 15:33:12.342723       1 main.go:103] Registration process failed with error: RegisterPlugin error -- plugin registration failed with err: rpc error: code = Internal desc = stream terminated by RST_STREAM with error code: PROTOCOL_ERROR, restarting registration container.\n
    "},{"location":"getting-started/building-bpfman/#install-bash-completion","title":"Install bash-completion","text":"

bpfman uses the Rust crate clap for the CLI implementation, and clap has an optional companion crate clap_complete. For the bash shell, clap_complete leverages bash-completion for CLI command completion, so bash-completion must be installed for CLI completion to work in a bash shell. This feature is optional.

For the CLI completion to work after installation, /etc/profile.d/bash_completion.sh must be sourced in any running sessions. New login sessions should pick it up automatically.

    dnf based OS:

    sudo dnf install bash-completion\nsource /etc/profile.d/bash_completion.sh\n

    apt based OS:

    sudo apt install bash-completion\nsource /etc/profile.d/bash_completion.sh\n
    "},{"location":"getting-started/building-bpfman/#install-yaml-formatter","title":"Install Yaml Formatter","text":"

As part of CI, the Yaml files are validated with a Yaml formatter. Optionally, to verify locally, install the YAML Language Support by Red Hat VS Code extension, or to format in bulk, install prettier.

    To install prettier:

    npm install -g prettier\n

    Then to flag which files are violating the formatting guide, run:

    prettier -l \"*.yaml\"\n

    And to write changes in place, run:

prettier --write \"*.yaml\"\n
    "},{"location":"getting-started/building-bpfman/#install-toml-formatter","title":"Install toml Formatter","text":"

    As part of CI, the toml files are validated with a toml formatter. Optionally, to verify locally, install taplo.

    cargo install taplo-cli\n

    And to verify locally:

    taplo fmt --check\n
    "},{"location":"getting-started/building-bpfman/#clone-the-bpfman-and-bpfman-operator-repositories","title":"Clone the bpfman and bpfman-operator Repositories","text":"

    You can build and run bpfman from anywhere. For simplicity throughout this documentation, all examples will reference bpfman/ and bpfman-operator/ to indicate which repository is being used. bpfman-operator only needs to be cloned if deploying in Kubernetes.

    mkdir -p $HOME/src/\ncd $HOME/src/\ngit clone https://github.com/bpfman/bpfman.git\ngit clone https://github.com/bpfman/bpfman-operator.git\n
    "},{"location":"getting-started/building-bpfman/#building-bpfman","title":"Building bpfman","text":"

    If you are building bpfman for the first time OR the eBPF code has changed:

    cd bpfman/\ncargo xtask build-ebpf --libbpf-dir /path/to/libbpf\n

    If protobuf files have changed (see RPC Protobuf Generation):

    cargo xtask build-proto\n

    To build bpfman:

    cargo build\n
    "},{"location":"getting-started/building-bpfman/#building-cli-tab-completion-files","title":"Building CLI TAB completion files","text":"

    Optionally, to build the CLI TAB completion files, run the following command:

    cd bpfman/\ncargo xtask build-completion\n

    Files are generated for different shells:

    ls .output/completions/\n_bpfman  bpfman.bash  bpfman.elv  bpfman.fish  _bpfman.ps1\n
    "},{"location":"getting-started/building-bpfman/#bash","title":"bash","text":"

For bash, this generates a file that can be used by the Linux bash-completion utility (see Install bash-completion for installation instructions).

    If the files are generated, they are installed automatically when using the install script (i.e. sudo ./scripts/setup.sh install - See Run as a systemd Service). To install the files manually, copy the file associated with a given shell to /usr/share/bash-completion/completions/. For example:

    sudo cp .output/completions/bpfman.bash /usr/share/bash-completion/completions/.\n\nbpfman g<TAB>\n
    "},{"location":"getting-started/building-bpfman/#other-shells","title":"Other shells","text":"

Files are generated for other shells as well (Elvish, Fish, PowerShell and zsh). For these shells, the generated files must be installed manually.

    "},{"location":"getting-started/building-bpfman/#building-cli-manpages","title":"Building CLI Manpages","text":"

    Optionally, to build the CLI Manpage files, run the following command:

    cd bpfman/\ncargo xtask build-man-page\n

    If the files are generated, they are installed automatically when using the install script (i.e. sudo ./scripts/setup.sh install - See Run as a systemd Service). To install the files manually, copy the generated files to /usr/local/share/man/man1/. For example:

    sudo cp .output/manpage/bpfman*.1 /usr/local/share/man/man1/.\n

    Once installed, use man to view the pages.

    man bpfman list\n

    Note

For bpfman commands with subcommands (specifically bpfman load), the generated manpage names join the subcommands with a -. So use man bpfman load-file, man bpfman load-image, man bpfman load-image-xdp, etc. to display the subcommand manpage files.

    "},{"location":"getting-started/building-bpfman/#building-bpfman-operator","title":"Building bpfman-operator","text":"

Building and deploying bpfman-operator is covered in its own section. See Deploying Example eBPF Programs On Kubernetes and Developing the bpfman-operator.

    "},{"location":"getting-started/cli-guide/","title":"CLI Guide","text":"

    bpfman offers several CLI commands to interact with the bpfman daemon. The CLI allows you to load, unload, get and list eBPF programs.

    "},{"location":"getting-started/cli-guide/#notes-for-this-guide","title":"Notes For This Guide","text":"

    As described in other sections, bpfman can be run as either a privileged process or a systemd service. If run as a privileged process, bpfman will most likely be run from your local development branch and will require sudo. Example:

    sudo ./target/debug/bpfman list\n

    If run as a systemd service, bpfman will most likely be installed in your $PATH, and will also require sudo. Example:

    sudo bpfman list\n

The examples here use sudo bpfman in place of sudo ./target/debug/bpfman for readability; use whichever form matches how your system is deployed.

    eBPF object files used in the examples are taken from the examples and integration-test directories from the bpfman repository.

    "},{"location":"getting-started/cli-guide/#basic-syntax","title":"Basic Syntax","text":"

    Below are the commands supported by bpfman.

    An eBPF manager focusing on simplifying the deployment and administration of eBPF programs.\n\nUsage: bpfman <COMMAND>\n\nCommands:\n  load    Load an eBPF program on the system\n  unload  Unload an eBPF program using the Program Id\n  list    List all eBPF programs loaded via bpfman\n  get     Get an eBPF program using the Program Id\n  image   eBPF Bytecode Image related commands\n  help    Print this message or the help of the given subcommand(s)\n\nOptions:\n  -h, --help\n          Print help (see a summary with '-h')\n
    "},{"location":"getting-started/cli-guide/#bpfman-load","title":"bpfman load","text":"

The bpfman load file and bpfman load image commands are used to load eBPF programs. The bpfman load file command is used to load a locally built eBPF program. The bpfman load image command is used to load an eBPF program packaged in an OCI container image from a given registry. Each program type (i.e. <COMMAND>) has its own set of attributes specific to the program type, and those attributes MUST come after the program type is entered. There is a common set of attributes, and those MUST come before the program type is entered.

    sudo bpfman load file --help\nLoad an eBPF program from a local .o file\n\nUsage: bpfman load file [OPTIONS] --path <PATH> --name <NAME> <COMMAND>\n\nCommands:\n  xdp         Install an eBPF program on the XDP hook point for a given interface\n  tc          Install an eBPF program on the TC hook point for a given interface\n  tracepoint  Install an eBPF program on a Tracepoint\n  kprobe      Install a kprobe or kretprobe eBPF probe\n  uprobe      Install a uprobe or uretprobe eBPF probe\n  fentry      Install a fentry eBPF probe\n  fexit       Install a fexit eBPF probe\n  help        Print this message or the help of the given subcommand(s)\n\nOptions:\n  -p, --path <PATH>\n          Required: Location of local bytecode file\n          Example: --path /run/bpfman/examples/go-xdp-counter/bpf_x86_bpfel.o\n\n  -n, --name <NAME>\n          Required: The name of the function that is the entry point for the BPF program\n\n  -g, --global <GLOBAL>...\n          Optional: Global variables to be set when program is loaded.\n          Format: <NAME>=<Hex Value>\n\n          This is a very low level primitive. The caller is responsible for formatting\n          the byte string appropriately considering such things as size, endianness,\n          alignment and packing of data structures.\n\n  -m, --metadata <METADATA>\n          Optional: Specify Key/Value metadata to be attached to a program when it\n          is loaded by bpfman.\n          Format: <KEY>=<VALUE>\n\n          This can later be used to `list` a certain subset of programs which contain\n          the specified metadata.\n          Example: --metadata owner=acme\n\n      --map-owner-id <MAP_OWNER_ID>\n          Optional: Program Id of loaded eBPF program this eBPF program will share a map with.\n          Only used when multiple eBPF programs need to share a map.\n          Example: --map-owner-id 63178\n\n  -h, --help\n          Print help (see a summary with '-h')\n

    and

    sudo bpfman load image --help\nLoad an eBPF program packaged in a OCI container image from a given registry\n\nUsage: bpfman load image [OPTIONS] --image-url <IMAGE_URL> --name <NAME> <COMMAND>\n\nCommands:\n  xdp         Install an eBPF program on the XDP hook point for a given interface\n  tc          Install an eBPF program on the TC hook point for a given interface\n  tracepoint  Install an eBPF program on a Tracepoint\n  kprobe      Install a kprobe or kretprobe eBPF probe\n  uprobe      Install a uprobe or uretprobe eBPF probe\n  fentry      Install a fentry eBPF probe\n  fexit       Install a fexit eBPF probe\n  help        Print this message or the help of the given subcommand(s)\n\nOptions:\n  -i, --image-url <IMAGE_URL>\n          Required: Container Image URL.\n          Example: --image-url quay.io/bpfman-bytecode/xdp_pass:latest\n\n  -r, --registry-auth <REGISTRY_AUTH>\n          Optional: Registry auth for authenticating with the specified image registry.\n          This should be base64 encoded from the '<username>:<password>' string just like\n          it's stored in the docker/podman host config.\n          Example: --registry_auth \"YnjrcKw63PhDcQodiU9hYxQ2\"\n\n  -p, --pull-policy <PULL_POLICY>\n          Optional: Pull policy for remote images.\n\n          [possible values: Always, IfNotPresent, Never]\n\n          [default: IfNotPresent]\n\n  -n, --name <NAME>\n          Required: The name of the function that is the entry point for the eBPF program.\n\n  -g, --global <GLOBAL>...\n          Optional: Global variables to be set when program is loaded.\n          Format: <NAME>=<Hex Value>\n\n          This is a very low level primitive. The caller is responsible for formatting\n          the byte string appropriately considering such things as size, endianness,\n          alignment and packing of data structures.\n\n  -m, --metadata <METADATA>\n          Optional: Specify Key/Value metadata to be attached to a program when it\n          is loaded by bpfman.\n          Format: <KEY>=<VALUE>\n\n          This can later be used to list a certain subset of programs which contain\n          the specified metadata.\n          Example: --metadata owner=acme\n\n      --map-owner-id <MAP_OWNER_ID>\n          Optional: Program Id of loaded eBPF program this eBPF program will share a map with.\n          Only used when multiple eBPF programs need to share a map.\n          Example: --map-owner-id 63178\n\n  -h, --help\n          Print help (see a summary with '-h')\n

    When using either load command, --path, --image-url, --registry-auth, --pull-policy, --name, --global, --metadata and --map-owner-id must be entered before the <COMMAND> (xdp, tc, tracepoint, etc) is entered. Then each <COMMAND> has its own custom parameters (same for both bpfman load file and bpfman load image):

    sudo bpfman load file xdp --help\nInstall an eBPF program on the XDP hook point for a given interface\n\nUsage: bpfman load file --path <PATH> --name <NAME> xdp [OPTIONS] --iface <IFACE> --priority <PRIORITY>\n\nOptions:\n  -i, --iface <IFACE>\n          Required: Interface to load program on\n\n  -p, --priority <PRIORITY>\n          Required: Priority to run program in chain. Lower value runs first\n\n      --proceed-on <PROCEED_ON>...\n          Optional: Proceed to call other programs in chain on this exit code.\n          Multiple values supported by repeating the parameter.\n          Example: --proceed-on \"pass\" --proceed-on \"drop\"\n\n          [possible values: aborted, drop, pass, tx, redirect, dispatcher_return]\n\n          [default: pass, dispatcher_return]\n\n  -h, --help\n          Print help (see a summary with '-h')\n

    Example loading from local file (--path is the fully qualified path):

    cd bpfman/\nsudo bpfman load file --path tests/integration-test/bpf/.output/xdp_pass.bpf.o --name \"pass\" xdp --iface eno3 --priority 100\n

    Example from image in remote repository:

    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest --name \"pass\" xdp --iface eno3 --priority 100\n

    The tc command is similar to xdp, but it also requires the direction option and the proceed-on values are different.

    sudo bpfman load file tc --help\nInstall an eBPF program on the TC hook point for a given interface\n\nUsage: bpfman load file --path <PATH> --name <NAME> tc [OPTIONS] --direction <DIRECTION> --iface <IFACE> --priority <PRIORITY>\n\nOptions:\n  -d, --direction <DIRECTION>\n          Required: Direction to apply program.\n\n          [possible values: ingress, egress]\n\n  -i, --iface <IFACE>\n          Required: Interface to load program on\n\n  -p, --priority <PRIORITY>\n          Required: Priority to run program in chain. Lower value runs first\n\n      --proceed-on <PROCEED_ON>...\n          Optional: Proceed to call other programs in chain on this exit code.\n          Multiple values supported by repeating the parameter.\n          Example: --proceed-on \"ok\" --proceed-on \"pipe\"\n\n          [possible values: unspec, ok, reclassify, shot, pipe, stolen, queued,\n                            repeat, redirect, trap, dispatcher_return]\n\n          [default: ok, pipe, dispatcher_return]\n\n  -h, --help\n          Print help (see a summary with '-h')\n

    The following is an example of the tc command using short option names:

    cd bpfman/\nsudo bpfman load file -p tests/integration-test/bpf/.output/tc_pass.bpf.o -n \"pass\" tc -d ingress -i mynet1 -p 40\n

For the tc_pass.bpf.o program loaded with the command above, the name is taken from the function name, not the SEC() name, as shown in the following snippet:

    SEC(\"classifier/pass\")\nint pass(struct __sk_buff *skb) {\n{\n    :\n}\n
    "},{"location":"getting-started/cli-guide/#additional-load-examples","title":"Additional Load Examples","text":"

    Below are some additional examples of bpfman load commands:

    "},{"location":"getting-started/cli-guide/#fentry","title":"Fentry","text":"
    sudo bpfman load image --image-url quay.io/bpfman-bytecode/fentry:latest --name \"test_fentry\" fentry -f do_unlinkat\n
    "},{"location":"getting-started/cli-guide/#fexit","title":"Fexit","text":"
    sudo bpfman load image --image-url quay.io/bpfman-bytecode/fexit:latest --name \"test_fexit\" fexit -f do_unlinkat\n
    "},{"location":"getting-started/cli-guide/#kprobe","title":"Kprobe","text":"
    sudo bpfman load image --image-url quay.io/bpfman-bytecode/kprobe:latest --name \"my_kprobe\" kprobe -f try_to_wake_up\n
    "},{"location":"getting-started/cli-guide/#kretprobe","title":"Kretprobe","text":"
    sudo bpfman load image --image-url quay.io/bpfman-bytecode/kretprobe:latest --name \"my_kretprobe\" kprobe -f try_to_wake_up -r\n
    "},{"location":"getting-started/cli-guide/#tc","title":"TC","text":"
    cd bpfman/\nsudo bpfman load file --path examples/go-tc-counter/bpf_x86_bpfel.o --name \"stats\" tc --direction ingress --iface eno3 --priority 110\n
    "},{"location":"getting-started/cli-guide/#uprobe","title":"Uprobe","text":"
    sudo bpfman load image --image-url quay.io/bpfman-bytecode/uprobe:latest --name \"my_uprobe\" uprobe -f \"malloc\" -t \"libc\"\n
    "},{"location":"getting-started/cli-guide/#uretprobe","title":"Uretprobe","text":"
    sudo bpfman load image --image-url quay.io/bpfman-bytecode/uretprobe:latest --name \"my_uretprobe\" uprobe -f \"malloc\" -t \"libc\" -r\n
    "},{"location":"getting-started/cli-guide/#xdp","title":"XDP","text":"
    cd bpfman/\nsudo bpfman load file --path bpfman/examples/go-xdp-counter/bpf_x86_bpfel.o --name \"xdp_stats\" xdp --iface eno3 --priority 35\n
    "},{"location":"getting-started/cli-guide/#setting-global-variables-in-ebpf-programs","title":"Setting Global Variables in eBPF Programs","text":"

    Global variables can be set for any eBPF program type when loading as follows:

    cd bpfman/\nsudo bpfman load file -p bpfman/tests/integration-test/bpf/.output/tc_pass.bpf.o -g GLOBAL_u8=01 GLOBAL_u32=0A0B0C0D -n \"pass\" tc -d ingress -i mynet1 -p 40\n

    Note that when setting global variables, the eBPF program being loaded must have global variables named with the strings given, and the size of the value provided must match the size of the given variable. For example, the above command can be used to update the following global variables in an eBPF program.

volatile const __u8 GLOBAL_u8 = 0;\nvolatile const __u32 GLOBAL_u32 = 0;\n
    "},{"location":"getting-started/cli-guide/#modifying-the-proceed-on-behavior","title":"Modifying the Proceed-On Behavior","text":"

    The proceed-on setting applies to xdp and tc programs. For both of these program types, an ordered list of eBPF programs is maintained per attach point. The proceed-on setting determines whether processing will \"proceed\" to the next eBPF program in the list, or terminate processing and return, based on the program's return value. For example, the default proceed-on configuration for an xdp program can be modified as follows:

    cd bpfman/\nsudo bpfman load file -p tests/integration-test/bpf/.output/xdp_pass.bpf.o -n \"pass\" xdp -i mynet1 -p 30 --proceed-on drop pass dispatcher_return\n
    "},{"location":"getting-started/cli-guide/#sharing-maps-between-ebpf-programs","title":"Sharing Maps Between eBPF Programs","text":"

    Warning

Currently, for the map sharing feature to work, the LIBBPF_PIN_BY_NAME flag MUST be set in the shared bpf map definitions. Please see this aya issue for future work that will change this requirement.

    To share maps between eBPF programs, first load the eBPF program that owns the maps. One eBPF program must own the maps.

    cd bpfman/\nsudo bpfman load file --path examples/go-xdp-counter/bpf_x86_bpfel.o -n \"xdp_stats\" xdp --iface eno3 --priority 100\n6371\n

    Next, load additional eBPF programs that will share the existing maps by passing the program id of the eBPF program that owns the maps using the --map-owner-id parameter:

    cd bpfman/\nsudo bpfman load file --path examples/go-xdp-counter/bpf_x86_bpfel.o -n \"xdp_stats\" --map-owner-id 6371 xdp --iface eno3 --priority 100\n6373\n

    Use the bpfman get <PROGRAM_ID> command to display the configuration:

    sudo bpfman list\n Program ID  Name       Type  Load Time\n 6371        xdp_stats  xdp   2023-07-18T16:50:46-0400\n 6373        xdp_stats  xdp   2023-07-18T16:51:06-0400\n
    sudo bpfman get 6371\n Bpfman State\n---------------\n Name:          xdp_stats\n Path:          /home/<$USER>/src/bpfman/examples/go-xdp-counter/bpf_x86_bpfel.o\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6371\n Map Owner ID:  None\n Map Used By:   6371\n                6373\n Priority:      100\n Iface:         eno3\n Position:      1\n Proceed On:    pass, dispatcher_return\n:\n
    sudo bpfman get 6373\n Bpfman State\n---------------\n Name:          xdp_stats\n Path:          /home/<$USER>/src/bpfman/examples/go-xdp-counter/bpf_x86_bpfel.o\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6371\n Map Owner ID:  6371\n Map Used By:   6371\n                6373\n Priority:      100\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n:\n

    As the output shows, the first program (6371) owns the map, with Map Owner ID of None and the Map Pin Path (/run/bpfman/fs/maps/6371) that includes its own ID.

The second program (6373) references the first program via the Map Owner ID set to 6371 and the Map Pin Path (/run/bpfman/fs/maps/6371) set to the same directory as the first program, which includes the first program's ID. The output for both commands shows the map is being used by both programs via the Map Used By field with values of 6371 and 6373.
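
Optionally, the shared Map Pin Path can be inspected on the host to confirm the pinned maps exist; a quick sketch assuming the program IDs from the example above:

sudo ls /run/bpfman/fs/maps/6371\n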

The eBPF programs can be unloaded in any order; the Map Pin Path will not be deleted until all the programs referencing the maps are unloaded:

    sudo bpfman unload 6371\nsudo bpfman unload 6373\n
    "},{"location":"getting-started/cli-guide/#bpfman-list","title":"bpfman list","text":"

    The bpfman list command lists all the bpfman loaded eBPF programs:

    sudo bpfman list\n Program ID  Name              Type        Load Time\n 6201        pass              xdp         2023-07-17T17:17:53-0400\n 6202        sys_enter_openat  tracepoint  2023-07-17T17:19:09-0400\n 6204        stats             tc          2023-07-17T17:20:14-0400\n

    To see all eBPF programs loaded on the system, include the --all option.

    sudo bpfman list --all\n Program ID  Name              Type           Load Time\n 52          restrict_filesy   lsm            2023-05-03T12:53:34-0400\n 166         dump_bpf_map      tracing        2023-05-03T12:53:52-0400\n 167         dump_bpf_prog     tracing        2023-05-03T12:53:52-0400\n 455                           cgroup_device  2023-05-03T12:58:26-0400\n :\n 6194                          cgroup_device  2023-07-17T17:15:23-0400\n 6201        pass              xdp            2023-07-17T17:17:53-0400\n 6202        sys_enter_openat  tracepoint     2023-07-17T17:19:09-0400\n 6203        dispatcher        tc             2023-07-17T17:20:14-0400\n 6204        stats             tc             2023-07-17T17:20:14-0400\n 6207        xdp               xdp            2023-07-17T17:27:13-0400\n 6210        test_fentry       tracing        2023-07-17T17:28:34-0400\n 6212        test_fexit        tracing        2023-07-17T17:29:02-0400\n 6223        my_uprobe         probe          2023-07-17T17:31:45-0400\n 6225        my_kretprobe      probe          2023-07-17T17:32:27-0400\n 6928        my_kprobe         probe          2023-07-17T17:33:49-0400\n

    To filter on a given program type, include the --program-type parameter:

    sudo bpfman list --all --program-type tc\n Program ID  Name        Type  Load Time\n 6203        dispatcher  tc    2023-07-17T17:20:14-0400\n 6204        stats       tc    2023-07-17T17:20:14-0400\n

    Note: The list filters by the Kernel Program Type. kprobe, kretprobe, uprobe and uretprobe all map to the probe Kernel Program Type. fentry and fexit both map to the tracing Kernel Program Type.
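
For example, following the mapping above, a single list command should return all of the probe programs (kprobe, kretprobe, uprobe and uretprobe) by filtering on the probe Kernel Program Type:

sudo bpfman list --all --program-type probe\n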

    "},{"location":"getting-started/cli-guide/#bpfman-get","title":"bpfman get","text":"

To retrieve detailed information for a loaded eBPF program, use the bpfman get <PROGRAM_ID> command. If the eBPF program was loaded via bpfman, then there will be a Bpfman State section with bpfman related attributes and a Kernel State section with kernel information. If the eBPF program was loaded outside of bpfman, then the Bpfman State section will be empty and the Kernel State section will be populated.

    sudo bpfman get 6204\n Bpfman State\n---------------\n Name:          stats\n Image URL:     quay.io/bpfman-bytecode/go-tc-counter:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6204\n Map Owner ID:  None\n Map Used By:   6204\n Priority:      100\n Iface:         eno3\n Position:      0\n Direction:     eg\n Proceed On:    pipe, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6204\n Name:                             stats\n Type:                             tc\n Loaded At:                        2023-07-17T17:20:14-0400\n Tag:                              ead94553702a3742\n GPL Compatible:                   true\n Map IDs:                          [2705]\n BTF ID:                           2821\n Size Translated (bytes):          176\n JITed:                            true\n Size JITed (bytes):               116\n Kernel Allocated Memory (bytes):  4096\n Verified Instruction Count:       24\n
    sudo bpfman get 6190\n Bpfman State\n---------------\nNONE\n\n Kernel State\n----------------------------------\nProgram ID:                        6190\nName:                              None\nType:                              cgroup_skb\nLoaded At:                         2023-07-17T17:15:23-0400\nTag:                               6deef7357e7b4530\nGPL Compatible:                    true\nMap IDs:                           []\nBTF ID:                            0\nSize Translated (bytes):           64\nJITed:                             true\nSize JITed (bytes):                55\nKernel Allocated Memory (bytes):   4096\nVerified Instruction Count:        8\n
    "},{"location":"getting-started/cli-guide/#bpfman-unload","title":"bpfman unload","text":"

    The bpfman unload command takes the program id from the load or list command as a parameter, and unloads the requested eBPF program:

    sudo bpfman unload 6204\n
    sudo bpfman list\n Program ID  Name              Type        Load Time\n 6201        pass              xdp         2023-07-17T17:17:53-0400\n 6202        sys_enter_openat  tracepoint  2023-07-17T17:19:09-0400\n
    "},{"location":"getting-started/cli-guide/#bpfman-image","title":"bpfman image","text":"

The bpfman image commands are a set of container image related commands.

    "},{"location":"getting-started/cli-guide/#bpfman-image-pull","title":"bpfman image pull","text":"

    The bpfman image pull command pulls a given bytecode image for future use by a load command.

    sudo bpfman image pull --help\nPull an eBPF bytecode image from a remote registry\n\nUsage: bpfman image pull [OPTIONS] --image-url <IMAGE_URL>\n\nOptions:\n  -i, --image-url <IMAGE_URL>\n          Required: Container Image URL.\n          Example: --image-url quay.io/bpfman-bytecode/xdp_pass:latest\n\n  -r, --registry-auth <REGISTRY_AUTH>\n          Optional: Registry auth for authenticating with the specified image registry.\n          This should be base64 encoded from the '<username>:<password>' string just like\n          it's stored in the docker/podman host config.\n          Example: --registry_auth \"YnjrcKw63PhDcQodiU9hYxQ2\"\n\n  -p, --pull-policy <PULL_POLICY>\n          Optional: Pull policy for remote images.\n\n          [possible values: Always, IfNotPresent, Never]\n\n          [default: IfNotPresent]\n\n  -h, --help\n          Print help (see a summary with '-h')\n

    Example usage:

    sudo bpfman image pull --image-url quay.io/bpfman-bytecode/xdp_pass:latest\nSuccessfully downloaded bytecode\n

    Then when loaded, the local image will be used:

    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest --pull-policy IfNotPresent xdp --iface eno3 --priority 100\n Bpfman State                                           \n ---------------\n Name:          pass                                  \n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest \n Pull Policy:   IfNotPresent                          \n Global:        None                                  \n Metadata:      None                                  \n Map Pin Path:  /run/bpfman/fs/maps/406681              \n Map Owner ID:  None                                  \n Maps Used By:  None                                  \n Priority:      100                                   \n Iface:         eno3\n Position:      2                                     \n Proceed On:    pass, dispatcher_return               \n\n Kernel State                                               \n ----------------------------------\n Program ID:                       406681                   \n Name:                             pass                     \n Type:                             xdp                      \n Loaded At:                        1917-01-27T01:37:06-0500 \n Tag:                              4b9d1b2c140e87ce         \n GPL Compatible:                   true                     \n Map IDs:                          [736646]                 \n BTF ID:                           555560                   \n Size Translated (bytes):          96                       \n JITted:                           true                     \n Size JITted:                      67                       \n Kernel Allocated Memory (bytes):  4096                     \n Verified Instruction Count:       9                        \n
    "},{"location":"getting-started/cli-guide/#bpfman-image-build","title":"bpfman image build","text":"

The bpfman image build command is a utility command that builds and pushes an eBPF program in an OCI container image, leveraging either docker or podman. The eBPF program bytecode must already be generated. This command calls docker or podman with the proper parameters for building multi-architecture images with the proper labels for an OCI container image.

Since this command leverages docker or podman, a container file (--container-file or -f) is required, along with an image tag (--tag or -t). In addition, the bytecode to package must be included. The bytecode can take several forms, but at least one must be provided:

    • --bytecode or -b: Use this option for a single bytecode object file built for the host architecture. The value of this parameter is a single bytecode object file.
    • --cilium-ebpf-project or -c: Use this option for a cilium/ebpf based project. The value of this parameter is a directory that contains multiple object files for different architectures, where the object files follow the Cilium naming convention with the architecture in the name (i.e. bpf_x86_bpfel.o, bpf_arm64_bpfel.o, bpf_powerpc_bpfel.o, bpf_s390_bpfeb.o).
    • --bc-386-el .. --bc-s390x-eb: Use this option to add one or more architecture specific bytecode files.
    bpfman image build --help\nBuild an eBPF bytecode image from local bytecode objects and push to a registry.\n\nTo use, the --container-file and --tag must be included, as well as a pointer to\nat least one bytecode file that can be passed in several ways. Use either:\n\n* --bytecode: for a single bytecode built for the host architecture.\n\n* --cilium-ebpf-project: for a cilium/ebpf project directory which contains\n    multiple object files for different architectures.\n\n* --bc-386-el .. --bc-s390x-eb: to add one or more architecture specific bytecode files.\n\nExamples:\n   bpfman image build -f Containerfile.bytecode -t quay.io/<USER>/go-xdp-counter:test \\\n     -b ./examples/go-xdp-counter/bpf_x86_bpfel.o\n\nUsage: bpfman image build [OPTIONS] --tag <TAG> --container-file <CONTAINER_FILE> <--bytecode <BYTECODE>|--cilium-ebpf-project <CILIUM_EBPF_PROJECT>|--bc-386-el <BC_386_EL>|--bc-amd64-el <BC_AMD64_EL>|--bc-arm-el <BC_ARM_EL>|--bc-arm64-el <BC_ARM64_EL>|--bc-loong64-el <BC_LOONG64_EL>|--bc-mips-eb <BC_MIPS_EB>|--bc-mipsle-el <BC_MIPSLE_EL>|--bc-mips64-eb <BC_MIPS64_EB>|--bc-mips64le-el <BC_MIPS64LE_EL>|--bc-ppc64-eb <BC_PPC64_EB>|--bc-ppc64le-el <BC_PPC64LE_EL>|--bc-riscv64-el <BC_RISCV64_EL>|--bc-s390x-eb <BC_S390X_EB>>\n\nOptions:\n  -t, --tag <TAG>\n          Required: Name and optionally a tag in the name:tag format.\n          Example: --tag quay.io/bpfman-bytecode/xdp_pass:latest\n\n  -f, --container-file <CONTAINER_FILE>\n          Required: Dockerfile to use for building the image.\n          Example: --container_file Containerfile.bytecode\n\n  -r, --runtime <RUNTIME>\n          Optional: Container runtime to use, works with docker or podman, defaults to docker\n          Example: --runtime podman\n\n  -b, --bytecode <BYTECODE>\n          Optional: bytecode file to use for building the image assuming host architecture.\n          Example: -b ./examples/go-xdp-counter/bpf_x86_bpfel.o\n\n  -c, --cilium-ebpf-project <CILIUM_EBPF_PROJECT>\n          Optional: If specified pull multi-arch bytecode files from a cilium/ebpf formatted project\n          where the bytecode files all contain a standard bpf_<GOARCH>_<(el/eb)>.o tag.\n          Example: --cilium-ebpf-project ./examples/go-xdp-counter\n\n      --bc-386-el <BC_386_EL>\n          Optional: bytecode file to use for building the image assuming amd64 architecture.\n          Example: --bc-386-el ./examples/go-xdp-counter/bpf_386_bpfel.o\n\n      --bc-amd64-el <BC_AMD64_EL>\n          Optional: bytecode file to use for building the image assuming amd64 architecture.\n          Example: --bc-amd64-el ./examples/go-xdp-counter/bpf_x86_bpfel.o\n\n      --bc-arm-el <BC_ARM_EL>\n          Optional: bytecode file to use for building the image assuming arm architecture.\n          Example: --bc-arm-el ./examples/go-xdp-counter/bpf_arm_bpfel.o\n\n      --bc-arm64-el <BC_ARM64_EL>\n          Optional: bytecode file to use for building the image assuming arm64 architecture.\n          Example: --bc-arm64-el ./examples/go-xdp-counter/bpf_arm64_bpfel.o\n\n      --bc-loong64-el <BC_LOONG64_EL>\n          Optional: bytecode file to use for building the image assuming loong64 architecture.\n          Example: --bc-loong64-el ./examples/go-xdp-counter/bpf_loong64_bpfel.o\n\n      --bc-mips-eb <BC_MIPS_EB>\n          Optional: bytecode file to use for building the image assuming mips architecture.\n          Example: --bc-mips-eb ./examples/go-xdp-counter/bpf_mips_bpfeb.o\n\n      --bc-mipsle-el <BC_MIPSLE_EL>\n          Optional: 
bytecode file to use for building the image assuming mipsle architecture.\n          Example: --bc-mipsle-el ./examples/go-xdp-counter/bpf_mipsle_bpfel.o\n\n      --bc-mips64-eb <BC_MIPS64_EB>\n          Optional: bytecode file to use for building the image assuming mips64 architecture.\n          Example: --bc-mips64-eb ./examples/go-xdp-counter/bpf_mips64_bpfeb.o\n\n      --bc-mips64le-el <BC_MIPS64LE_EL>\n          Optional: bytecode file to use for building the image assuming mips64le architecture.\n          Example: --bc-mips64le-el ./examples/go-xdp-counter/bpf_mips64le_bpfel.o\n\n      --bc-ppc64-eb <BC_PPC64_EB>\n          Optional: bytecode file to use for building the image assuming ppc64 architecture.\n          Example: --bc-ppc64-eb ./examples/go-xdp-counter/bpf_ppc64_bpfeb.o\n\n      --bc-ppc64le-el <BC_PPC64LE_EL>\n          Optional: bytecode file to use for building the image assuming ppc64le architecture.\n          Example: --bc-ppc64le-el ./examples/go-xdp-counter/bpf_ppc64le_bpfel.o\n\n      --bc-riscv64-el <BC_RISCV64_EL>\n          Optional: bytecode file to use for building the image assuming riscv64 architecture.\n          Example: --bc-riscv64-el ./examples/go-xdp-counter/bpf_riscv64_bpfel.o\n\n      --bc-s390x-eb <BC_S390X_EB>\n          Optional: bytecode file to use for building the image assuming s390x architecture.\n          Example: --bc-s390x-eb ./examples/go-xdp-counter/bpf_s390x_bpfeb.o\n\n  -h, --help\n          Print help (see a summary with '-h')\n

Below are some different examples of building images. Note that sudo is not required. This command also pushes the image to a registry, so the user must already be logged in to the registry.
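
For example, to log in to quay.io before building and pushing (docker shown; podman login behaves the same way):

docker login quay.io\n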

    Example of single bytecode image:

    bpfman image build -f Containerfile.bytecode -t quay.io/$QUAY_USER/go-xdp-counter:test -b ./examples/go-xdp-counter/bpf_x86_bpfel.o\n

    Example of directory with Cilium generated bytecode objects:

    bpfman image build -f Containerfile.bytecode.multi.arch -t quay.io/$QUAY_USER/go-xdp-counter:test -c ./examples/go-xdp-counter/\n

    Note

    To build images for multiple architectures on a local system, docker (or podman) may need additional configuration settings to allow for caching of non-native images. See https://docs.docker.com/build/building/multi-platform/ for more details.

    "},{"location":"getting-started/cli-guide/#bpfman-image-generate-build-args","title":"bpfman image generate-build-args","text":"

The bpfman image generate-build-args command is a utility command that generates the labels used to package eBPF program bytecode in an OCI container image. It is recommended to use the bpfman image build command to package the eBPF program in an OCI container image, but an alternative is to generate the labels and then build the container image with docker or podman.

    The eBPF program bytecode must already be generated. The bytecode can take several forms, but at least one must be provided:

    • --bytecode or -b: Use this option for a single bytecode object file built for the host architecture. The value of this parameter is a single bytecode object file.
    • --cilium-ebpf-project or -c: Use this option for a cilium/ebpf based project. The value of this parameter is a directory that contains multiple object files for different architectures, where the object files follow the Cilium naming convention with the architecture in the name (i.e. bpf_x86_bpfel.o, bpf_arm64_bpfel.o, bpf_powerpc_bpfel.o, bpf_s390_bpfeb.o).
    • --bc-386-el .. --bc-s390x-eb: Use this option to add one or more architecture specific bytecode files.
    bpfman image generate-build-args --help\nGenerate the OCI image labels for a given bytecode file.\n\nTo use, the --container-file and --tag must be included, as well as a pointer to\nat least one bytecode file that can be passed in several ways. Use either:\n\n* --bytecode: for a single bytecode built for the host architecture.\n\n* --cilium-ebpf-project: for a cilium/ebpf project directory which contains\n    multiple object files for different architectures.\n\n* --bc-386-el .. --bc-s390x-eb: to add one or more architecture specific bytecode files.\n\nExamples:\n  bpfman image generate-build-args --bc-amd64-el ./examples/go-xdp-counter/bpf_x86_bpfel.o\n\nUsage: bpfman image generate-build-args <--bytecode <BYTECODE>|--cilium-ebpf-project <CILIUM_EBPF_PROJECT>|--bc-386-el <BC_386_EL>|--bc-amd64-el <BC_AMD64_EL>|--bc-arm-el <BC_ARM_EL>|--bc-arm64-el <BC_ARM64_EL>|--bc-loong64-el <BC_LOONG64_EL>|--bc-mips-eb <BC_MIPS_EB>|--bc-mipsle-el <BC_MIPSLE_EL>|--bc-mips64-eb <BC_MIPS64_EB>|--bc-mips64le-el <BC_MIPS64LE_EL>|--bc-ppc64-eb <BC_PPC64_EB>|--bc-ppc64le-el <BC_PPC64LE_EL>|--bc-riscv64-el <BC_RISCV64_EL>|--bc-s390x-eb <BC_S390X_EB>>\n\nOptions:\n  -b, --bytecode <BYTECODE>\n          Optional: bytecode file to use for building the image assuming host architecture.\n          Example: -b ./examples/go-xdp-counter/bpf_x86_bpfel.o\n\n  -c, --cilium-ebpf-project <CILIUM_EBPF_PROJECT>\n          Optional: If specified pull multi-arch bytecode files from a cilium/ebpf formatted project\n          where the bytecode files all contain a standard bpf_<GOARCH>_<(el/eb)>.o tag.\n          Example: --cilium-ebpf-project ./examples/go-xdp-counter\n\n      --bc-386-el <BC_386_EL>\n          Optional: bytecode file to use for building the image assuming amd64 architecture.\n          Example: --bc-386-el ./examples/go-xdp-counter/bpf_386_bpfel.o\n\n      --bc-amd64-el <BC_AMD64_EL>\n          Optional: bytecode file to use for building the image assuming amd64 architecture.\n          Example: --bc-amd64-el ./examples/go-xdp-counter/bpf_x86_bpfel.o\n\n      --bc-arm-el <BC_ARM_EL>\n          Optional: bytecode file to use for building the image assuming arm architecture.\n          Example: --bc-arm-el ./examples/go-xdp-counter/bpf_arm_bpfel.o\n\n      --bc-arm64-el <BC_ARM64_EL>\n          Optional: bytecode file to use for building the image assuming arm64 architecture.\n          Example: --bc-arm64-el ./examples/go-xdp-counter/bpf_arm64_bpfel.o\n\n      --bc-loong64-el <BC_LOONG64_EL>\n          Optional: bytecode file to use for building the image assuming loong64 architecture.\n          Example: --bc-loong64-el ./examples/go-xdp-counter/bpf_loong64_bpfel.o\n\n      --bc-mips-eb <BC_MIPS_EB>\n          Optional: bytecode file to use for building the image assuming mips architecture.\n          Example: --bc-mips-eb ./examples/go-xdp-counter/bpf_mips_bpfeb.o\n\n      --bc-mipsle-el <BC_MIPSLE_EL>\n          Optional: bytecode file to use for building the image assuming mipsle architecture.\n          Example: --bc-mipsle-el ./examples/go-xdp-counter/bpf_mipsle_bpfel.o\n\n      --bc-mips64-eb <BC_MIPS64_EB>\n          Optional: bytecode file to use for building the image assuming mips64 architecture.\n          Example: --bc-mips64-eb ./examples/go-xdp-counter/bpf_mips64_bpfeb.o\n\n      --bc-mips64le-el <BC_MIPS64LE_EL>\n          Optional: bytecode file to use for building the image assuming mips64le architecture.\n          Example: --bc-mips64le-el 
./examples/go-xdp-counter/bpf_mips64le_bpfel.o\n\n      --bc-ppc64-eb <BC_PPC64_EB>\n          Optional: bytecode file to use for building the image assuming ppc64 architecture.\n          Example: --bc-ppc64-eb ./examples/go-xdp-counter/bpf_ppc64_bpfeb.o\n\n      --bc-ppc64le-el <BC_PPC64LE_EL>\n          Optional: bytecode file to use for building the image assuming ppc64le architecture.\n          Example: --bc-ppc64le-el ./examples/go-xdp-counter/bpf_ppc64le_bpfel.o\n\n      --bc-riscv64-el <BC_RISCV64_EL>\n          Optional: bytecode file to use for building the image assuming riscv64 architecture.\n          Example: --bc-riscv64-el ./examples/go-xdp-counter/bpf_riscv64_bpfel.o\n\n      --bc-s390x-eb <BC_S390X_EB>\n          Optional: bytecode file to use for building the image assuming s390x architecture.\n          Example: --bc-s390x-eb ./examples/go-xdp-counter/bpf_s390x_bpfeb.o\n\n  -h, --help\n          Print help (see a summary with '-h')\n

    Below are some different examples of generating build arguments. Note that sudo is not required.

    Example of single bytecode image:

    $ bpfman image generate-build-args -b ./examples/go-xdp-counter/bpf_x86_bpfel.o\nBYTECODE_FILE=./examples/go-xdp-counter/bpf_x86_bpfel.o\nPROGRAMS={\"xdp_stats\":\"xdp\"}\nMAPS={\"xdp_stats_map\":\"per_cpu_array\"}\n

    Example of directory with Cilium generated bytecode objects:

    $ bpfman image generate-build-args -c ./examples/go-xdp-counter/\nBC_AMD64_EL=./examples/go-xdp-counter/bpf_x86_bpfel.o\nBC_ARM_EL=./examples/go-xdp-counter/bpf_arm64_bpfel.o\nBC_PPC64LE_EL=./examples/go-xdp-counter/bpf_powerpc_bpfel.o\nBC_S390X_EB=./examples/go-xdp-counter/bpf_s390_bpfeb.o\nPROGRAMS={\"xdp_stats\":\"xdp\"}\nMAPS={\"xdp_stats_map\":\"per_cpu_array\"}\n

Once the labels are generated, the eBPF program can be packaged in an OCI container image using docker or podman by passing the generated labels as build-arg parameters:

    docker build \\\n  --build-arg BYTECODE_FILE=./examples/go-xdp-counter/bpf_x86_bpfel.o \\\n  --build-arg PROGRAMS={\"xdp_stats\":\"xdp\"} \\\n  --build-arg MAPS={\"xdp_stats_map\":\"per_cpu_array\"} \\\n  -f Containerfile.bytecode . -t quay.io/$USER/go-xdp-counter-bytecode:test\n
    "},{"location":"getting-started/example-bpf-k8s/","title":"Deploying Example eBPF Programs On Kubernetes","text":"

This section describes launching eBPF enabled applications on a Kubernetes cluster, where the approach is slightly different from running locally.

    This section assumes there is already a Kubernetes cluster running and bpfman is running in the cluster. See Deploying the bpfman-operator for details on deploying bpfman on a Kubernetes cluster, but the quickest solution is to run a Kubernetes KIND Cluster:

    cd bpfman/bpfman-operator/\nmake run-on-kind\n
    "},{"location":"getting-started/example-bpf-k8s/#loading-ebpf-programs-on-kubernetes","title":"Loading eBPF Programs On Kubernetes","text":"

    Instead of using the userspace program or CLI to load the eBPF bytecode as done in previous sections, the bytecode will be loaded by creating a Kubernetes CRD object. There is a CRD object for each eBPF program type bpfman supports.

    • FentryProgram CRD: Fentry Sample yaml
    • FexitProgram CRD: Fexit Sample yaml
    • KprobeProgram CRD: Kprobe Examples yaml
    • TcProgram CRD: TcProgram Examples yaml
    • TcxProgram CRD: TcxProgram Examples yaml
    • TracepointProgram CRD: Tracepoint Examples yaml
    • UprobeProgram CRD: Uprobe Examples yaml
    • XdpProgram CRD: XdpProgram Examples yaml

    Sample bytecode yaml with XdpProgram CRD:

    cat examples/config/base/go-xdp-counter/bytecode.yaml\napiVersion: bpfman.io/v1alpha1\nkind: XdpProgram\nmetadata:\n  labels:\n    app.kubernetes.io/name: xdpprogram\n  name: go-xdp-counter-example\nspec:\n  name: xdp_stats\n  # Select all nodes\n  nodeselector: {}\n  interfaceselector:\n    primarynodeinterface: true\n  priority: 55\n  bytecode:\n    image:\n      url: quay.io/bpfman-bytecode/go-xdp-counter:latest\n

    Note that all the sample yaml files are configured with the bytecode running on all nodes (nodeselector: {}). This can be configured to run on specific nodes, but the DaemonSet yaml for the userspace program, which is described below, should have an equivalent change.
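
When targeting specific nodes, the labels available for a nodeselector can be listed with standard kubectl (not bpfman-specific):

kubectl get nodes --show-labels\n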

    Assume the following command is run:

    kubectl apply -f examples/config/base/go-xdp-counter/bytecode.yaml\n  xdpprogram.bpfman.io/go-xdp-counter-example created\n

The diagram below shows the go-xdp-counter example, but the other examples operate in a similar fashion.

Following the diagram for the XDP example (Blue numbers):

1. The user creates an XdpProgram object with the parameters associated with the eBPF bytecode, like interface, priority and BPF bytecode image. The name of the XdpProgram object in this example is go-xdp-counter-example. The XdpProgram is applied using kubectl, but in a more practical deployment, the XdpProgram would be applied by the application or a controller.
    2. bpfman-agent, running on each node, is watching for all changes to XdpProgram objects. When it sees a XdpProgram object created or modified, it makes sure a BpfProgram object for that node exists. The name of the BpfProgram object is the XdpProgram object name with the node name and interface or attach point appended. On a KIND Cluster, it would be similar to go-xdp-counter-example-bpfman-deployment-control-plane-eth0.
3. bpfman-agent then determines if it should be running on the given node, and loads or unloads as needed by making gRPC calls to bpfman-rpc, which calls into the bpfman Library. bpfman behaves the same as described in the running locally example.
    4. bpfman-agent finally updates the status of the BpfProgram object.
    5. bpfman-operator watches all BpfProgram objects, and updates the status of the XdpProgram object indicating if the eBPF program has been applied to all the desired nodes or not.

    To retrieve information on the XdpProgram objects:

    kubectl get xdpprograms\nNAME                     BPFFUNCTIONNAME   NODESELECTOR   STATUS\ngo-xdp-counter-example   xdp_stats         {}             ReconcileSuccess\n\n\nkubectl get xdpprograms go-xdp-counter-example -o yaml\napiVersion: bpfman.io/v1alpha1\nkind: XdpProgram\nmetadata:\n  annotations:\n    kubectl.kubernetes.io/last-applied-configuration: |\n      {\"apiVersion\":\"bpfman.io/v1alpha1\",\"kind\":\"XdpProgram\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/name\":\"xdpprogram\"},\"name\":\"go-xdp-counter-example\"},\"spec\":{\"bpffunctionname\":\"xdp_stats\",\"bytecode\":{\"image\":{\"url\":\"quay.io/bpfman-bytecode/go-xdp-counter:latest\"}},\"interfaceselector\":{\"primarynodeinterface\":true},\"nodeselector\":{},\"priority\":55}}\n  creationTimestamp: \"2023-11-06T21:05:15Z\"\n  finalizers:\n  - bpfman.io.operator/finalizer\n  generation: 2\n  labels:\n    app.kubernetes.io/name: xdpprogram\n  name: go-xdp-counter-example\n  resourceVersion: \"3103\"\n  uid: edd45e2e-a40b-4668-ac76-c1f1eb63a23b\nspec:\n  bpffunctionname: xdp_stats\n  bytecode:\n    image:\n      imagepullpolicy: IfNotPresent\n      url: quay.io/bpfman-bytecode/go-xdp-counter:latest\n  interfaceselector:\n    primarynodeinterface: true\n  mapownerselector: {}\n  nodeselector: {}\n  priority: 55\n  proceedon:\n  - pass\n  - dispatcher_return\nstatus:\n  conditions:\n  - lastTransitionTime: \"2023-11-06T21:05:21Z\"\n    message: bpfProgramReconciliation Succeeded on all nodes\n    reason: ReconcileSuccess\n    status: \"True\"\n    type: ReconcileSuccess\n

    To retrieve information on the BpfProgram objects:

    kubectl get bpfprograms\nNAME                                                          TYPE      STATUS         AGE\n:\ngo-xdp-counter-example-bpfman-deployment-control-plane-eth0   xdp       bpfmanLoaded   11m\n\n\nkubectl get bpfprograms go-xdp-counter-example-bpfman-deployment-control-plane-eth0 -o yaml\napiVersion: bpfman.io/v1alpha1\nkind: BpfProgram\nmetadata:\n  annotations:\n    bpfman.io.xdpprogramcontroller/interface: eth0\n    bpfman.io/ProgramId: \"4801\"\n  creationTimestamp: \"2023-11-06T21:05:15Z\"\n  finalizers:\n  - bpfman.io.xdpprogramcontroller/finalizer\n  generation: 1\n  labels:\n    bpfman.io/ownedByProgram: go-xdp-counter-example\n    kubernetes.io/hostname: bpfman-deployment-control-plane\n  name: go-xdp-counter-example-bpfman-deployment-control-plane-eth0\n  ownerReferences:\n  - apiVersion: bpfman.io/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: XdpProgram\n    name: go-xdp-counter-example\n    uid: edd45e2e-a40b-4668-ac76-c1f1eb63a23b\n  resourceVersion: \"3102\"\n  uid: f7ffd156-168b-4dc8-be38-18c42626a631\nspec:\n  type: xdp\nstatus:\n  conditions:\n  - lastTransitionTime: \"2023-11-06T21:05:21Z\"\n    message: Successfully loaded bpfProgram\n    reason: bpfmanLoaded\n    status: \"True\"\n    type: Loaded\n
    "},{"location":"getting-started/example-bpf-k8s/#deploying-an-ebpf-enabled-application-on-kubernetes","title":"Deploying an eBPF enabled application On Kubernetes","text":"

    Here, a userspace container is deployed to consume the map data generated by the eBPF counter program. bpfman provides a Container Storage Interface (CSI) driver for exposing eBPF maps into a userspace container. To avoid having to mount a host directory that contains the map pinned file into the container and forcing the container to have permissions to access that host directory, the CSI driver mounts the map at a specified location in the container. All the examples use CSI, here is go-xdp-counter/deployment.yaml for reference:

    cd bpfman/examples/\ncat config/base/go-xdp-counter/deployment.yaml\n:\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: go-xdp-counter-ds\n  namespace: go-xdp-counter\n  labels:\n    k8s-app: go-xdp-counter\nspec:\n  :\n  template:\n    :\n    spec:\n       :\n      containers:\n      - name: go-xdp-counter\n        :\n        volumeMounts:\n        - name: go-xdp-counter-maps                        <==== 2) VolumeMount in container\n          mountPath: /run/xdp/maps                         <==== 2a) Mount path in the container\n          readOnly: true\n      volumes:\n      - name: go-xdp-counter-maps                          <==== 1) Volume describing the map\n        csi:\n          driver: csi.bpfman.io                             <==== 1a) bpfman CSI Driver\n          volumeAttributes:\n            csi.bpfman.io/program: go-xdp-counter-example   <==== 1b) eBPF Program owning the map\n            csi.bpfman.io/maps: xdp_stats_map               <==== 1c) Map to be exposed to the container\n
    "},{"location":"getting-started/example-bpf-k8s/#loading-a-userspace-container-image","title":"Loading A Userspace Container Image","text":"

    The userspace programs have been pre-built and can be found here:

    • quay.io/bpfman-userspace/go-kprobe-counter:latest
    • quay.io/bpfman-userspace/go-tc-counter:latest
    • quay.io/bpfman-userspace/go-tracepoint-counter:latest
    • quay.io/bpfman-userspace/go-uprobe-counter:latest
    • quay.io/bpfman-userspace/go-xdp-counter:latest

The example yaml files below load from these images.

    • go-kprobe-counter/deployment.yaml
    • go-tc-counter/deployment.yaml
    • go-tracepoint-counter/deployment.yaml
    • go-uprobe-counter/deployment.yaml
    • go-xdp-counter/deployment.yaml

    The userspace program in a Kubernetes Deployment doesn't interact directly with bpfman like it does in the local host deployment. Instead, the userspace program running on each node, if needed, reads the BpfProgram object from the KubeApiServer to gather additional information about the loaded eBPF program. To interact with the KubeApiServer, RBAC must be set up properly to access the BpfProgram object. The bpfman-operator defines the yaml for several ClusterRoles that can be used to access the different bpfman related CRD objects with different access rights. The example userspace containers use the bpfprogram-viewer-role, which allows read-only access to the BpfProgram object. This ClusterRole is created automatically by the bpfman-operator.
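
    For reference, below is a minimal sketch of such a read-only lookup using the standard client-go dynamic client. The group, version, resource and annotation key come from the BpfProgram objects shown earlier on this page; the object name is the example BpfProgram from above, and the real examples wrap this logic in their own helper code:

    package main\n\nimport (\n    \"context\"\n    \"fmt\"\n    \"log\"\n\n    metav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n    \"k8s.io/apimachinery/pkg/runtime/schema\"\n    \"k8s.io/client-go/dynamic\"\n    \"k8s.io/client-go/rest\"\n)\n\nfunc main() {\n    // Uses the pod's ServiceAccount, so the RBAC described above must be in place.\n    cfg, err := rest.InClusterConfig()\n    if err != nil {\n        log.Fatalf(\"building in-cluster config: %v\", err)\n    }\n    client, err := dynamic.NewForConfig(cfg)\n    if err != nil {\n        log.Fatalf(\"creating dynamic client: %v\", err)\n    }\n\n    gvr := schema.GroupVersionResource{Group: \"bpfman.io\", Version: \"v1alpha1\", Resource: \"bpfprograms\"}\n    // Example object name taken from the BpfProgram listed earlier on this page.\n    obj, err := client.Resource(gvr).Get(context.TODO(), \"go-xdp-counter-example-bpfman-deployment-control-plane-eth0\", metav1.GetOptions{})\n    if err != nil {\n        log.Fatalf(\"getting BpfProgram: %v\", err)\n    }\n    fmt.Println(\"kernel program id:\", obj.GetAnnotations()[\"bpfman.io/ProgramId\"])\n}\n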

    The remaining objects (Namespace, ServiceAccount, ClusterRoleBinding and example DaemonSet) can be created for each program type as follows:

    cd bpfman/\nkubectl create -f examples/config/base/go-xdp-counter/deployment.yaml\n

    This creates the go-xdp-counter userspace pod; the other examples operate in a similar fashion.

    Following the diagram for the XDP example (Green numbers):

    1. The userspace program queries the KubeApiServer for a specific BpfProgram object.
    2. The userspace program verifies the BpfProgram has been loaded and uses the map to periodically read the counter values.

    To see if the userspace programs are working, view the logs:

    kubectl get pods -A\nNAMESPACE               NAME                              READY   STATUS    RESTARTS   AGE\nbpfman                  bpfman-daemon-jsgdh               3/3     Running   0          11m\nbpfman                  bpfman-operator-6c5c8887f7-qk28x  2/2     Running   0          12m\ngo-xdp-counter          go-xdp-counter-ds-2hs6g           1/1     Running   0          6m12s\n:\n\nkubectl logs -n go-xdp-counter go-xdp-counter-ds-2hs6g\n2023/11/06 20:27:16 2429 packets received\n2023/11/06 20:27:16 1328474 bytes received\n\n2023/11/06 20:27:19 2429 packets received\n2023/11/06 20:27:19 1328474 bytes received\n\n2023/11/06 20:27:22 2430 packets received\n2023/11/06 20:27:22 1328552 bytes received\n:\n

    To cleanup:

    kubectl delete -f examples/config/base/go-xdp-counter/deployment.yaml\nkubectl delete -f examples/config/base/go-xdp-counter/bytecode.yaml\n
    "},{"location":"getting-started/example-bpf-k8s/#automated-deployment","title":"Automated Deployment","text":"

    The steps above are automated in the Makefile in the examples directory. Run make deploy to load each of the example bytecode and userspace yaml files, then make undeploy to unload them.

    cd bpfman/examples/\nmake deploy\n  for target in deploy-tc deploy-tracepoint deploy-xdp deploy-xdp-ms deploy-kprobe deploy-target deploy-uprobe ; do \\\n      make $target  || true; \\\n  done\n  make[1]: Entering directory '/home/<$USER>/go/src/github.com/bpfman/bpfman/examples'\n  sed 's@URL_BC@quay.io/bpfman-bytecode/go-tc-counter:latest@' config/default/go-tc-counter/patch.yaml.env > config/default/go-tc-counter/patch.yaml\n  cd config/default/go-tc-counter && /home/<$USER>/go/src/github.com/bpfman/bpfman/examples/bin/kustomize edit set image quay.io/bpfman-userspace/go-tc-counter=quay.io/bpfman-userspace/go-tc-counter:latest\n  namespace/go-tc-counter created\n  serviceaccount/bpfman-app-go-tc-counter created\n  daemonset.apps/go-tc-counter-ds created\n  tcprogram.bpfman.io/go-tc-counter-example created\n  :\n  sed 's@URL_BC@quay.io/bpfman-bytecode/go-uprobe-counter:latest@' config/default/go-uprobe-counter/patch.yaml.env > config/default/go-uprobe-counter/patch.yaml\n  cd config/default/go-uprobe-counter && /home/<$USER>/go/src/github.com/bpfman/bpfman/examples/bin/kustomize edit set image quay.io/bpfman-userspace/go-uprobe-counter=quay.io/bpfman-userspace/go-uprobe-counter:latest\n  namespace/go-uprobe-counter created\n  serviceaccount/bpfman-app-go-uprobe-counter created\n  daemonset.apps/go-uprobe-counter-ds created\n  uprobeprogram.bpfman.io/go-uprobe-counter-example created\n  make[1]: Leaving directory '/home/<$USER>/go/src/github.com/bpfman/bpfman/examples'\n\n# Test Away ...\n\nkubectl get pods -A\nNAMESPACE               NAME                                                      READY   STATUS    RESTARTS   AGE\nbpfman                  bpfman-daemon-md2c5                                       3/3     Running   0          2d17h\nbpfman                  bpfman-operator-7f67bc7c57-95zf7                          2/2     Running   0          2d17h\ngo-kprobe-counter       go-kprobe-counter-ds-8dkls                                1/1     Running   0          2m14s\ngo-target               go-target-ds-nbdf5                                        1/1     Running   0          2m14s\ngo-tc-counter           go-tc-counter-ds-7mtcw                                    1/1     Running   0          2m19s\ngo-tracepoint-counter   go-tracepoint-counter-ds-bcbs7                            1/1     Running   0          2m18s\ngo-uprobe-counter       go-uprobe-counter-ds-j26hc                                1/1     Running   0          2m13s\ngo-xdp-counter          go-xdp-counter-ds-nls6s                                   1/1     Running   0          2m17s\n\nkubectl get bpfprograms\nNAME                                                                                                TYPE         STATUS         AGE\ngo-kprobe-counter-example-bpfman-deployment-control-plane-try-to-wake-up                            kprobe       bpfmanLoaded   2m41s\ngo-tc-counter-example-bpfman-deployment-control-plane-eth0                                          tc           bpfmanLoaded   2m46s\ngo-tracepoint-counter-example-bpfman-deployment-control-plane-syscalls-sys-enter-kill               tracepoint   bpfmanLoaded   2m35s\ngo-uprobe-counter-example-bpfman-deployment-control-plane--go-target-go-target-ds-nbdf5-go-target   uprobe       bpfmanLoaded   2m29s\ngo-xdp-counter-example-bpfman-deployment-control-plane-eth0                                         xdp          bpfmanLoaded   2m24s\ngo-xdp-counter-sharing-map-example-bpfman-deployment-control-plane-eth0                             xdp          bpfmanLoaded   2m21s\n\nmake undeploy\n  for target in undeploy-tc undeploy-tracepoint undeploy-xdp undeploy-xdp-ms undeploy-kprobe undeploy-uprobe undeploy-target ; do \\\n      make $target  || true; \\\n  done\n  make[1]: Entering directory '/home/<$USER>/go/src/github.com/bpfman/bpfman/examples'\n  sed 's@URL_BC@quay.io/bpfman-bytecode/go-tc-counter:latest@' config/default/go-tc-counter/patch.yaml.env > config/default/go-tc-counter/patch.yaml\n  cd config/default/go-tc-counter && /home/<$USER>/go/src/github.com/bpfman/bpfman/examples/bin/kustomize edit set image quay.io/bpfman-userspace/go-tc-counter=quay.io/bpfman-userspace/go-tc-counter:latest\n  namespace \"go-tc-counter\" deleted\n  serviceaccount \"bpfman-app-go-tc-counter\" deleted\n  daemonset.apps \"go-tc-counter-ds\" deleted\n  tcprogram.bpfman.io \"go-tc-counter-example\" deleted\n  :\n  kubectl delete -f config/base/go-target/deployment.yaml\n  namespace \"go-target\" deleted\n  serviceaccount \"bpfman-app-go-target\" deleted\n  daemonset.apps \"go-target-ds\" deleted\n  make[1]: Leaving directory '/home/<$USER>/go/src/github.com/bpfman/bpfman/examples'\n

    Individual examples can be loaded and unloaded as well, for example make deploy-xdp and make undeploy-xdp. To see the full set of available commands, run make help:

    make help\n\nUsage:\n  make <target>\n  make deploy TAG=v0.2.0\n  make deploy-xdp IMAGE_XDP_US=quay.io/user1/go-xdp-counter-userspace:test\n\nGeneral\n  help             Display this help.\n\nLocal Dependencies\n  kustomize        Download kustomize locally if necessary.\n\nDevelopment\n  fmt              Run go fmt against code.\n  verify           Verify all the autogenerated code\n\nBuild\n  build            Build all the userspace example code.\n  generate         Run `go generate` to build the bytecode for each of the examples.\n  build-us-images  Build all example userspace images\n  build-bc-images  Build bytecode example userspace images\n  push-us-images   Push all example userspace images\n  push-bc-images   Push all example bytecode images\n  load-us-images-kind  Build and load all example userspace images into kind\n\nDeployment Variables (not commands)\n  TAG              Used to set all images to a fixed tag. Example: make deploy TAG=v0.2.0\n  IMAGE_TC_BC      TC Bytecode image. Example: make deploy-tc IMAGE_TC_BC=quay.io/user1/go-tc-counter-bytecode:test\n  IMAGE_TC_US      TC Userspace image. Example: make deploy-tc IMAGE_TC_US=quay.io/user1/go-tc-counter-userspace:test\n  IMAGE_TP_BC      Tracepoint Bytecode image. Example: make deploy-tracepoint IMAGE_TP_BC=quay.io/user1/go-tracepoint-counter-bytecode:test\n  IMAGE_TP_US      Tracepoint Userspace image. Example: make deploy-tracepoint IMAGE_TP_US=quay.io/user1/go-tracepoint-counter-userspace:test\n  IMAGE_XDP_BC     XDP Bytecode image. Example: make deploy-xdp IMAGE_XDP_BC=quay.io/user1/go-xdp-counter-bytecode:test\n  IMAGE_XDP_US     XDP Userspace image. Example: make deploy-xdp IMAGE_XDP_US=quay.io/user1/go-xdp-counter-userspace:test\n  IMAGE_KP_BC      Kprobe Bytecode image. Example: make deploy-kprobe IMAGE_KP_BC=quay.io/user1/go-kprobe-counter-bytecode:test\n  IMAGE_KP_US      Kprobe Userspace image. Example: make deploy-kprobe IMAGE_KP_US=quay.io/user1/go-kprobe-counter-userspace:test\n  IMAGE_UP_BC      Uprobe Bytecode image. Example: make deploy-uprobe IMAGE_UP_BC=quay.io/user1/go-uprobe-counter-bytecode:test\n  IMAGE_UP_US      Uprobe Userspace image. Example: make deploy-uprobe IMAGE_UP_US=quay.io/user1/go-uprobe-counter-userspace:test\n  IMAGE_GT_US      Uprobe Userspace target. Example: make deploy-target IMAGE_GT_US=quay.io/user1/go-target-userspace:test\n  KIND_CLUSTER_NAME  Name of the deployed cluster to load example images to, defaults to `bpfman-deployment`\n  ignore-not-found  For any undeploy command, set to true to ignore resource not found errors during deletion. Example: make undeploy ignore-not-found=true\n\nDeployment\n  deploy-tc        Deploy go-tc-counter to the cluster specified in ~/.kube/config.\n  undeploy-tc      Undeploy go-tc-counter from the cluster specified in ~/.kube/config.\n  deploy-tracepoint  Deploy go-tracepoint-counter to the cluster specified in ~/.kube/config.\n  undeploy-tracepoint  Undeploy go-tracepoint-counter from the cluster specified in ~/.kube/config.\n  deploy-xdp       Deploy go-xdp-counter to the cluster specified in ~/.kube/config.\n  undeploy-xdp     Undeploy go-xdp-counter from the cluster specified in ~/.kube/config.\n  deploy-xdp-ms    Deploy go-xdp-counter-sharing-map (shares map with go-xdp-counter) to the cluster specified in ~/.kube/config.\n  undeploy-xdp-ms  Undeploy go-xdp-counter-sharing-map from the cluster specified in ~/.kube/config.\n  deploy-kprobe    Deploy go-kprobe-counter to the cluster specified in ~/.kube/config.\n  undeploy-kprobe  Undeploy go-kprobe-counter from the cluster specified in ~/.kube/config.\n  deploy-uprobe    Deploy go-uprobe-counter to the cluster specified in ~/.kube/config.\n  undeploy-uprobe  Undeploy go-uprobe-counter from the cluster specified in ~/.kube/config.\n  deploy-target    Deploy go-target to the cluster specified in ~/.kube/config.\n  undeploy-target  Undeploy go-target from the cluster specified in ~/.kube/config.\n  deploy           Deploy all examples to the cluster specified in ~/.kube/config.\n  undeploy         Undeploy all examples to the cluster specified in ~/.kube/config.\n
    "},{"location":"getting-started/example-bpf-k8s/#building-a-userspace-container-image","title":"Building A Userspace Container Image","text":"

    To build the userspace examples in a container instead of using the pre-built images, run the following build command from the bpfman examples source directory:

    cd bpfman/examples\nmake \\\n  IMAGE_KP_US=quay.io/$USER/go-kprobe-counter:latest \\\n  IMAGE_TC_US=quay.io/$USER/go-tc-counter:latest \\\n  IMAGE_TP_US=quay.io/$USER/go-tracepoint-counter:latest \\\n  IMAGE_UP_US=quay.io/$USER/go-uprobe-counter:latest \\\n  IMAGE_XDP_US=quay.io/$USER/go-xdp-counter:latest \\\n  build-us-images\n

    Then EITHER push images to a remote repository:

    docker login quay.io\ncd bpfman/examples\nmake \\\n  IMAGE_KP_US=quay.io/$USER/go-kprobe-counter:latest \\\n  IMAGE_TC_US=quay.io/$USER/go-tc-counter:latest \\\n  IMAGE_TP_US=quay.io/$USER/go-tracepoint-counter:latest \\\n  IMAGE_UP_US=quay.io/$USER/go-uprobe-counter:latest \\\n  IMAGE_XDP_US=quay.io/$USER/go-xdp-counter:latest \\\n  push-us-images\n

    OR load the images directly to a specified kind cluster:

    cd bpfman/examples\nmake \\\n  IMAGE_KP_US=quay.io/$USER/go-kprobe-counter:latest \\\n  IMAGE_TC_US=quay.io/$USER/go-tc-counter:latest \\\n  IMAGE_TP_US=quay.io/$USER/go-tracepoint-counter:latest \\\n  IMAGE_UP_US=quay.io/$USER/go-uprobe-counter:latest \\\n  IMAGE_XDP_US=quay.io/$USER/go-xdp-counter:latest \\\n  KIND_CLUSTER_NAME=bpfman-deployment \\\n  load-us-images-kind\n

    Lastly, update the yaml to use the private images or override the yaml files using the Makefile:

    cd bpfman/examples/\n\nmake deploy-kprobe IMAGE_KP_US=quay.io/$USER/go-kprobe-counter:latest\nmake undeploy-kprobe\n\nmake deploy-tc IMAGE_TC_US=quay.io/$USER/go-tc-counter:latest\nmake undeploy-tc\n\nmake deploy-tracepoint IMAGE_TP_US=quay.io/$USER/go-tracepoint-counter:latest\nmake undeploy-tracepoint\n\nmake deploy-uprobe IMAGE_UP_US=quay.io/$USER/go-uprobe-counter:latest\nmake undeploy-uprobe\n\nmake deploy-xdp IMAGE_XDP_US=quay.io/$USER/go-xdp-counter:latest\nmake undeploy-xdp\n
    "},{"location":"getting-started/example-bpf-local/","title":"Deploying Example eBPF Programs On Local Host","text":"

    This section describes running bpfman and the example eBPF programs on a local host.

    "},{"location":"getting-started/example-bpf-local/#example-overview","title":"Example Overview","text":"

    Assume the following command is run:

    cd bpfman/examples/go-xdp-counter/\ngo run -exec sudo . -iface eno3\n

    The diagram below shows the go-xdp-counter example; the other examples operate in a similar fashion.

    Following the diagram (Purple numbers):

    1. When the go-xdp-counter userspace program is started, it sends a gRPC request over a unix socket to bpfman-rpc, requesting bpfman to load the go-xdp-counter eBPF bytecode located on disk at bpfman/examples/go-xdp-counter/bpf_x86_bpfel.o at a priority of 50 and on interface eno3. These values are configurable, as we will see later, but for now we will use the defaults (except the interface, which is required).
    2. bpfman loads its dispatcher eBPF program, which links to the go-xdp-counter eBPF program, and returns a kernel Program ID referencing the running program.
    3. bpfman list can be used to show that the eBPF program was loaded.
    4. Once the go-xdp-counter eBPF bytecode is loaded, the eBPF program writes packet counts and byte counts to a shared map.
    5. The go-xdp-counter userspace program periodically reads the counters from the shared map and logs the values.

    Below are the steps to run the example program described above and then some additional examples that use the bpfman CLI to load and unload other eBPF programs. See Launching bpfman for more detailed instructions on building and loading bpfman. This tutorial assumes bpfman has been built, bpfman-rpc is running, and the bpfman CLI is in $PATH.

    "},{"location":"getting-started/example-bpf-local/#running-example-programs","title":"Running Example Programs","text":"

    Example eBPF Programs describes how the example programs work, how to build them, and how to run the different examples. Build the go-xdp-counter program before continuing.

    To run the go-xdp-counter program, determine the host interface to attach the eBPF program to and then start the go program. In this example, eno3 will be used, as shown in the diagram at the top of the page. The output should show the count and total bytes of packets as they pass through the interface as shown below:

    cd bpfman/examples/go-xdp-counter/\n\ngo run -exec sudo . --iface eno3\n2023/07/17 17:43:58 Using Input: Interface=eno3 Priority=50 Source=/home/$USER/src/bpfman/examples/go-xdp-counter/bpf_x86_bpfel.o\n2023/07/17 17:43:58 Program registered with id 6211\n2023/07/17 17:44:01 4 packets received\n2023/07/17 17:44:01 580 bytes received\n\n2023/07/17 17:44:04 4 packets received\n2023/07/17 17:44:04 580 bytes received\n\n2023/07/17 17:44:07 8 packets received\n2023/07/17 17:44:07 1160 bytes received\n\n:\n

    In another terminal, use the CLI to show the go-xdp-counter eBPF bytecode was loaded.

    sudo bpfman list\n Program ID  Name       Type  Load Time\n 6211        xdp_stats  xdp   2023-07-17T17:43:58-0400\n

    Finally, press <CTRL>+c when finished with go-xdp-counter.

    :\n\n2023/07/17 17:44:34 28 packets received\n2023/07/17 17:44:34 4060 bytes received\n\n^C2023/07/17 17:44:35 Exiting...\n2023/07/17 17:44:35 Unloading Program: 6211\n
    "},{"location":"getting-started/example-bpf-local/#using-cli-to-manage-ebpf-programs","title":"Using CLI to Manage eBPF Programs","text":"

    bpfman provides a CLI to interact with the bpfman Library. Find a deeper dive into CLI syntax in the CLI Guide. We will load the simple xdp-pass program, which allows all traffic to pass through the attached interface, eno3 in this example. The source code, xdp_pass.bpf.c, is located in the integration-test directory and there is also a prebuilt image: quay.io/bpfman-bytecode/xdp_pass:latest.

    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest xdp --iface eno3 --priority 100\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6213\n Map Owner ID:  None\n Map Used By:   6213\n Priority:      100\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6213\n Name:                             pass\n Type:                             xdp\n Loaded At:                        2023-07-17T17:48:10-0400\n Tag:                              4b9d1b2c140e87ce\n GPL Compatible:                   true\n Map IDs:                          [2724]\n BTF ID:                           2834\n Size Translated (bytes):          96\n JITed:                            true\n Size JITed (bytes):               67\n Kernel Allocated Memory (bytes):  4096\n Verified Instruction Count:       9\n

    bpfman load image returns the same data as the bpfman get command. From the output, the Program ID of 6213 can be found in the Kernel State section. The Program ID can be used to perform a bpfman get to retrieve all relevant program data and a bpfman unload when the program needs to be unloaded.

    sudo bpfman list\n Program ID  Name  Type  Load Time\n 6213        pass  xdp   2023-07-17T17:48:10-0400\n

    We can recheck the details about the loaded program with the bpfman get command:

    sudo bpfman get 6213\n Bpfman State\n---------------\n Name:          pass\n Image URL:     quay.io/bpfman-bytecode/xdp_pass:latest\n Pull Policy:   IfNotPresent\n Global:        None\n Metadata:      None\n Map Pin Path:  /run/bpfman/fs/maps/6213\n Map Owner ID:  None\n Map Used By:   6213\n Priority:      100\n Iface:         eno3\n Position:      0\n Proceed On:    pass, dispatcher_return\n\n Kernel State\n----------------------------------\n Program ID:                       6213\n Name:                             pass\n Type:                             xdp\n Loaded At:                        2023-07-17T17:48:10-0400\n Tag:                              4b9d1b2c140e87ce\n GPL Compatible:                   true\n Map IDs:                          [2724]\n BTF ID:                           2834\n Size Translated (bytes):          96\n JITed:                            true\n Size JITed (bytes):               67\n Kernel Allocated Memory (bytes):  4096\n Verified Instruction Count:       9\n

    Then unload the program:

    sudo bpfman unload 6213\n
    "},{"location":"getting-started/example-bpf/","title":"Example eBPF Programs","text":"

    Example applications that use the bpfman-go bindings can be found in the bpfman/examples/ directory. Current examples include:

    • bpfman/examples/go-app-counter/
    • bpfman/examples/go-kprobe-counter/
    • bpfman/examples/go-target/
    • bpfman/examples/go-tc-counter/
    • bpfman/examples/go-tcx-counter/
    • bpfman/examples/go-tracepoint-counter/
    • bpfman/examples/go-uprobe-counter/
    • bpfman/examples/go-uretprobe-counter/
    • bpfman/examples/go-xdp-counter/
    "},{"location":"getting-started/example-bpf/#example-code-breakdown","title":"Example Code Breakdown","text":"

    These examples and the associated documentation are intended to provide the basics on how to deploy and manage an eBPF program using bpfman. Each of the examples contains an eBPF program written in C (app_counter.c, kprobe_counter.c, tc_counter.c, tcx_counter.c, tracepoint_counter.c, uprobe_counter.c, uretprobe_counter.c, and xdp_counter.c) that is compiled into eBPF bytecode for each supported architecture (bpf_arm64_bpfel.o, bpf_powerpc_bpfel.o, bpf_s390_bpfeb.o and bpf_x86_bpfel.o). Each time the eBPF program is called, it increments the packet and byte counts in a map that is accessible by the userspace portion.

    Each of the examples also has a userspace portion written in Go. The userspace code leverages the cilium/ebpf library to manage the maps shared with the eBPF program. The example eBPF programs are very similar in functionality, and only vary in where in the Linux networking stack they are inserted. The userspace program then polls the eBPF map every 3 seconds and logs the current counts, as sketched below.
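
    Here is a rough sketch of that polling loop using cilium/ebpf. The pin path and the per-CPU array layout with two u64 counters are illustrative assumptions, mirroring but not copying the example code; the real layout is defined by each example's eBPF source:

    package main\n\nimport (\n    \"log\"\n    \"time\"\n\n    \"github.com/cilium/ebpf\"\n)\n\n// Counts mirrors an assumed per-CPU map value of two u64 counters.\ntype Counts struct {\n    Packets uint64\n    Bytes   uint64\n}\n\nfunc main() {\n    // Illustrative pin path; the examples derive the real path from bpfman.\n    m, err := ebpf.LoadPinnedMap(\"/run/bpfman/fs/maps/6211/xdp_stats_map\", nil)\n    if err != nil {\n        log.Fatal(err)\n    }\n    defer m.Close()\n\n    for range time.Tick(3 * time.Second) {\n        // Per-CPU lookup returns one Counts entry per CPU; sum them.\n        var perCPU []Counts\n        if err := m.Lookup(uint32(0), &perCPU); err != nil {\n            log.Fatal(err)\n        }\n        var total Counts\n        for _, c := range perCPU {\n            total.Packets += c.Packets\n            total.Bytes += c.Bytes\n        }\n        log.Printf(\"%d packets received\", total.Packets)\n        log.Printf(\"%d bytes received\", total.Bytes)\n    }\n}\n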

    The examples were written to either run locally on a host or run in a container in a Kubernetes deployment. The userspace code flow is slightly different depending on the deployment, so input parameters dictate the deployment method.

    "},{"location":"getting-started/example-bpf/#examples-in-local-deployment","title":"Examples in Local Deployment","text":"

    When run locally, the userspace program makes gRPC calls to bpfman-rpc, requesting bpfman to load the eBPF program at the requested hook point (TC hook point, Tracepoint, XDP hook point, etc). Data sent in the RPC request is either defaulted or passed in via input parameters. To make the examples as simple as possible to run, all input data is defaulted (except the interface that TC and XDP programs need to attach to) but can be overwritten if desired. All example programs have the following common parameters (kprobe has no command specific parameters beyond these):

    cd bpfman/examples/go-kprobe-counter/\n\n./go-kprobe-counter --help\nUsage of ./go-kprobe-counter:\n  -crd\n      Flag to indicate all attributes should be pulled from the BpfProgram CRD.\n      Used in Kubernetes deployments and is mutually exclusive with all other\n      parameters.\n  -file string\n      File path of bytecode source. \"file\" and \"image\"/\"id\" are mutually exclusive.\n      Example: -file /home/$USER/src/bpfman/examples/go-kprobe-counter/bpf_x86_bpfel.o\n  -id uint\n      Optional Program ID of bytecode that has already been loaded. \"id\" and\n      \"file\"/\"image\" are mutually exclusive.\n      Example: -id 28341\n  -image string\n      Image repository URL of bytecode source. \"image\" and \"file\"/\"id\" are\n      mutually exclusive.\n      Example: -image quay.io/bpfman-bytecode/go-kprobe-counter:latest\n  -map_owner_id int\n      Program Id of loaded eBPF program this eBPF program will share a map with.\n      Example: -map_owner_id 9785\n

    The location of the eBPF bytecode can be provided four different ways:

    • Defaulted: If nothing is passed in, the code scans the local directory for a bpf_x86_bpfel.o file. If found, that is used. If not, it errors out.
    • file: Fully qualified path of the bytecode object file.
    • image: Image repository URL of bytecode source.
    • id: Kernel program ID of bytecode that has already been loaded. The program could have been loaded using bpftool or bpfman.

    If two userspace programs need to share the same map, map_owner_id is the Program ID of the first loaded program that has the map the second program wants to share.

    The examples require sudo to run because they require access to the Unix socket that bpfman-rpc is listening on. Deploying Example eBPF Programs On Local Host steps through launching bpfman locally and running some of the examples.

    "},{"location":"getting-started/example-bpf/#examples-in-kubernetes-deployment","title":"Examples in Kubernetes Deployment","text":"

    When run in a Kubernetes deployment, all the input data is passed to Kubernetes through yaml files. To indicate to the userspace code that it is running in a Kubernetes deployment and should not try to load the eBPF bytecode itself, the example is launched in the container with the crd flag. Example: ./go-kprobe-counter -crd

    For these examples, the bytecode is loaded via one yaml file which creates a *Program CRD Object (KprobeProgram, TcProgram, TracepointProgram, etc.) and the userspace pod is loaded via another yaml file. In a more realistic deployment, the userspace pod may have the logic to send the *Program CRD Object create request to the KubeAPI Server, but the two yaml files are loaded manually for simplicity in the example code. The examples directory contains yaml files to load each example, leveraging Kustomize to modify the yaml to load the latest images from Quay.io, custom images, or release based images. It is recommended to use the commands built into the Makefile, which run kustomize, to apply and remove the yaml files to a Kubernetes cluster. Use make help to see all the make options. For example:

    cd bpfman/examples/\n\n# Deploy then undeploy all the examples\nmake deploy\nmake undeploy\n\nOR\n\n# Deploy then undeploy just the TC example\nmake deploy-tc\nmake undeploy-tc\n

    Deploying Example eBPF Programs On Kubernetes steps through deploying bpfman to multiple nodes in a Kubernetes cluster and loading the examples.

    "},{"location":"getting-started/example-bpf/#building-example-code","title":"Building Example Code","text":"

    All the examples can be built locally as well as packaged in a container for Kubernetes deployment.

    "},{"location":"getting-started/example-bpf/#building-locally","title":"Building Locally","text":"

    To build directly on a system, make sure all the prerequisites are met, then build.

    "},{"location":"getting-started/example-bpf/#prerequisites","title":"Prerequisites","text":"

    This assumes bpfman is already installed and running on the system. If not, see Setup and Building bpfman.

    1. All requirements defined by the cilium/ebpf package
    2. libbpf development package to get the required eBPF c headers

      Fedora: sudo dnf install libbpf-devel

      Ubuntu: sudo apt-get install libbpf-dev

    "},{"location":"getting-started/example-bpf/#build","title":"Build","text":"

    To build all the C based eBPF counter bytecode, run:

    cd bpfman/examples/\nmake generate\n

    To build all the Userspace GO Client examples, run:

    cd bpfman/examples/\nmake build\n

    To build only a single example:

    cd bpfman/examples/go-tracepoint-counter/\ngo generate\ngo build\n

    Other program types are the same.

    "},{"location":"getting-started/example-bpf/#building-ebpf-bytecode-container-image","title":"Building eBPF Bytecode Container Image","text":"

    eBPF Bytecode Image Specifications provides detailed instructions on building and shipping bytecode in a container image. Pre-built eBPF container images for the examples can be loaded from:

    • quay.io/bpfman-bytecode/go-app-counter:latest
    • quay.io/bpfman-bytecode/go-kprobe-counter:latest
    • quay.io/bpfman-bytecode/go-tc-counter:latest
    • quay.io/bpfman-bytecode/go-tcx-counter:latest
    • quay.io/bpfman-bytecode/go-tracepoint-counter:latest
    • quay.io/bpfman-bytecode/go-uprobe-counter:latest
    • quay.io/bpfman-bytecode/go-uretprobe-counter:latest
    • quay.io/bpfman-bytecode/go-xdp-counter:latest

    To build the example eBPF bytecode container images, first generate the bytecode (the generate commands require the Prerequisites described above in the Building Locally section).

    To generate the bytecode for all the examples:

    cd bpfman/examples/\nmake generate\n

    OR to generate the bytecode for a single example (XDP in this case):

    cd bpfman/examples/go-xdp-counter/\ngo generate\n

    The preferred method for building the container image is to use the bpfman image build command. See bpfman image build in the CLI Guide for more details.

    cd bpfman/examples/go-xdp-counter/\nbpfman image build -f ../../Containerfile.bytecode -t quay.io/$QUAY_USER/go-xdp-counter-bytecode:test -b bpf_x86_bpfel.o\n

    The examples Makefile has commands to build all the example images if needed. See Locally Build Example Container Images for more details.

    bpfman currently does not provide a method for pre-loading bytecode images (see issue #603), so push the bytecode image to an image repository.

    For example:

    docker login quay.io\ndocker push quay.io/$QUAY_USER/go-xdp-counter-bytecode:test\n
    "},{"location":"getting-started/example-bpf/#running-examples","title":"Running Examples","text":"

    Below are some examples of how to run the bpfman examples on a host where bpfman is already installed.

    cd bpfman/examples/go-xdp-counter/\nsudo ./go-xdp-counter -iface <INTERNET INTERFACE NAME>\n

    or (NOTE: TC programs also require a direction, ingress or egress)

    cd bpfman/examples/go-tc-counter/\nsudo ./go-tc-counter -direction ingress -iface <INTERNET INTERFACE NAME>\n

    or

    cd bpfman/examples/go-tracepoint-counter/\nsudo ./go-tracepoint-counter\n

    bpfman can load eBPF bytecode from a container image built following the spec described in eBPF Bytecode Image Specifications.

    To use the container image, pass the URL to the userspace program:

    sudo ./go-xdp-counter -iface eno3 -image quay.io/bpfman-bytecode/go-xdp-counter:latest\n2022/12/02 16:28:32 Using Input: Interface=eno3 Priority=50 Source=quay.io/bpfman-bytecode/go-xdp-counter:latest\n2022/12/02 16:28:34 Program registered with id 6223\n2022/12/02 16:28:37 4 packets received\n2022/12/02 16:28:37 580 bytes received\n\n2022/12/02 16:28:40 4 packets received\n2022/12/02 16:28:40 580 bytes received\n\n^C2022/12/02 16:28:42 Exiting...\n2022/12/02 16:28:42 Unloading Program: 6223\n

    Or to run with the privately built bytecode container image:

    sudo ./go-xdp-counter -iface eno3 -image quay.io/$QUAY_USER/go-xdp-counter-bytecode:test\n2022/12/02 16:38:44 Using Input: Interface=eno3 Priority=50 Source=quay.io/$QUAY_USER/go-xdp-counter-bytecode:test\n2022/12/02 16:38:45 Program registered with id 6225\n2022/12/02 16:38:48 4 packets received\n2022/12/02 16:38:48 580 bytes received\n\n2022/12/02 16:38:51 4 packets received\n2022/12/02 16:38:51 580 bytes received\n\n^C2022/12/02 16:38:51 Exiting...\n2022/12/02 16:38:51 Unloading Program: 6225\n
    "},{"location":"getting-started/launching-bpfman/","title":"Launching bpfman","text":"

    The most basic way to deploy bpfman is to run it directly on a host system. First bpfman needs to be built and then started.

    "},{"location":"getting-started/launching-bpfman/#build-bpfman","title":"Build bpfman","text":"

    Perform the following steps to build bpfman. If this is your first time using bpfman, follow the instructions in Setup and Building bpfman to setup the prerequisites for building. To avoid installing the dependencies and having to build bpfman, consider running bpfman from a packaged release (see Run bpfman From Release Image) or installing the bpfman RPM (see Run bpfman From RPM).

    cd bpfman/\ncargo build\n
    "},{"location":"getting-started/launching-bpfman/#install-and-start-bpfman","title":"Install and Start bpfman","text":"

    Run the following command to copy the bpfman CLI and bpfman-rpc binaries to /usr/sbin/ and copy bpfman.socket and bpfman.service files to /usr/lib/systemd/system/. This option will also enable and start the systemd services:

    cd bpfman/\nsudo ./scripts/setup.sh install\n

    bpfman CLI is now in $PATH and can be used to load, view and unload eBPF programs.

    sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest --name pass xdp --iface eno3 --priority 100\n\nsudo bpfman list\n Program ID  Name  Type  Load Time                \n 53885       pass  xdp   2024-08-26T17:41:36-0400 \n\nsudo bpfman unload 53885\n

    bpfman CLI is a Rust program that calls the bpfman library directly. To view logs while running bpfman CLI commands, prepend RUST_LOG=info to each command (see Logging for more details):

    sudo RUST_LOG=info bpfman list\n[INFO  bpfman::utils] Has CAP_BPF: true\n[INFO  bpfman::utils] Has CAP_SYS_ADMIN: true\n Program ID  Name  Type  Load Time \n

    The examples (see Deploying Example eBPF Programs On Local Host) are Go based programs, so they build and send RPC messages to the Rust based binary bpfman-rpc, which in turn calls the bpfman library.

    cd bpfman/examples/go-xdp-counter/\ngo run -exec sudo . -iface eno3\n

    To view bpfman logs for RPC based applications, including all the provided examples, use journalctl:

    sudo journalctl -f -u bpfman.service -u bpfman.socket\n:\n  <RUN \"go run -exec sudo . -iface eno3\">\nAug 26 18:03:54 server-calvin bpfman-rpc[2401725]: Using a Unix socket from systemd\nAug 26 18:03:54 server-calvin bpfman-rpc[2401725]: Using inactivity timer of 15 seconds\nAug 26 18:03:54 server-calvin bpfman-rpc[2401725]: Listening on /run/bpfman-sock/bpfman.sock\nAug 26 18:03:54 server-calvin bpfman-rpc[2401725]: Has CAP_BPF: true\nAug 26 18:03:54 server-calvin bpfman-rpc[2401725]: Has CAP_SYS_ADMIN: true\nAug 26 18:03:54 server-calvin bpfman-rpc[2401725]: Starting Cosign Verifier, downloading data from Sigstore TUF repository\nAug 26 18:03:55 server-calvin bpfman-rpc[2401725]: Loading program bytecode from file: /home/$USER/src/bpfman/bpfman/examples/go-xdp-counter/bpf_x86_bpfel.o\nAug 26 18:03:57 server-calvin bpfman-rpc[2401725]: The bytecode image: quay.io/bpfman/xdp-dispatcher:latest is signed\nAug 26 18:03:57 server-calvin bpfman-rpc[2401725]: Added xdp program with name: xdp_stats and id: 53919\nAug 26 18:04:09 server-calvin bpfman-rpc[2401725]: Shutdown Unix Handler /run/bpfman-sock/bpfman.sock\n
    "},{"location":"getting-started/launching-bpfman/#additional-notes","title":"Additional Notes","text":"

    To update the configuration settings associated with running bpfman as a service, edit the service configuration files:

    sudo vi /usr/lib/systemd/system/bpfman.socket\nsudo vi /usr/lib/systemd/system/bpfman.service\nsudo systemctl daemon-reload\n

    If bpfman CLI or bpfman-rpc is rebuilt, the following command can be run to install the updated binaries without tearing down bpfman. The services are automatically restarted.

    sudo ./scripts/setup.sh reinstall\n

    To unwind all the changes, stop bpfman and remove all related files from the system, run the following script:

    sudo ./scripts/setup.sh uninstall\n
    "},{"location":"getting-started/launching-bpfman/#preferred-method-to-start-bpfman","title":"Preferred Method to Start bpfman","text":"

    In order to call into the bpfman Library, the calling process must be privileged: the kernel requires a set of powerful capabilities to load and unload eBPF. Long lived privileged processes are more vulnerable to attack than short lived processes. When bpfman-rpc is run as a systemd service, it leverages socket activation. This means that it loads a bpfman.socket and bpfman.service file. The socket service is the long lived process, which doesn't have any special permissions. The service that runs bpfman-rpc is only started when there is a request on the socket, and then bpfman-rpc stops itself after an inactivity timeout.

    Note

    For security reasons, it is recommended to run bpfman-rpc as a systemd service when running on a local host. For local development, some may find it useful to run bpfman-rpc as a long lived process.

    When run as a systemd service, the set of linux capabilities are limited to only the required set. If permission errors are encountered, see Linux Capabilities for help debugging.

    "},{"location":"getting-started/operator-quick-start/","title":"Deploying the bpfman-operator","text":"

    The bpfman-operator repository exists in order to deploy and manage bpfman within a Kubernetes cluster. This operator was built utilizing some great tooling provided by the operator-sdk library. A great first step in understanding some of the functionality can be to just run make help.

    "},{"location":"getting-started/operator-quick-start/#deploy-bpfman-operation","title":"Deploy bpfman Operation","text":"

    The bpfman-operator runs as a Deployment with a single replica. It runs on the control plane and is composed of the containers bpfman-operator and kube-rbac-proxy. The operator is responsible for launching the bpfman DaemonSet, which runs on every node. The bpfman DaemonSet is composed of the containers bpfman, bpfman-agent, and node-driver-registrar.

    Described below are two ways to deploy bpfman in a Kubernetes cluster:

    • Deploy Locally via KIND: Easiest way to deploy bpfman in a Kubernetes cluster and great for testing.
    • Deploy To Openshift Cluster: Special steps are needed to deploy on an Openshift cluster because SELinux is enabled.
    "},{"location":"getting-started/operator-quick-start/#deploy-locally-via-kind","title":"Deploy Locally via KIND","text":"

    After reviewing the possible make targets, it's quick and easy to get bpfman deployed locally on your system via a KIND cluster with:

    cd bpfman/bpfman-operator\nmake run-on-kind\n

    Note

    By default, bpfman-operator deploys bpfman with CSI enabled. CSI requires Kubernetes v1.26 due to a PR (kubernetes/kubernetes#112597) that addresses a gRPC Protocol Error seen in the CSI client code and does not appear to have been backported. It is recommended to install kind v0.20.0 or later.

    "},{"location":"getting-started/operator-quick-start/#deploy-to-openshift-cluster","title":"Deploy To Openshift Cluster","text":"

    First deploy the operator with one of the following two options:

    "},{"location":"getting-started/operator-quick-start/#1-manually-with-kustomize","title":"1. Manually with Kustomize","text":"

    To install manually with Kustomize and raw manifests, simply run the following commands. The Openshift cluster needs to be up and running and specified in the ~/.kube/config file.

    cd bpfman/bpfman-operator\nmake deploy-openshift\n

    Which can then be cleaned up at a later time with:

    make undeploy-openshift\n
    "},{"location":"getting-started/operator-quick-start/#2-via-the-olm-bundle","title":"2. Via the OLM bundle","text":"

    The other option for installing the bpfman-operator is to install it using an OLM bundle.

    First setup the namespace and certificates for the operator with:

    cd bpfman/bpfman-operator\noc apply -f ./hack/ocp-scc-hacks.yaml\n

    Then use operator-sdk to install the bundle like so:

    operator-sdk run bundle quay.io/bpfman/bpfman-operator-bundle:latest --namespace openshift-bpfman\n

    Which can then be cleaned up at a later time with:

    operator-sdk cleanup bpfman-operator\n

    followed by

    oc delete -f ./hack/ocp-scc-hacks.yaml\n
    "},{"location":"getting-started/operator-quick-start/#verify-the-installation","title":"Verify the Installation","text":"

    Independent of the method used to deploy, if the bpfman-operator came up successfully, you will see the bpfman-daemon and bpfman-operator pods running without errors:

    $ kubectl get pods -n bpfman\nNAME                             READY   STATUS    RESTARTS   AGE\nbpfman-daemon-w24pr                3/3     Running   0          130m\nbpfman-operator-78cf9c44c6-rv7f2   2/2     Running   0          132m\n
    "},{"location":"getting-started/operator-quick-start/#api-types-overview","title":"API Types Overview","text":"

    Refer to api-spec.md for a more detailed description of all the bpfman Kubernetes API types.

    "},{"location":"getting-started/operator-quick-start/#cluster-scoped-versus-namespaced-scoped-crds","title":"Cluster Scoped Versus Namespaced Scoped CRDs","text":"

    For security reasons, cluster admins may want to limit certain applications to only loading eBPF programs within a given namespace. To provide these tighter controls on eBPF program loading, some of the bpfman Custom Resource Definitions (CRDs) are Namespace scoped. Not all eBPF program types make sense as namespace scoped. The namespace scoped CRDs use the \"<ProgramType>NsProgram\" identifier and the cluster scoped CRDs use the \"<ProgramType>Program\" identifier.

    "},{"location":"getting-started/operator-quick-start/#multiple-program-crds","title":"Multiple Program CRDs","text":"

    The multiple *Program CRDs are the bpfman Kubernetes API objects most relevant to users and can be used to understand clusterwide state for an eBPF program. They are designed to express how and where eBPF programs are to be deployed within a Kubernetes cluster. Currently bpfman supports:

    • fentryProgram
    • fexitProgram
    • kprobeProgram
    • tcProgram and tcNsProgram
    • tcxProgram and tcxNsProgram
    • tracepointProgram
    • uprobeProgram and uprobeNsProgram
    • xdpProgram and xdpNsProgram

    There are also the bpfApplication and bpfNsApplication CRDs, which are designed for managing eBPF programs at an application level within a Kubernetes cluster. These CRDs allow Kubernetes users to define which eBPF programs are essential for an application's operations and specify how these programs should be deployed across the cluster. With the cluster scoped variant (bpfApplication), any variation of the cluster scoped eBPF programs can be loaded. With the namespace scoped variant (bpfNsApplication), any variation of the namespace scoped eBPF programs can be loaded.

    "},{"location":"getting-started/operator-quick-start/#bpfprogram-and-bpfnsprogram-crd","title":"BpfProgram and BpfNsProgram CRD","text":"

    The BpfProgram and BpfNsProgram CRDs are used internally by the bpfman-deployment to keep track of per node bpfman state such as map pin points, and to report node specific errors back to the user. Kubernetes users/controllers are only allowed to view these objects, NOT create or edit them.

    Applications wishing to use bpfman to deploy/manage their eBPF programs in Kubernetes will make use of this object to find references to the bpfMap pin points (spec.maps) in order to configure their eBPF programs.
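
    As an illustration, a namespace-restricted application could list the BpfNsProgram objects it is allowed to see with the client-go dynamic client. The acme namespace matches the namespaced sample used later on this page; the rest is a hedged sketch rather than code from the examples:

    package main\n\nimport (\n    \"context\"\n    \"fmt\"\n    \"log\"\n\n    metav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n    \"k8s.io/apimachinery/pkg/runtime/schema\"\n    \"k8s.io/client-go/dynamic\"\n    \"k8s.io/client-go/rest\"\n)\n\nfunc main() {\n    cfg, err := rest.InClusterConfig()\n    if err != nil {\n        log.Fatal(err)\n    }\n    client, err := dynamic.NewForConfig(cfg)\n    if err != nil {\n        log.Fatal(err)\n    }\n\n    // Namespaced variant of the BpfProgram resource.\n    gvr := schema.GroupVersionResource{Group: \"bpfman.io\", Version: \"v1alpha1\", Resource: \"bpfnsprograms\"}\n    list, err := client.Resource(gvr).Namespace(\"acme\").List(context.TODO(), metav1.ListOptions{})\n    if err != nil {\n        log.Fatal(err)\n    }\n    for _, item := range list.Items {\n        fmt.Println(item.GetName(), item.GetAnnotations()[\"bpfman.io/ProgramId\"])\n    }\n}\n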

    "},{"location":"getting-started/operator-quick-start/#deploy-an-ebpf-program-to-the-cluster","title":"Deploy an eBPF Program to the cluster","text":"

    There are sample yamls for each of the supported program types in the bpfman-operator/config/samples directory.

    "},{"location":"getting-started/operator-quick-start/#deploy-cluster-scoped-sample","title":"Deploy Cluster Scoped Sample","text":"

    Any of the cluster scoped samples can be applied as is. To test the deployment simply deploy one of the sample xdpPrograms:

    cd bpfman/bpfman-operator/\nkubectl apply -f config/samples/bpfman.io_v1alpha1_xdp_pass_xdpprogram.yaml\n

    If loading of the XDP Program to the selected nodes was successful, it will be reported back to the user via the xdpProgram's status field:

    $ kubectl get xdpprogram xdp-pass-all-nodes -o yaml\napiVersion: bpfman.io/v1alpha1\nkind: XdpProgram\nmetadata:\n  annotations:\n    kubectl.kubernetes.io/last-applied-configuration: |\n      {\"apiVersion\":\"bpfman.io/v1alpha1\",\"kind\":\"XdpProgram\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/name\":\"xdpprogram\"},\"name\":\"xdp-pass-all-nodes\"},\"spec\":{\"bpffunctionname\":\"pass\",\"bytecode\":{\"image\":{\"url\":\"quay.io/bpfman-bytecode/xdp_pass:latest\"}},\"globaldata\":{\"GLOBAL_u32\":[13,12,11,10],\"GLOBAL_u8\":[1]},\"interfaceselector\":{\"primarynodeinterface\":true},\"nodeselector\":{},\"priority\":0}}\n  creationTimestamp: \"2023-11-07T19:16:39Z\"\n  finalizers:\n  - bpfman.io.operator/finalizer\n  generation: 2\n  labels:\n    app.kubernetes.io/name: xdpprogram\n  name: xdp-pass-all-nodes\n  resourceVersion: \"157187\"\n  uid: 21c71a61-4e73-44eb-9b49-07af2866d25b\nspec:\n  bpffunctionname: pass\n  bytecode:\n    image:\n      imagepullpolicy: IfNotPresent\n      url: quay.io/bpfman-bytecode/xdp_pass:latest\n  globaldata:\n    GLOBAL_u8: AQ==\n    GLOBAL_u32: DQwLCg==\n  interfaceselector:\n    primarynodeinterface: true\n  mapownerselector: {}\n  nodeselector: {}\n  priority: 0\n  proceedon:\n  - pass\n  - dispatcher_return\nstatus:\n  conditions:\n  - lastTransitionTime: \"2023-11-07T19:16:42Z\"\n    message: bpfProgramReconciliation Succeeded on all nodes\n    reason: ReconcileSuccess\n    status: \"True\"\n    type: ReconcileSuccess\n

    To see information in listing form simply run:

    $ kubectl get xdpprogram -o wide\nNAME                 BPFFUNCTIONNAME   NODESELECTOR   PRIORITY   INTERFACESELECTOR               PROCEEDON\nxdp-pass-all-nodes   pass              {}             0          {\"primarynodeinterface\":true}   [\"pass\",\"dispatcher_return\"]\n

    To view each attachment point on each node, use the bpfProgram object:

    $ kubectl get bpfprograms\nNAME                          TYPE   STATUS         AGE\nxdp-pass-all-nodes-f3def00d   xdp    bpfmanLoaded   56s\n\n\n$ kubectl get bpfprograms xdp-pass-all-nodes-f3def00d -o yaml\napiVersion: bpfman.io/v1alpha1\nkind: BpfProgram\nmetadata:\n  annotations:\n    bpfman.io.xdpprogramcontroller/interface: eth0\n    bpfman.io/ProgramId: \"26577\"\n    bpfman.io/bpfProgramAttachPoint: eth0\n  creationTimestamp: \"2024-12-18T22:26:55Z\"\n  finalizers:\n  - bpfman.io.xdpprogramcontroller/finalizer\n  generation: 1\n  labels:\n    bpfman.io/appProgramId: \"\"\n    bpfman.io/ownedByProgram: xdp-pass-all-nodes\n    kubernetes.io/hostname: bpfman-deployment-control-plane\n  name: xdp-pass-all-nodes-f3def00d\n  ownerReferences:\n  - apiVersion: bpfman.io/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: XdpProgram\n    name: xdp-pass-all-nodes\n    uid: 7685a5b6-a626-4483-8c20-06b29643a2a8\n  resourceVersion: \"8430\"\n  uid: 83c5a80d-2dca-46ce-806b-6fdf7bde901f\nspec:\n  type: xdp\nstatus:\n  conditions:\n  - lastTransitionTime: \"2024-12-18T22:27:11Z\"\n    message: Successfully loaded bpfProgram\n    reason: bpfmanLoaded\n    status: \"True\"\n    type: Loaded\n
    "},{"location":"getting-started/operator-quick-start/#deploy-namespace-scoped-sample","title":"Deploy Namespace Scoped Sample","text":"

    The namespace scoped samples need a namespace and pods to attach to. A yaml has been created that will create a Namespace called \"acme\" (bpfman-operator/hack/namespace_scoped.yaml). The reason for namespace scoped CRDs is to limit an application or user to a namespace. To this end, this yaml also creates a ServiceAccount, Role, RoleBinding and Secret.

    cd bpfman-operator\nkubectl apply -f hack/namespace_scoped.yaml \n  namespace/acme created\n  serviceaccount/test-account created\n  role.rbac.authorization.k8s.io/test-account created\n  rolebinding.rbac.authorization.k8s.io/test-account created\n  secret/test-account-token created\n

    To create a kubeconfig file that limits access to the created namespace, use the script bpfman-operator/hack/namespace_scoped.sh. The script needs to know the name of the Cluster, Namespace, Service Account and Secret. The script defaults these fields to what is currently in bpfman-operator/hack/namespace_scoped.yaml. However, if a file is passed to the script, it will look for the Secret object and attempt to extract the values. This can be used if the names are changed or a different yaml file is used. The output of the script is the contents of a kubeconfig. This can be printed to the console or redirected to a file.

    ./hack/namespace_scoped.sh hack/namespace_scoped.yaml > /tmp/kubeconfig \n

    To use the kubeconfig file, select the session whose access should be limited and run:

    export KUBECONFIG=/tmp/kubeconfig\n

    From within this limited access session, a sample nginx deployment can be created in the same namespace using bpfman-operator/hack/nginx-deployment.yaml.

    kubectl apply -f hack/nginx-deployment.yaml\n  deployment.apps/nginx-deployment created\n

    Finally, load any of the namespaced samples from bpfman-operator/config/samples. They are of the format: bpfman.io_v1alpha1_*nsprogram.yaml

    kubectl apply -f config/samples/bpfman.io_v1alpha1_tc_pass_tcnsprogram.yaml \n  tcnsprogram.bpfman.io/tc-containers created\n

    The status for each namespaced program is reported via the *NsProgram status field and further information can be seen in the resulting BpfNsProgram CRDs. As an example, the following commands display the information of the TC program loaded in the acme namespace with the command above.

    $ kubectl get tcnsprograms\nNAME            BPFFUNCTIONNAME   NODESELECTOR   STATUS\ntc-containers   pass              {}             ReconcileSuccess\n\n\n$ kubectl get tcnsprograms tc-containers -o yaml\napiVersion: bpfman.io/v1alpha1\nkind: TcNsProgram\nmetadata:\n  annotations:\n    kubectl.kubernetes.io/last-applied-configuration: |\n      {\"apiVersion\":\"bpfman.io/v1alpha1\",\"kind\":\"TcNsProgram\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/name\":\"tcnsprogram\"},\"name\":\"tc-containers\",\"namespace\":\"acme\"},\"spec\":{\"bpffunctionname\":\"pass\",\"bytecode\":{\"image\":{\"url\":\"quay.io/bpfman-bytecode/tc_pass:latest\"}},\"containers\":{\"containernames\":[\"nginx\"],\"pods\":{\"matchLabels\":{\"app\":\"nginx\"}}},\"direction\":\"ingress\",\"globaldata\":{\"GLOBAL_u32\":[13,12,11,10],\"GLOBAL_u8\":[1]},\"interfaceselector\":{\"interfaces\":[\"eth0\"]},\"nodeselector\":{},\"priority\":0}}\n  creationTimestamp: \"2024-12-18T22:22:52Z\"\n  finalizers:\n  - bpfman.io.operator/finalizer\n  generation: 2\n  labels:\n    app.kubernetes.io/name: tcnsprogram\n  name: tc-containers\n  namespace: acme\n  resourceVersion: \"7993\"\n  uid: 49291f28-49dc-4486-9119-af7c31569de3\nspec:\n  bpffunctionname: pass\n  bytecode:\n    image:\n      imagepullpolicy: IfNotPresent\n      url: quay.io/bpfman-bytecode/tc_pass:latest\n  containers:\n    containernames:\n    - nginx\n    pods:\n      matchLabels:\n        app: nginx\n  direction: ingress\n  globaldata:\n    GLOBAL_u8: AQ==\n    GLOBAL_u32: DQwLCg==\n  interfaceselector:\n    interfaces:\n    - eth0\n  mapownerselector: {}\n  nodeselector: {}\n  priority: 0\n  proceedon:\n  - pipe\n  - dispatcher_return\nstatus:\n  conditions:\n  - lastTransitionTime: \"2024-12-18T22:23:11Z\"\n    message: bpfProgramReconciliation Succeeded on all nodes\n    reason: ReconcileSuccess\n    status: \"True\"\n    type: ReconcileSuccess\n

    To view each attachment point on each node, use the bpfNsProgram object:

    $ kubectl get bpfnsprograms\nNAME                     TYPE   STATUS         AGE\ntc-containers-6494dbed   tc     bpfmanLoaded   12m\ntc-containers-7dcde5ab   tc     bpfmanLoaded   12m\n\n\n$ kubectl get bpfnsprograms tc-containers-6494dbed -o yaml\napiVersion: bpfman.io/v1alpha1\nkind: BpfNsProgram\nmetadata:\n  annotations:\n    bpfman.io.tcnsprogramcontroller/containerpid: \"3256\"\n    bpfman.io.tcnsprogramcontroller/interface: eth0\n    bpfman.io/ProgramId: \"26575\"\n    bpfman.io/bpfProgramAttachPoint: eth0-ingress-nginx-deployment-57d84f57dc-lgc6f-nginx\n  creationTimestamp: \"2024-12-18T22:23:08Z\"\n  finalizers:\n  - bpfman.io.tcnsprogramcontroller/finalizer\n  generation: 1\n  labels:\n    bpfman.io/appProgramId: \"\"\n    bpfman.io/ownedByProgram: tc-containers\n    kubernetes.io/hostname: bpfman-deployment-control-plane\n  name: tc-containers-6494dbed\n  namespace: acme\n  ownerReferences:\n  - apiVersion: bpfman.io/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: TcNsProgram\n    name: tc-containers\n    uid: 49291f28-49dc-4486-9119-af7c31569de3\n  resourceVersion: \"7992\"\n  uid: c913eea4-71e0-4d5d-b664-078abac36c40\nspec:\n  type: tc\nstatus:\n  conditions:\n  - lastTransitionTime: \"2024-12-18T22:23:11Z\"\n    message: Successfully loaded bpfProgram\n    reason: bpfmanLoaded\n    status: \"True\"\n    type: Loaded\n
    "},{"location":"getting-started/overview/","title":"bpfman Overview","text":"

    Core bpfman is a library written in Rust and published as a Crate via crates.io. The bpfman library leverages the aya library to manage eBPF programs. Applications written in Rust can import the bpfman library and call the bpfman APIs directly. An example of a Rust based application leveraging the bpfman library is the bpfman CLI, which is a Rust based binary used to provision bpfman from a Linux command prompt (see CLI Guide).

    For applications written in other languages, bpfman provides bpfman-rpc, a Rust based bpfman RPC server binary. Non-Rust applications can send an RPC message to the server, which translates the RPC request into a bpfman library call.

    "},{"location":"getting-started/overview/#local-host-deployment","title":"Local Host Deployment","text":"

    When deploying bpfman on a local server, the bpfman-rpc binary runs as a systemd service that uses socket activation to start bpfman-rpc only when there is an RPC message to process. More details are provided in Deploying Example eBPF Programs On Local Host.

    "},{"location":"getting-started/overview/#kubernetes-deployment","title":"Kubernetes Deployment","text":"

    When deploying bpfman in a Kubernetes deployment, bpfman-agent, bpfman-rpc, and the bpfman library are packaged in a container. When the container starts, bpfman-rpc is started as a long running process. bpfman-agent listens to the KubeAPI Server and sends RPC requests to bpfman-rpc, which in turn calls the bpfman library to manage eBPF programs on a given node.

    More details are provided in Deploying Example eBPF Programs On Kubernetes.

    "},{"location":"getting-started/running-release/","title":"Run bpfman From Release Image","text":"

    This section describes how to deploy bpfman from a given release. See Releases for the set of bpfman releases.

    Note

    Instructions for interacting with bpfman change from release to release, so reference release specific documentation. For example:

    https://bpfman.io/v0.5.4/getting-started/running-release/

    Jump to the Setup and Building bpfman section for help building from the latest code or building from a release branch.

    Start bpfman-rpc contains more details on the different modes in which to run bpfman on the host. Use Run using an rpm for deploying a released version of bpfman from an rpm as a systemd service, and then use Deploying Example eBPF Programs On Local Host for further information on how to test and interact with bpfman.

    Deploying the bpfman-operator contains more details on deploying bpfman in a Kubernetes deployment, and Deploying Example eBPF Programs On Kubernetes contains more details on interacting with bpfman running in a Kubernetes deployment. Use Deploying Release Version of the bpfman-operator below for deploying a released version of bpfman in Kubernetes, and then use the links above for further information on how to test and interact with bpfman.

    "},{"location":"getting-started/running-release/#run-as-a-long-lived-process","title":"Run as a Long Lived Process","text":"

    export BPFMAN_REL=0.5.4\nmkdir -p $HOME/src/bpfman-${BPFMAN_REL}/; cd $HOME/src/bpfman-${BPFMAN_REL}/\nwget https://github.com/bpfman/bpfman/releases/download/v${BPFMAN_REL}/bpfman-linux-x86_64.tar.gz\ntar -xzvf bpfman-linux-x86_64.tar.gz; rm bpfman-linux-x86_64.tar.gz\n\n$ tree\n.\n├── bpf-log-exporter\n├── bpfman\n├── bpfman-ns\n├── bpfman-rpc\n└── bpf-metrics-exporter\n

    To deploy bpfman-rpc:

    sudo RUST_LOG=info ./bpfman-rpc --timeout=0\n[INFO  bpfman::utils] Has CAP_BPF: true\n[INFO  bpfman::utils] Has CAP_SYS_ADMIN: true\n[INFO  bpfman_rpc::serve] Using no inactivity timer\n[INFO  bpfman_rpc::serve] Using default Unix socket\n[INFO  bpfman_rpc::serve] Listening on /run/bpfman-sock/bpfman.sock\n:\n

    To use the CLI:

    sudo ./bpfman list\n Program ID  Name  Type  Load Time\n
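
    To load a program from a bytecode container image, the invocation takes the same form as the one shown in the Troubleshooting section later in this document (replace eth0 with an interface on your host; flags may vary slightly between releases):

    sudo ./bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest xdp --iface eth0 --priority 100\n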

    Continue in Deploying Example eBPF Programs On Local Host if desired.

    "},{"location":"getting-started/running-release/#deploying-release-version-of-the-bpfman-operator","title":"Deploying Release Version of the bpfman-operator","text":"

    The quickest solution for running bpfman in a Kubernetes deployment is to run a Kubernetes KIND Cluster:

    kind create cluster --name=test-bpfman\n

    Next, deploy the bpfman CRDs:

    export BPFMAN_REL=0.5.4\nkubectl apply -f  https://github.com/bpfman/bpfman/releases/download/v${BPFMAN_REL}/bpfman-crds-install.yaml\n

    Next, deploy the bpfman-operator, which will also deploy the bpfman-daemon, which contains bpfman-rpc, the bpfman library, and bpfman-agent:

    kubectl apply -f https://github.com/bpfman/bpfman/releases/download/v${BPFMAN_REL}/bpfman-operator-install-v${BPFMAN_REL}.yaml\n

    Finally, deploy an example eBPF program:

    kubectl apply -f https://github.com/bpfman/bpfman/releases/download/v${BPFMAN_REL}/go-xdp-counter-install-v${BPFMAN_REL}.yaml\n
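
    To verify that the example program was accepted by the cluster, query its custom resource. The xdpprograms resource name below assumes the XdpProgram CRD installed above; the exact columns displayed vary by release:

    kubectl get xdpprograms\n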

    There are other example programs on the Releases page.

    Continue in Deploying the bpfman-operator or Deploying Example eBPF Programs On Kubernetes if desired. Keep in mind that prior to v0.4.0, bpfman was released as bpfd, so follow the release-specific documentation.

    Use the following command to tear down the cluster:

    kind delete cluster -n test-bpfman\n
    "},{"location":"getting-started/running-rpm/","title":"Run bpfman From RPM","text":"

    This section describes how to deploy bpfman from an RPM. RPMs are generated each time a Pull Request is merged in GitHub for Fedora 38, 39, and Rawhide (see Install Prebuilt RPM below). RPMs can also be built locally on a Fedora server (see Build RPM Locally below).

    "},{"location":"getting-started/running-rpm/#install-prebuilt-rpm","title":"Install Prebuilt RPM","text":"

    This section describes how to install an RPM built automatically by the Packit Service. The Packit Service builds RPMs for each Pull Request merged.

    "},{"location":"getting-started/running-rpm/#packit-service-prerequisites","title":"Packit Service Prerequisites","text":"

    To install an RPM generated by the Packit Service, the following packages need to be installed:

    dnf-based OS:

    sudo dnf install -y dnf-plugins-core\n

    To install officially released versions:

    sudo dnf copr enable @ebpf-sig/bpfman\n

    To install nightly builds:

    sudo dnf copr enable @ebpf-sig/bpfman-next\n

    Note

    If both the bpfman and bpfman-next copr repos are enabled, dnf will automatically pull from bpfman-next. Either repo can be disabled. For example, to disable bpfman-next run:

    sudo dnf copr disable @ebpf-sig/bpfman-next\n
    "},{"location":"getting-started/running-rpm/#install-rpm-from-packit-service","title":"Install RPM From Packit Service","text":"

    To install an RPM from a specific commit (@ebpf-sig/bpfman-next needs to be enabled instead of @ebpf-sig/bpfman), find the commit in bpfman commits and click on the green check showing the given Pull Request was verified. At the bottom of the list of checks are the RPM builds; click on the details and follow the Packit Dashboard link to the Copr Build Results. Then install the given RPM:

    sudo dnf install -y bpfman-0.4.0~dev-1.20240117143006587102.main.191.gda44a71.fc38.x86_64\n

    bpfman is now installed but not running. To start bpfman:

    sudo systemctl daemon-reload\nsudo systemctl enable bpfman.socket\nsudo systemctl start bpfman.socket\n

    Verify bpfman is installed and running:

    $ sudo systemctl status bpfman.socket\n\u25cf bpfman.socket - bpfman API Socket\n     Loaded: loaded (/usr/lib/systemd/system/bpfman.socket; enabled; preset: disabled)\n     Active: active (listening) since Thu 2024-01-18 21:19:29 EST; 5s ago\n   Triggers: \u25cf bpfman.service\n     Listen: /run/bpfman-sock/bpfman.sock (Stream)\n     CGroup: /system.slice/bpfman.socket\n:\n\n$ sudo systemctl status bpfman.service\n\u25cb bpfman.service - Run bpfman as a service\n     Loaded: loaded (/usr/lib/systemd/system/bpfman.service; static)\n    Drop-In: /usr/lib/systemd/system/service.d\n             \u2514\u250010-timeout-abort.conf\n     Active: inactive (dead)\nTriggeredBy: \u25cf bpfman.socket\n:\n\n$ sudo bpfman list\n Program ID  Name  Type  Load Time\n
    "},{"location":"getting-started/running-rpm/#uninstall-given-rpm","title":"Uninstall Given RPM","text":"

    To determine the RPM that is currently loaded:

    $ sudo rpm -qa | grep bpfman\nbpfman-0.4.0~dev-1.20240117143006587102.main.191.gda44a71.fc39.x86_64\n

    To stop bpfman and uninstall the RPM:

    sudo systemctl stop bpfman.socket\nsudo systemctl disable bpfman.socket\n\nsudo dnf erase -y bpfman-0.4.0~dev-1.20240117143006587102.main.191.gda44a71.fc39.x86_64\n\nsudo systemctl daemon-reload\n
    "},{"location":"getting-started/running-rpm/#build-rpm-locally","title":"Build RPM Locally","text":"

    This section describes how to build and install an RPM locally.

    "},{"location":"getting-started/running-rpm/#local-build-prerequisites","title":"Local Build Prerequisites","text":"

    To build locally, the following packages need to be installed:

    dnf-based OS:

    sudo dnf install packit\nsudo dnf install cargo-rpm-macros\n

    Note

    cargo-rpm-macros needs to be version 25 or higher. It appears this is only available on Fedora 37, 38, 39 and Rawhide at the moment.
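
    To check which version is installed, a standard rpm query can be used:

    rpm -q cargo-rpm-macros\n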

    "},{"location":"getting-started/running-rpm/#build-locally","title":"Build Locally","text":"

    To build locally, run the following command:

    packit build locally\n

    This will generate several RPMs in an x86_64/ directory:

    $ ls x86_64/\nbpfman-0.4.1-1.20240521101705214906.main.19.b47994a3.fc39.x86_64.rpm\nbpfman-debuginfo-0.4.1-1.20240521101705214906.main.19.b47994a3.fc39.x86_64.rpm\nbpfman-debugsource-0.4.1-1.20240521101705214906.main.19.b47994a3.fc39.x86_64.rpm\n

    If local RPM builds were previously run on the system, the packit build locally command may fail with something similar to:

    packit build locally\n2024-05-21 10:00:03.904 base_git.py       INFO   Using user-defined script for ActionName.post_upstream_clone: [['bash', '-c', 'if [[ ! -d /var/tmp/cargo-vendor-filterer ]]; then git clone https://github.com/coreos/cargo-vendor-filterer.git /var/tmp/cargo-vendor-filterer; fi && cd /var/tmp/cargo-vendor-filterer && cargo build && cd - && cp /var/tmp/cargo-vendor-filterer/target/debug/cargo-vendor-filterer . && ./cargo-vendor-filterer --format tar.gz --prefix vendor bpfman-bpfman-vendor.tar.gz']]\n2024-05-21 10:00:03.956 logging.py        INFO   error: could not find `Cargo.toml` in `/var/tmp/cargo-vendor-filterer` or any parent directory\n2024-05-21 10:00:03.957 commands.py       ERROR  Command 'bash -c if [[ ! -d /var/tmp/cargo-vendor-filterer ]]; then git clone https://github.com/coreos/cargo-vendor-filterer.git /var/tmp/cargo-vendor-filterer; fi && cd /var/tmp/cargo-vendor-filterer && cargo build && cd - && cp /var/tmp/cargo-vendor-filterer/target/debug/cargo-vendor-filterer . && ./cargo-vendor-filterer --format tar.gz --prefix vendor bpfman-bpfman-vendor.tar.gz' failed.\n2024-05-21 10:00:03.957 utils.py          ERROR  Command 'bash -c if [[ ! -d /var/tmp/cargo-vendor-filterer ]]; then git clone https://github.com/coreos/cargo-vendor-filterer.git /var/tmp/cargo-vendor-filterer; fi && cd /var/tmp/cargo-vendor-filterer && cargo build && cd - && cp /var/tmp/cargo-vendor-filterer/target/debug/cargo-vendor-filterer . && ./cargo-vendor-filterer --format tar.gz --prefix vendor bpfman-bpfman-vendor.tar.gz' failed.\n

    To fix, run:

    sudo rm -rf /var/tmp/cargo-vendor-filterer/\n
    "},{"location":"getting-started/running-rpm/#install-local-build","title":"Install Local Build","text":"

    Install the RPM:

    sudo rpm -i x86_64/bpfman-0.4.1-1.20240521101705214906.main.19.b47994a3.fc39.x86_64.rpm\n

    bpfman is now installed but not running. To start bpfman:

    sudo systemctl daemon-reload\nsudo systemctl enable bpfman.socket\nsudo systemctl start bpfman.socket\n

    Verify bpfman is installed and running:

    $ sudo systemctl status bpfman.socket\n\u25cf bpfman.socket - bpfman API Socket\n     Loaded: loaded (/usr/lib/systemd/system/bpfman.socket; enabled; preset: disabled)\n     Active: active (listening) since Thu 2024-01-18 21:19:29 EST; 5s ago\n   Triggers: \u25cf bpfman.service\n     Listen: /run/bpfman-sock/bpfman.sock (Stream)\n     CGroup: /system.slice/bpfman.socket\n:\n\n$ sudo systemctl status bpfman.service\n\u25cb bpfman.service - Run bpfman as a service\n     Loaded: loaded (/usr/lib/systemd/system/bpfman.service; static)\n    Drop-In: /usr/lib/systemd/system/service.d\n             \u2514\u250010-timeout-abort.conf\n     Active: inactive (dead)\nTriggeredBy: \u25cf bpfman.socket\n:\n\n$ sudo bpfman list\n Program ID  Name  Type  Load Time\n
    "},{"location":"getting-started/running-rpm/#uninstall-local-build","title":"Uninstall Local Build","text":"

    To determine the RPM that is currently loaded:

    $ sudo rpm -qa | grep bpfman\nbpfman-0.4.1-1.20240521101705214906.main.19.b47994a3.fc39.x86_64\n

    To stop bpfman and uninstall the RPM:

    sudo systemctl stop bpfman.socket\nsudo systemctl disable bpfman.socket\n\nsudo rpm -e bpfman-0.4.1-1.20240521101705214906.main.19.b47994a3.fc39.x86_64\n\nsudo systemctl daemon-reload\n
    "},{"location":"getting-started/troubleshooting/","title":"Troubleshooting","text":"

    This section provides a list of common issues and solutions when working with bpfman.

    "},{"location":"getting-started/troubleshooting/#xdp","title":"XDP","text":""},{"location":"getting-started/troubleshooting/#xdp-program-fails-to-load","title":"XDP Program Fails to Load","text":"

    When attempting to load an XDP program, the load may fail as follows:

    $ sudo bpfman load image --image-url quay.io/bpfman-bytecode/xdp_pass:latest xdp --iface veth92cd99b --priority 100\nError: status: Aborted, message: \"An error occurred. dispatcher attach failed on interface veth92cd99b: `bpf_link_create` failed\", details: [], metadata: MetadataMap { headers: {\"content-type\": \"application/grpc\", \"date\": \"Tue, 28 Nov 2023 13:37:02 GMT\", \"content-length\": \"0\"} }\n

    The log may look something like this:

    Nov 28 08:36:58 ebpf03 bpfman[2081732]: The bytecode image: quay.io/bpfman-bytecode/xdp_pass:latest is signed\nNov 28 08:36:59 ebpf03 bpfman[2081732]: Loading program bytecode from container image: quay.io/bpfman-bytecode/xdp_pass:latest\nNov 28 08:37:01 ebpf03 bpfman[2081732]: The bytecode image: quay.io/bpfman/xdp-dispatcher:v2 is signed\nNov 28 08:37:02 ebpf03 bpfman[2081732]: BPFMAN load error: Error(\n                                            \"dispatcher attach failed on interface veth92cd99b: `bpf_link_create` failed\",\n                                        )\n

    The issue may be that there is already an XDP program, loaded outside of bpfman, on the given interface. bpfman allows multiple XDP programs on an interface by loading a dispatcher program as the interface's XDP program; the additional programs are then loaded as extensions to the dispatcher. Use bpftool to determine if any programs are already loaded on an interface:

    $ sudo bpftool net list dev veth92cd99b\nxdp:\nveth92cd99b(32) generic id 8733\n\ntc:\nveth92cd99b(32) clsact/ingress tc_dispatcher id 8922\n\nflow_dissector:\n
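
    If the externally loaded program is not needed, it can usually be detached with iproute2 before retrying the bpfman load. The command below is a sketch using the interface from the output above; pick the variant matching the attach mode bpftool reported (generic in this case):

    # Detach a generic-mode (skb) XDP program; for driver-mode attachments use \"xdp off\".\nsudo ip link set dev veth92cd99b xdpgeneric off\n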
    "},{"location":"governance/CODE_OF_CONDUCT/","title":"Contributor Covenant Code of Conduct","text":""},{"location":"governance/CODE_OF_CONDUCT/#our-pledge","title":"Our Pledge","text":"

    We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation.

    We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.

    "},{"location":"governance/CODE_OF_CONDUCT/#our-standards","title":"Our Standards","text":"

    Examples of behavior that contributes to a positive environment for our community include:

    • Demonstrating empathy and kindness toward other people
    • Being respectful of differing opinions, viewpoints, and experiences
    • Giving and gracefully accepting constructive feedback
    • Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
    • Focusing on what is best not just for us as individuals, but for the overall community

    Examples of unacceptable behavior include:

    • The use of sexualized language or imagery, and sexual attention or advances of any kind
    • Trolling, insulting or derogatory comments, and personal or political attacks
    • Public or private harassment
    • Publishing others' private information, such as a physical or email address, without their explicit permission
    • Other conduct which could reasonably be considered inappropriate in a professional setting
    "},{"location":"governance/CODE_OF_CONDUCT/#enforcement-responsibilities","title":"Enforcement Responsibilities","text":"

    Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.

    Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.

    "},{"location":"governance/CODE_OF_CONDUCT/#scope","title":"Scope","text":"

    This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.

    "},{"location":"governance/CODE_OF_CONDUCT/#enforcement","title":"Enforcement","text":"

    Instances of abusive, harassing, or otherwise unacceptable behavior may be reported directly to the community leaders responsible for enforcement. Maintainers are identified in the MAINTAINERS.md file and their contact information is on their GitHub profile page. All complaints will be reviewed and investigated promptly and fairly.

    All community leaders are obligated to respect the privacy and security of the reporter of any incident.

    "},{"location":"governance/CODE_OF_CONDUCT/#enforcement-guidelines","title":"Enforcement Guidelines","text":"

    Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:

    "},{"location":"governance/CODE_OF_CONDUCT/#1-correction","title":"1. Correction","text":"

    Community Impact: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.

    Consequence: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.

    "},{"location":"governance/CODE_OF_CONDUCT/#2-warning","title":"2. Warning","text":"

    Community Impact: A violation through a single incident or series of actions.

    Consequence: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.

    "},{"location":"governance/CODE_OF_CONDUCT/#3-temporary-ban","title":"3. Temporary Ban","text":"

    Community Impact: A serious violation of community standards, including sustained inappropriate behavior.

    Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.

    "},{"location":"governance/CODE_OF_CONDUCT/#4-permanent-ban","title":"4. Permanent Ban","text":"

    Community Impact: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.

    Consequence: A permanent ban from any sort of public interaction within the community.

    "},{"location":"governance/CODE_OF_CONDUCT/#attribution","title":"Attribution","text":"

    This Code of Conduct is adapted from the Contributor Covenant, version 2.1, available at https://www.contributor-covenant.org/version/2/1/code_of_conduct.html.

    Community Impact Guidelines were inspired by Mozilla's code of conduct enforcement ladder.

    For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.

    "},{"location":"governance/CONTRIBUTING/","title":"Contributing Guide","text":"
    • Ways to Contribute
    • Find an Issue
    • Ask for Help
    • Pull Request Lifecycle
    • Development Environment Setup
    • Signoff Your Commits
    • Pull Request Checklist

    Welcome! We are glad that you want to contribute to our project! 💖

    As you get started, you are in the best position to give us feedback on areas of our project that we need help with including:

    • Problems found during setting up a new developer environment
    • Gaps in our Quickstart Guide or documentation
    • Bugs in our automation scripts

    If anything doesn't make sense, or doesn't work when you run it, please open a bug report and let us know!

    "},{"location":"governance/CONTRIBUTING/#ways-to-contribute","title":"Ways to Contribute","text":"

    We welcome many different types of contributions including:

    • New features
    • Builds, CI/CD
    • Bug fixes
    • Documentation
    • Issue Triage
    • Answering questions on Slack/Mailing List
    • Web design
    • Communications / Social Media / Blog Posts
    • Release management

    Not everything happens through a GitHub pull request. Please come to our meetings or contact us and let's discuss how we can work together.

    "},{"location":"governance/CONTRIBUTING/#come-to-meetings","title":"Come to Meetings","text":"

    Absolutely everyone is welcome to come to any of our meetings. You never need an invite to join us. In fact, we want you to join us, even if you don’t have anything you feel like you want to contribute. Just being there is enough!

    You can find out more about our meetings here. You don’t have to turn on your video. The first time you come, introducing yourself is more than enough. Over time, we hope that you feel comfortable voicing your opinions, giving feedback on others’ ideas, and even sharing your own ideas and experiences.

    "},{"location":"governance/CONTRIBUTING/#find-an-issue","title":"Find an Issue","text":"

    We have good first issues for new contributors and help wanted issues suitable for any contributor. The good first issue label has extra information to help you make your first contribution. The help wanted label marks issues suitable for someone who isn't a core maintainer, and is a good place to move onto after your first pull request.

    Sometimes there won’t be any issues with these labels. That’s ok! There is likely still something for you to work on. If you want to contribute but you don’t know where to start or can't find a suitable issue, you can reach out to us on Slack and we will be happy to help.

    Once you see an issue that you'd like to work on, please post a comment saying that you want to work on it. Something like \"I want to work on this\" is fine.

    "},{"location":"governance/CONTRIBUTING/#ask-for-help","title":"Ask for Help","text":"

    The best way to reach us with a question when contributing is to ask on:

    • The original GitHub issue
    • Our Slack channel
    "},{"location":"governance/CONTRIBUTING/#pull-request-lifecycle","title":"Pull Request Lifecycle","text":"

    Pull requests are managed by Mergify.

    Our process is currently as follows:

    1. When you open a PR, a maintainer will automatically be assigned for review
    2. Make sure that your PR is passing CI - if you need help with failing checks, please feel free to ask!
    3. Once it is passing all CI checks, a maintainer will review your PR, and you may be asked to make changes.
    4. When you have received at least one approval from a maintainer, your PR will be merged automatically.

    In some cases, other changes may conflict with your PR. If this happens, you will get notified by a comment in the issue that your PR requires a rebase, and the needs-rebase label will be applied. Once a rebase has been performed, this label will be automatically removed.

    "},{"location":"governance/CONTRIBUTING/#development-environment-setup","title":"Development Environment Setup","text":"

    See Setup and Building bpfman.

    "},{"location":"governance/CONTRIBUTING/#signoff-your-commits","title":"Signoff Your Commits","text":""},{"location":"governance/CONTRIBUTING/#dco","title":"DCO","text":"

    Licensing is important to open source projects. It provides some assurances that the software will continue to be available under the terms that the author(s) desired. We require that contributors sign off on commits submitted to our project's repositories. The Developer Certificate of Origin (DCO) is a way to certify that you wrote and have the right to contribute the code you are submitting to the project.

    You sign off by adding the following to your commit messages. Your sign-off must match the git user and email associated with the commit.

    This is my commit message\n\nSigned-off-by: Your Name <your.name@example.com>\n

    Git has a -s command line option to do this automatically:

    git commit -s -m 'This is my commit message'\n

    If you forgot to do this and have not yet pushed your changes to the remote repository, you can amend your commit with the sign-off by running

    git commit --amend -s\n
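
    The Signed-off-by trailer is derived from your git identity, so if it is not configured yet, set it once with the standard git commands (substitute your own details):

    git config --global user.name \"Your Name\"\ngit config --global user.email \"your.name@example.com\"\n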
    "},{"location":"governance/CONTRIBUTING/#logical-grouping-of-commits","title":"Logical Grouping of Commits","text":"

    It is a recommended best practice to keep your changes as logically grouped as possible within individual commits. If while you're developing you prefer doing a number of commits that are \"checkpoints\" and don't represent a single logical change, please squash those together before asking for a review. When addressing review comments, please perform an interactive rebase and edit commits directly rather than adding new commits with messages like \"Fix review comments\".

    "},{"location":"governance/CONTRIBUTING/#commit-message-guidelines","title":"Commit message guidelines","text":"

    A good commit message should describe what changed and why.

    1. The first line should:

    • contain a short description of the change (preferably 50 characters or less, and no more than 72 characters)
    • be entirely in lowercase with the exception of proper nouns, acronyms, and words that refer to code, like function/variable names
    • be prefixed with the name of the sub crate being changed

    Examples:

    • bpfman: validate program section names
    • bpf: add dispatcher program test slot

    2. Keep the second line blank.

    3. Wrap all other lines at 72 columns (except for long URLs).
    4. If your patch fixes an open issue, you can add a reference to it at the end of the log. Use the Fixes: # prefix and the issue number. For other references use Refs: #. Refs may include multiple issues, separated by a comma.

    Examples:

    • Fixes: #1337
    • Refs: #1234

    Sample complete commit message:

    subcrate: explain the commit in one line\n\nBody of commit message is a few lines of text, explaining things\nin more detail, possibly giving some background about the issue\nbeing fixed, etc.\n\nThe body of the commit message can be several paragraphs, and\nplease do proper word-wrap and keep columns shorter than about\n72 characters or so. That way, `git log` will show things\nnicely even when it is indented.\n\nFixes: #1337\nRefs: #453, #154\n
    "},{"location":"governance/CONTRIBUTING/#pull-request-checklist","title":"Pull Request Checklist","text":"

    When you submit your pull request, or you push new commits to it, our automated systems will run some checks on your new code. We require that your pull request passes these checks, but we also have more criteria than just that before we can accept and merge it. We recommend that you check the following things locally before you submit your code:

    • Verify that Rust code has been formatted and that all clippy lints have been fixed:
    • Verify that Go code has been formatted and linted
    • Verify that Yaml files have been formatted (see Install Yaml Formatter)
    • Verify that Bash scripts have been linted using shellcheck

      cd bpfman/\ncargo xtask lint\n
    • Verify that unit tests are passing locally (see Unit Testing):

      cd bpfman/\ncargo xtask unit-test\n
    • Verify any changes to the bpfman API have been \"blessed\". After running the below command, any changes to any of the files in bpfman/xtask/public-api/*.txt indicate changes to the bpfman API. Verify that these changes were intentional. CI uses the latest nightly Rust toolchain, so make sure the public-apis are verified against latest.

      cd bpfman/\nrustup update nightly\ncargo +nightly xtask public-api --bless\n
    • Verify that integration tests are passing locally (see Basic Integration Tests):

      cd bpfman/\ncargo xtask integration-test\n
    • If developing the bpfman-operator, verify that bpfman-operator unit and integration tests are passing locally:

      See Kubernetes Operator Tests.

    "},{"location":"governance/GOVERNANCE/","title":"bpfman Project Governance","text":"

    The bpfman project is dedicated to creating an easy way to run eBPF programs on a single host and in clusters. This governance explains how the project is run.

    • Values
    • Maintainers
    • Becoming a Maintainer
    • Meetings
    • Code of Conduct Enforcement
    • Security Response Team
    • Voting
    • Modifications
    "},{"location":"governance/GOVERNANCE/#values","title":"Values","text":"

    The bpfman project and its leadership embrace the following values:

    • Openness: Communication and decision-making happens in the open and is discoverable for future reference. As much as possible, all discussions and work take place in public forums and open repositories.

    • Fairness: All stakeholders have the opportunity to provide feedback and submit contributions, which will be considered on their merits.

    • Community over Product or Company: Sustaining and growing our community takes priority over shipping code or sponsors' organizational goals. Each contributor participates in the project as an individual.

    • Inclusivity: We innovate through different perspectives and skill sets, which can only be accomplished in a welcoming and respectful environment.

    • Participation: Responsibilities within the project are earned through participation, and there is a clear path up the contributor ladder into leadership positions.

    "},{"location":"governance/GOVERNANCE/#maintainers","title":"Maintainers","text":"

    bpfman Maintainers have write access to the project GitHub repository. They can merge their patches or patches from others. The list of current maintainers can be found at MAINTAINERS.md. Maintainers collectively manage the project's resources and contributors.

    This privilege is granted with some expectation of responsibility: maintainers are people who care about the bpfman project and want to help it grow and improve. A maintainer is not just someone who can make changes, but someone who has demonstrated their ability to collaborate with the team, get the most knowledgeable people to review code and docs, contribute high-quality code, and follow through to fix issues (in code or tests).

    A maintainer is a contributor to the project's success and a citizen helping the project succeed.

    The collective team of all Maintainers is known as the Maintainer Council, which is the governing body for the project.

    "},{"location":"governance/GOVERNANCE/#becoming-a-maintainer","title":"Becoming a Maintainer","text":"

    To become a Maintainer you need to demonstrate the following:

    • commitment to the project:
      • participate in discussions, contributions, code and documentation reviews for 6 months or more,
      • perform reviews for 10 non-trivial pull requests,
      • contribute 10 non-trivial pull requests and have them merged,
    • ability to write quality code and/or documentation,
    • ability to collaborate with the team,
    • understanding of how the team works (policies, processes for testing and code review, etc.),
    • understanding of the project's code base and coding and documentation style.

    A new Maintainer must be proposed by an existing maintainer by opening a Pull Request on GitHub to update the MAINTAINERS.md file. A simple majority vote of existing Maintainers approves the application. Maintainer nominations will be evaluated without prejudice to employers or demographics.

    Maintainers who are selected will be granted the necessary GitHub rights.

    "},{"location":"governance/GOVERNANCE/#removing-a-maintainer","title":"Removing a Maintainer","text":"

    Maintainers may resign at any time if they feel that they will not be able to continue fulfilling their project duties.

    Maintainers may also be removed after being inactive, failing to fulfill their Maintainer responsibilities, violating the Code of Conduct, or for other reasons. Inactivity is defined as a period of very low or no activity in the project for a year or more, with no definite schedule to return to full Maintainer activity.

    A Maintainer may be removed at any time by a 2/3 vote of the remaining maintainers.

    Depending on the reason for removal, a Maintainer may be converted to Emeritus status. Emeritus Maintainers will still be consulted on some project matters and can be rapidly returned to Maintainer status if their availability changes.

    "},{"location":"governance/GOVERNANCE/#meetings","title":"Meetings","text":"

    Time zones permitting, Maintainers are expected to participate in the public developer meeting, detailed in the meetings document.

    Maintainers will also have closed meetings to discuss security reports or Code of Conduct violations. Such meetings should be scheduled by any Maintainer on receipt of a security issue or CoC report. All current Maintainers must be invited to such closed meetings, except for any Maintainer who is accused of a CoC violation.

    "},{"location":"governance/GOVERNANCE/#code-of-conduct","title":"Code of Conduct","text":"

    Code of Conduct violations by community members will be discussed and resolved on the private maintainer Slack channel.

    "},{"location":"governance/GOVERNANCE/#security-response-team","title":"Security Response Team","text":"

    The Maintainers will appoint a Security Response Team to handle security reports. This committee may simply consist of the Maintainer Council themselves. If this responsibility is delegated, the Maintainers will appoint a team of at least two contributors to handle it. The Maintainers will review who is assigned to this at least once a year.

    The Security Response Team is responsible for handling all reports of security holes and breaches according to the security policy.

    "},{"location":"governance/GOVERNANCE/#voting","title":"Voting","text":"

    While most business in bpfman is conducted by \"lazy consensus\", periodically the Maintainers may need to vote on specific actions or changes. A vote can be taken on the private developer Slack channel for security or conduct matters. Votes may also be taken at the developer meeting. Any Maintainer may demand a vote be taken.

    Most votes require a simple majority of all Maintainers to succeed, except where otherwise noted. Two-thirds majority votes mean at least two-thirds of all existing maintainers.

    "},{"location":"governance/GOVERNANCE/#modifying-this-charter","title":"Modifying this Charter","text":"

    Changes to this Governance and its supporting documents may be approved by a 2/3 vote of the Maintainers.

    "},{"location":"governance/MAINTAINERS/","title":"Maintainers","text":"

    See CONTRIBUTING.md for general contribution guidelines. See GOVERNANCE.md for governance guidelines and maintainer responsibilities. See CODEOWNERS for a detailed list of owners for the various source directories.

    Name | Employer | Responsibilities
    Dave Tucker | Red Hat | Catch all
    Andrew Stoycos | Red Hat | bpfman-operator, bpfman-agent
    Andre Fredette | Red Hat | All things tc-bpf
    Billy McFall | Red Hat | All things systemd

    "},{"location":"governance/MEETINGS/","title":"bpfman Community Meetings","text":""},{"location":"governance/MEETINGS/#meeting-time","title":"Meeting time","text":"

    We meet every Thursday at 10:00 AM Eastern Time. The meetings last up to 1 hour.

    "},{"location":"governance/MEETINGS/#meeting-location","title":"Meeting location","text":"

    Video call link: https://meet.google.com/ggz-zkmp-pxx
    Or dial: (US) +1 984-221-0859 PIN: 613 588 790#
    More phone numbers: https://tel.meet/ggz-zkmp-pxx?pin=3270510926446

    "},{"location":"governance/MEETINGS/#meeting-agenda-and-minutes","title":"Meeting agenda and minutes","text":"

    Meeting agenda

    "},{"location":"governance/REVIEWING/","title":"Reviewing Guide","text":"

    This document covers who may review pull requests for this project, and provides guidance on performing code reviews that meet our community standards and code of conduct. All reviewers must read this document and agree to follow the project review guidelines. Reviewers who do not follow these guidelines may have their privileges revoked.

    "},{"location":"governance/REVIEWING/#the-reviewer-role","title":"The Reviewer Role","text":"

    Only maintainers are REQUIRED to review pull requests. Other contributors may opt to review pull requests, but any LGTM from a non-maintainer won't count towards the required number of Approved Reviews in the Mergify policy.

    "},{"location":"governance/REVIEWING/#values","title":"Values","text":"

    All reviewers must abide by the Code of Conduct and are also protected by it. A reviewer should not tolerate poor behavior and is encouraged to report any behavior that violates the Code of Conduct. All of our values listed above are distilled from our Code of Conduct.

    Below are concrete examples of how it applies to code review specifically:

    "},{"location":"governance/REVIEWING/#inclusion","title":"Inclusion","text":"

    Be welcoming and inclusive. You should proactively ensure that the author is successful. While any particular pull request may not ultimately be merged, overall we want people to have a great experience and be willing to contribute again. Answer the questions they didn't know to ask or offer concrete help when they appear stuck.

    "},{"location":"governance/REVIEWING/#sustainability","title":"Sustainability","text":"

    Avoid burnout by enforcing healthy boundaries. Here are some examples of how a reviewer is encouraged to act to take care of themselves:

    • Authors should meet baseline expectations when submitting a pull request, such as writing tests.
    • If your availability changes, you can step down from a pull request and have someone else assigned.
    • If interactions with an author are not following the code of conduct, close the PR and raise it with your Code of Conduct committee or point of contact. It's not your job to coax people into behaving.
    "},{"location":"governance/REVIEWING/#trust","title":"Trust","text":"

    Be trustworthy. During a review, your actions both build and help maintain the trust that the community has placed in this project. Below are examples of ways that we build trust:

    • Transparency - If a pull request won't be merged, clearly say why and close it. If a pull request won't be reviewed for a while, let the author know so they can set expectations and understand why it's blocked.
    • Integrity - Put the project's best interests ahead of personal relationships or company affiliations when deciding if a change should be merged.
    • Stability - Only merge when the change won't negatively impact project stability. It can be tempting to merge a pull request that doesn't meet our quality standards, for example when the review has been delayed, or because we are trying to deliver new features quickly, but regressions can significantly hurt trust in our project.
    "},{"location":"governance/REVIEWING/#process","title":"Process","text":"
    • Reviewers are automatically assigned based on the CODEOWNERS file.
    • Reviewers should wait for automated checks to pass before reviewing
    • At least 1 approved review is required from a maintainer before a pull request can be merged
    • All CI checks must pass
    • If a PR is stuck for some reason, it is down to the reviewer to determine the best course of action:
    • PRs may be closed if they are no longer relevant
    • A maintainer may choose to carry a PR forward on their own, but they should ALWAYS include the original author's commits
    • A maintainer may choose to open additional PRs to help lay a foundation on which the stuck PR can be unstuck. They may either rebase the stuck PR themselves or leave this to the author
    • Maintainers should not merge their pull requests without a review
    • Maintainers should let the Mergify bot merge PRs and not merge PRs directly
    • In times of need, i.e. to fix pressing security issues, the Maintainers may, at their discretion, merge PRs without review. They should at least add a comment to the PR explaining why they did so.
    "},{"location":"governance/REVIEWING/#checklist","title":"Checklist","text":"

    Below are a set of common questions that apply to all pull requests:

    • [ ] Is this PR targeting the correct branch?
    • [ ] Does the commit message provide an adequate description of the change?
    • [ ] Does the affected code have corresponding tests?
    • [ ] Are the changes documented, not just with inline documentation, but also with conceptual documentation such as an overview of a new feature, or task-based documentation like a tutorial? Consider if this change should be announced on your project blog.
    • [ ] Does this introduce breaking changes that would require an announcement or bumping of the major version?
    • [ ] Does this PR introduce any new dependencies?
    "},{"location":"governance/REVIEWING/#reading-list","title":"Reading List","text":"

    Reviewers are encouraged to read the following articles for help with common reviewer tasks:

    • The Art of Closing: How to close an unfinished or rejected pull request
    • Kindness and Code Reviews: Improving the Way We Give Feedback
    • Code Review Guidelines for Humans: Examples of good and bad feedback
    "},{"location":"governance/SECURITY/","title":"Security Policy","text":""},{"location":"governance/SECURITY/#supported-versions","title":"Supported Versions","text":"

    No released versions of bpfman and bpfman-agent or bpfman-operator will receive regular security updates until a mainline release has been performed. A reported and fixed vulnerability will be included in the next minor release, which, depending on the severity of the vulnerability, may be immediate.

    "},{"location":"governance/SECURITY/#reporting-a-vulnerability","title":"Reporting a Vulnerability","text":"

    To report a vulnerability, please use the Private Vulnerability Reporting Feature on GitHub. We will endeavour to respond within 48 hours of reporting. If a vulnerability is reported but considered low priority, it may be converted into an issue and handled on the public issue tracker. Should a vulnerability be considered severe, we will endeavour to patch it within 48 hours of acceptance, and may ask you to collaborate with us on a temporary private fork of the repository.

    "},{"location":"blog/archive/2024/","title":"2024","text":""},{"location":"blog/archive/2023/","title":"2023","text":""},{"location":"blog/category/community-meeting/","title":"Community Meeting","text":""},{"location":"blog/category/2024/","title":"2024","text":""}]} \ No newline at end of file diff --git a/main/sitemap.xml.gz b/main/sitemap.xml.gz index dcb7ecc52..ea20813e6 100644 Binary files a/main/sitemap.xml.gz and b/main/sitemap.xml.gz differ