
Commit

nits
Signed-off-by: Aryan-sharma11 <[email protected]>
Aryan-sharma11 committed Feb 5, 2025
1 parent 4f9fe35 commit d325cfd
Showing 5 changed files with 25 additions and 30 deletions.
1 change: 0 additions & 1 deletion pkg/KubeArmorController/common/common.go
@@ -224,7 +224,6 @@ func RemoveApparmorAnnotation(pod *corev1.Pod) {
}

func CheckKubearmorStatus(nodeName string, c *kubernetes.Clientset) (bool, error) {
fmt.Println("checking kubearmor status")
pods, err := c.CoreV1().Pods("kubearmor").List(context.TODO(), metav1.ListOptions{
LabelSelector: "kubearmor-app=kubearmor",
})
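Note: the hunk above only shows the opening of CheckKubearmorStatus. For orientation, here is a minimal, self-contained sketch of how such a node-scoped check can be completed; the node-name match and Running-phase test are assumptions for illustration, not the function's actual body.

package sketch

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// checkKubearmorStatusSketch mirrors the visible start of CheckKubearmorStatus:
// list KubeArmor pods by label, then report whether one is running on the node.
// The per-pod matching logic below is an illustrative assumption.
func checkKubearmorStatusSketch(nodeName string, c *kubernetes.Clientset) (bool, error) {
	pods, err := c.CoreV1().Pods("kubearmor").List(context.TODO(), metav1.ListOptions{
		LabelSelector: "kubearmor-app=kubearmor",
	})
	if err != nil {
		return false, fmt.Errorf("listing kubearmor pods: %w", err)
	}
	for _, pod := range pods.Items {
		if pod.Spec.NodeName == nodeName && pod.Status.Phase == corev1.PodRunning {
			return true, nil
		}
	}
	return false, nil
}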
10 changes: 5 additions & 5 deletions pkg/KubeArmorController/handlers/pod_mutation.go
@@ -58,7 +58,7 @@ func (a *PodAnnotator) Handle(ctx context.Context, req admission.Request) admiss
a.Cluster.ClusterLock.RLock()
// homogenousApparmor := a.Cluster.HomogenousApparmor
if _, exist := a.Cluster.Nodes[nodename]; exist {
if !a.Cluster.Nodes[nodename].KubeArmorActive {
if a.Cluster.Nodes[nodename].KubeArmorActive {
annotate = true
}
}
@@ -89,17 +89,17 @@ func (a *PodAnnotator) Handle(ctx context.Context, req admission.Request) admiss
// == common annotations == //
common.AddCommonAnnotations(pod)
nodename := pod.Spec.NodeName
apparmor := false
annotate := false
// == Apparmor annotations == //
a.Cluster.ClusterLock.RLock()
// homogenousApparmor := a.Cluster.HomogenousApparmor
if _, exist := a.Cluster.Nodes[nodename]; exist {
if !a.Cluster.Nodes[nodename].SkipNode {
apparmor = true
if a.Cluster.Nodes[nodename].KubeArmorActive {
annotate = true
}
}
a.Cluster.ClusterLock.RUnlock()
if apparmor {
if annotate {
common.AppArmorAnnotator(pod)
}

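Both hunks in this file implement the same rule: AppArmor annotations are applied only when KubeArmor is reported active on the pod's node. A condensed, hedged sketch of that rule, using simplified stand-ins for the controller's Cluster and NodeInfo types (the field names here are illustrative):

package sketch

import "sync"

// nodeInfo and clusterState are simplified stand-ins for types.NodeInfo and
// types.Cluster as used by the mutation webhook.
type nodeInfo struct {
	KubeArmorActive bool
	Enforcer        string
}

type clusterState struct {
	lock  sync.RWMutex
	nodes map[string]*nodeInfo
}

// shouldAnnotate captures the rule both hunks now implement: annotate a pod
// only when KubeArmor is reported active on its node.
func shouldAnnotate(c *clusterState, nodeName string) bool {
	c.lock.RLock()
	defer c.lock.RUnlock()
	n, exist := c.nodes[nodeName]
	return exist && n.KubeArmorActive
}

In the patched Handle, the equivalent check gates the call to common.AppArmorAnnotator(pod).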
27 changes: 13 additions & 14 deletions pkg/KubeArmorController/informer/nodewatcher.go
@@ -47,14 +47,13 @@ func NodeWatcher(c *kubernetes.Clientset, cluster *types.Cluster, log logr.Logge

cluster.Nodes[node.Name] = &types.NodeInfo{}
cluster.Nodes[node.Name].Enforcer = enforcer
fmt.Println("adding enforcer")

kubearmorStatus, err := common.CheckKubearmorStatus(node.Name, c)
if err != nil {
log.Info(fmt.Sprintf("unable to get kubearmor status on node %s : %s", node.Name, err.Error()))
}
cluster.Nodes[node.Name].KubeArmorActive = kubearmorStatus
if cluster.Nodes[node.Name].SkipNode {
if !cluster.Nodes[node.Name].KubeArmorActive {
log.Info(fmt.Sprintf("kubearmor not found on node %s", node.Name))
}
}
@@ -95,19 +94,19 @@ func NodeWatcher(c *kubernetes.Clientset, cluster *types.Cluster, log logr.Logge
if enforcer != cluster.Nodes[node.Name].Enforcer {
delete(cluster.Nodes, node.Name)
}
} else {
if enforcer == "apparmor" {
cluster.Nodes[node.Name].Enforcer = enforcer
var err error
kubearmorStatus, err := common.CheckKubearmorStatus(node.Name, c)
if err != nil {
log.Info(fmt.Sprintf("unable to get kubearmor status on node %s : %s", node.Name, err.Error()))
}
cluster.Nodes[node.Name].KubeArmorActive = kubearmorStatus
}

if cluster.Nodes[node.Name].SkipNode {
log.Info(fmt.Sprintf("kubearmor not found on node %s", node.Name))
}
if enforcer == "apparmor" {
cluster.Nodes[node.Name].Enforcer = enforcer
var err error
kubearmorStatus, err := common.CheckKubearmorStatus(node.Name, c)
if err != nil {
log.Error(err, fmt.Sprintf("unable to get kubearmor status on node %s", node.Name))
}
cluster.Nodes[node.Name].KubeArmorActive = kubearmorStatus

if !cluster.Nodes[node.Name].KubeArmorActive {
log.Info(fmt.Sprintf("kubearmor not found on node %s", node.Name))
}
}
// re-compute homogeneous status
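After this change, the node add and update paths share one pattern on AppArmor nodes: record the enforcer, ask common.CheckKubearmorStatus whether KubeArmor runs on the node, and log when it does not. A hedged, self-contained sketch of that shared step; the checkStatus parameter stands in for common.CheckKubearmorStatus bound to a clientset and is not the informer's actual signature:

package sketch

import (
	"fmt"

	"github.com/go-logr/logr"
)

// kubearmorActive sketches the per-node check the add and update handlers now
// both perform; the caller stores the result in cluster.Nodes[name].KubeArmorActive.
func kubearmorActive(nodeName string, checkStatus func(string) (bool, error), log logr.Logger) bool {
	active, err := checkStatus(nodeName)
	if err != nil {
		log.Error(err, fmt.Sprintf("unable to get kubearmor status on node %s", nodeName))
	}
	if !active {
		log.Info(fmt.Sprintf("kubearmor not found on node %s", nodeName))
	}
	return active
}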
@@ -96,7 +96,7 @@ func (r *PodRefresherReconciler) Reconcile(ctx context.Context, req ctrl.Request
if ref.Kind == "ReplicaSet" {
replicaSet, err := r.Corev1.AppsV1().ReplicaSets(pod.Namespace).Get(ctx, ref.Name, metav1.GetOptions{})
if err != nil {
fmt.Printf("Failed to get ReplicaSet %s: %v\n", ref.Name, err)
log.Error(err, fmt.Sprintf("Failed to get ReplicaSet %s:", ref.Name))
continue
}
// Check if the ReplicaSet is managed by a Deployment
@@ -190,10 +190,9 @@ func restartResources(resourcesMap map[string]ResourceInfo, corev1 *kubernetes.C
for name, resInfo := range resourcesMap {
switch resInfo.kind {
case "Deployment":
fmt.Println("Resource Info", resInfo.namespaceName, name, resInfo.kind)
dep, err := corev1.AppsV1().Deployments(resInfo.namespaceName).Get(ctx, name, metav1.GetOptions{})
if err != nil {
fmt.Printf("error geting deployments : %s", err.Error())
log.Error(err, fmt.Sprintf("error geting deployment %s in namespace %s", name, resInfo.namespaceName))
}
log.Info("restarting deployment %s in namespace %s", name, resInfo.namespaceName)
// Update the Pod template's annotations to trigger a rolling restart
@@ -204,13 +203,12 @@ func restartResources(resourcesMap map[string]ResourceInfo, corev1 *kubernetes.C
// Patch the Deployment
_, err = corev1.AppsV1().Deployments(resInfo.namespaceName).Update(ctx, dep, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update Deployment %s: %v", name, err)
log.Error(err, fmt.Sprintf("error updating deployment %s in namespace %s", name, resInfo.namespaceName))
}

case "Statefulset":
statefulSet, err := corev1.AppsV1().StatefulSets(resInfo.namespaceName).Get(ctx, name, metav1.GetOptions{})
if err != nil {
fmt.Printf("error geting statefulset : %s", err.Error())
log.Error(err, fmt.Sprintf("error geting statefulset %s in namespace %s", name, resInfo.namespaceName))
}
log.Info("restarting statefulset " + name + " in namespace " + resInfo.namespaceName)
// Update the Pod template's annotations to trigger a rolling restart
@@ -221,13 +219,13 @@ func restartResources(resourcesMap map[string]ResourceInfo, corev1 *kubernetes.C
// Patch the Deployment
_, err = corev1.AppsV1().StatefulSets(resInfo.namespaceName).Update(ctx, statefulSet, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update statefulset %s: %v", name, err)
log.Error(err, fmt.Sprintf("error updating statefulset %s in namespace %s", name, resInfo.namespaceName))
}

case "Daemonset":
daemonSet, err := corev1.AppsV1().DaemonSets(resInfo.namespaceName).Get(ctx, name, metav1.GetOptions{})
if err != nil {
fmt.Printf("error geting daemonset : %s", err.Error())
log.Error(err, fmt.Sprintf("error geting daemonset %s in namespace %s", name, resInfo.namespaceName))
}
log.Info("restarting daemonset " + name + " in namespace " + resInfo.namespaceName)
// Update the Pod template's annotations to trigger a rolling restart
@@ -238,7 +236,7 @@ func restartResources(resourcesMap map[string]ResourceInfo, corev1 *kubernetes.C
// Patch the Deployment
_, err = corev1.AppsV1().DaemonSets(resInfo.namespaceName).Update(ctx, daemonSet, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update daemonset %s: %v", name, err)
log.Error(err, fmt.Sprintf("error updating daemonset %s in namespace %s", name, resInfo.namespaceName))
}
}
// wait for few seconds after updating every resource
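The restart path relies on the usual rolling-restart idiom: stamp an annotation on the workload's pod template and update the object so its controller replaces the pods. A minimal sketch for the Deployment case; the annotation key below is an assumption, since this hunk does not show the key the controller actually writes.

package sketch

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// restartDeployment stamps a timestamp annotation on the pod template so the
// Deployment controller rolls its pods, mirroring the pattern used in
// restartResources. The annotation key is illustrative.
func restartDeployment(ctx context.Context, c *kubernetes.Clientset, namespace, name string) error {
	dep, err := c.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("getting deployment %s/%s: %w", namespace, name, err)
	}
	if dep.Spec.Template.Annotations == nil {
		dep.Spec.Template.Annotations = map[string]string{}
	}
	dep.Spec.Template.Annotations["kubearmor.io/restartedAt"] = time.Now().Format(time.RFC3339)
	_, err = c.AppsV1().Deployments(namespace).Update(ctx, dep, metav1.UpdateOptions{})
	return err
}

The StatefulSet and DaemonSet branches follow the same pattern and differ only in the AppsV1 client used.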
1 change: 0 additions & 1 deletion pkg/KubeArmorController/types/types.go
@@ -18,7 +18,6 @@ type Cluster struct {
TotalNodes int //total no of nodes present
}
type NodeInfo struct {
SkipNode bool
KubeArmorActive bool
Enforcer string
}
