From 3fc8e3b6a2783284c2f90e424dd4eead9e374b35 Mon Sep 17 00:00:00 2001 From: Nishant Parekh Date: Mon, 7 Oct 2024 12:23:35 -0400 Subject: [PATCH] add nodeName as a new parameter in order to support MNO usecase * Pass in the nodeName with a running GM deployment for testing * Use this nodeName to identify the correct linuxptp-daemon pod to use in an MNO deployment * NodeName is stored part of the collectionConstructor available for any collector under use * fix linter issues Signed-off-by: Nishant Parekh --- pkg/clients/clientset.go | 15 ++++++++++---- pkg/clients/exec_command.go | 10 ++++++---- pkg/clients/exec_command_test.go | 16 ++++++++++----- pkg/cmd/collect.go | 2 ++ pkg/cmd/common.go | 23 +++++++++++++++++++--- pkg/cmd/verifyEnv.go | 3 ++- pkg/collectors/collector.go | 1 + pkg/collectors/contexts/contexts.go | 4 ++-- pkg/collectors/dev_info_collector.go | 2 +- pkg/collectors/devices/device_info_test.go | 5 ++++- pkg/collectors/devices/dpll_fs_test.go | 2 +- pkg/collectors/devices/dpll_netlink.go | 4 ++-- pkg/collectors/devices/gps_ubx_test.go | 2 +- pkg/collectors/devices/gps_ubx_ver_test.go | 2 +- pkg/collectors/devices/pmc_test.go | 2 +- pkg/collectors/dpll_collector.go | 2 +- pkg/collectors/dpll_collector_fs.go | 2 +- pkg/collectors/gps_ubx_collector.go | 2 +- pkg/collectors/log_follower.go | 4 +++- pkg/collectors/pmc_collector.go | 2 +- pkg/runner/runner.go | 4 ++++ pkg/verify/verify.go | 21 +++++++++++--------- 22 files changed, 89 insertions(+), 41 deletions(-) diff --git a/pkg/clients/clientset.go b/pkg/clients/clientset.go index 1e0bc48b..c60f7008 100644 --- a/pkg/clients/clientset.go +++ b/pkg/clients/clientset.go @@ -98,8 +98,13 @@ func ClearClientSet() { clientset = Clientset{} } -func (clientsholder *Clientset) FindPodNameFromPrefix(namespace, prefix string) (string, error) { - podList, err := clientsholder.K8sClient.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) +func (clientsholder *Clientset) 
FindPodNameFromPrefix(namespace, prefix, nodeName string) (string, error) { + listOpts := metav1.ListOptions{} + if len(nodeName) > 0 { + listOpts = metav1.ListOptions{FieldSelector: "spec.nodeName=" + nodeName} + } + podList, err := clientsholder.K8sClient.CoreV1().Pods(namespace).List(context.TODO(), listOpts) + if err != nil { return "", fmt.Errorf("failed to getting pod list: %w", err) } @@ -115,10 +120,12 @@ func (clientsholder *Clientset) FindPodNameFromPrefix(namespace, prefix string) switch len(podNames) { case 0: - return "", fmt.Errorf("no pod with prefix %v found in namespace %v", prefix, namespace) + return "", fmt.Errorf("no pod with prefix %v found in namespace %v on node %v", prefix, namespace, + nodeName) case 1: return podNames[0], nil default: - return "", fmt.Errorf("too many (%v) pods with prefix %v found in namespace %v", len(podNames), prefix, namespace) + return "", fmt.Errorf("too many (%v) pods with prefix %v found in namespace %v on node %v", + len(podNames), prefix, namespace, nodeName) } } diff --git a/pkg/clients/exec_command.go b/pkg/clients/exec_command.go index e7d0d495..c809b740 100644 --- a/pkg/clients/exec_command.go +++ b/pkg/clients/exec_command.go @@ -32,17 +32,18 @@ type ExecContext interface { var NewSPDYExecutor = remotecommand.NewSPDYExecutor -// ContainerExecContext encapsulates the context in which a command is run; the namespace, pod, and container. +// ContainerExecContext encapsulates the context in which a command is run; the nodeName, the namespace, pod, and container. 
type ContainerExecContext struct { clientset *Clientset namespace string podName string containerName string podNamePrefix string + nodeName string } func (c *ContainerExecContext) refresh() error { - newPodname, err := c.clientset.FindPodNameFromPrefix(c.namespace, c.podNamePrefix) + newPodname, err := c.clientset.FindPodNameFromPrefix(c.namespace, c.podNamePrefix, c.nodeName) if err != nil { return err } @@ -52,9 +53,9 @@ func (c *ContainerExecContext) refresh() error { func NewContainerContext( clientset *Clientset, - namespace, podNamePrefix, containerName string, + namespace, podNamePrefix, containerName, nodeName string, ) (*ContainerExecContext, error) { - podName, err := clientset.FindPodNameFromPrefix(namespace, podNamePrefix) + podName, err := clientset.FindPodNameFromPrefix(namespace, podNamePrefix, nodeName) if err != nil { return &ContainerExecContext{}, err } @@ -64,6 +65,7 @@ func NewContainerContext( containerName: containerName, podNamePrefix: podNamePrefix, clientset: clientset, + nodeName: nodeName, } return &ctx, nil } diff --git a/pkg/clients/exec_command_test.go b/pkg/clients/exec_command_test.go index 4eb012ce..080eff6f 100644 --- a/pkg/clients/exec_command_test.go +++ b/pkg/clients/exec_command_test.go @@ -24,6 +24,9 @@ var notATestPod = &v1.Pod{ Namespace: "TestNamespace", Annotations: map[string]string{}, }, + Spec: v1.PodSpec{ + NodeName: "TestNode", + }, } var testPod = &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -31,6 +34,9 @@ var testPod = &v1.Pod{ Namespace: "TestNamespace", Annotations: map[string]string{}, }, + Spec: v1.PodSpec{ + NodeName: "TestNode", + }, } var _ = Describe("NewContainerContext", func() { @@ -49,7 +55,7 @@ var _ = Describe("NewContainerContext", func() { fakeK8sClient := fakeK8s.NewSimpleClientset(notATestPod) clientset.K8sClient = fakeK8sClient - _, err := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer") + _, err := clients.NewContainerContext(clientset, "TestNamespace", "Test", 
"TestContainer", "TestNode") Expect(err).To(HaveOccurred()) }) }) @@ -58,7 +64,7 @@ var _ = Describe("NewContainerContext", func() { fakeK8sClient := fakeK8s.NewSimpleClientset(notATestPod, testPod) clientset.K8sClient = fakeK8sClient - ctx, err := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer") + ctx, err := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer", "TestNode") Expect(err).NotTo(HaveOccurred()) Expect(ctx.GetNamespace()).To(Equal("TestNamespace")) Expect(ctx.GetContainerName()).To(Equal("TestContainer")) @@ -81,7 +87,7 @@ var _ = Describe("ExecCommandContainer", func() { return []byte(expectedStdOut), []byte(expectedStdErr), nil } clients.NewSPDYExecutor = testutils.NewFakeNewSPDYExecutor(responder, nil) - ctx, _ := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer") + ctx, _ := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer", "TestNode") cmd := []string{"my", "test", "command"} stdout, stderr, err := ctx.ExecCommand(cmd) Expect(err).NotTo(HaveOccurred()) @@ -100,7 +106,7 @@ var _ = Describe("ExecCommandContainer", func() { return []byte(expectedStdOut), []byte(expectedStdErr), nil } clients.NewSPDYExecutor = testutils.NewFakeNewSPDYExecutor(responder, expectedErr) - ctx, _ := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer") + ctx, _ := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer", "TestNode") cmd := []string{"my", "test", "command"} stdout, stderr, err := ctx.ExecCommand(cmd) Expect(err).To(HaveOccurred()) @@ -119,7 +125,7 @@ var _ = Describe("ExecCommandContainer", func() { return []byte(expectedStdOut), []byte(expectedStdErr), expectedErr } clients.NewSPDYExecutor = testutils.NewFakeNewSPDYExecutor(responder, nil) - ctx, _ := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer") + ctx, _ := clients.NewContainerContext(clientset, 
"TestNamespace", "Test", "TestContainer", "TestNode") cmd := []string{"my", "test", "command"} stdout, stderr, err := ctx.ExecCommand(cmd) Expect(err).To(HaveOccurred()) diff --git a/pkg/cmd/collect.go b/pkg/cmd/collect.go index 3a1babd9..5558fa4a 100644 --- a/pkg/cmd/collect.go +++ b/pkg/cmd/collect.go @@ -81,6 +81,7 @@ var collectCmd = &cobra.Command{ collectionRunner.Run( kubeConfig, outputFile, + nodeName, requestedDuration, pollInterval, devInfoAnnouceInterval, @@ -101,6 +102,7 @@ func init() { //nolint:funlen // Allow this to get a little long AddOutputFlag(collectCmd) AddFormatFlag(collectCmd) AddInterfaceFlag(collectCmd) + AddNodeNameFlag(collectCmd) collectCmd.Flags().StringVarP( &requestedDurationStr, diff --git a/pkg/cmd/common.go b/pkg/cmd/common.go index 81fe50d2..133c3e49 100644 --- a/pkg/cmd/common.go +++ b/pkg/cmd/common.go @@ -13,16 +13,23 @@ var ( outputFile string useAnalyserJSON bool ptpInterface string + nodeName string ) func AddKubeconfigFlag(targetCmd *cobra.Command) { - targetCmd.Flags().StringVarP(&kubeConfig, "kubeconfig", "k", "", "Path to the kubeconfig file") + targetCmd.Flags().StringVarP(&kubeConfig, + "kubeconfig", + "k", "", + "Path to the kubeconfig file") err := targetCmd.MarkFlagRequired("kubeconfig") utils.IfErrorExitOrPanic(err) } func AddOutputFlag(targetCmd *cobra.Command) { - targetCmd.Flags().StringVarP(&outputFile, "output", "o", "", "Path to the output file") + targetCmd.Flags().StringVarP(&outputFile, + "output", + "o", "", + "Path to the output file") } func AddFormatFlag(targetCmd *cobra.Command) { @@ -36,7 +43,17 @@ func AddFormatFlag(targetCmd *cobra.Command) { } func AddInterfaceFlag(targetCmd *cobra.Command) { - targetCmd.Flags().StringVarP(&ptpInterface, "interface", "i", "", "Name of the PTP interface") + targetCmd.Flags().StringVarP(&ptpInterface, + "interface", + "i", "", + "Name of the PTP interface") err := targetCmd.MarkFlagRequired("interface") utils.IfErrorExitOrPanic(err) } + +func 
AddNodeNameFlag(targetCmd *cobra.Command) { + targetCmd.Flags().StringVarP(&nodeName, + "nodeName", + "n", "", + "Name of the Node under test (valid only for MNO Use case)") +} diff --git a/pkg/cmd/verifyEnv.go index ca7ecc36..e9c701ca 100644 --- a/pkg/cmd/verifyEnv.go +++ b/pkg/cmd/verifyEnv.go @@ -20,7 +20,7 @@ var verifyEnvCmd = &cobra.Command{ Short: "verify the environment is ready for collection", Long: `verify the environment is ready for collection`, Run: func(cmd *cobra.Command, args []string) { - verify.Verify(ptpInterface, kubeConfig, useAnalyserJSON) + verify.Verify(ptpInterface, kubeConfig, useAnalyserJSON, nodeName) }, } @@ -31,4 +31,5 @@ func init() { AddOutputFlag(verifyEnvCmd) AddFormatFlag(verifyEnvCmd) AddInterfaceFlag(verifyEnvCmd) + AddNodeNameFlag(verifyEnvCmd) } diff --git a/pkg/collectors/collector.go index d2c5b1bd..f97547b7 100644 --- a/pkg/collectors/collector.go +++ b/pkg/collectors/collector.go @@ -24,6 +24,7 @@ type CollectionConstructor struct { Clientset *clients.Clientset ErroredPolls chan PollResult PTPInterface string + PTPNodeName string Msg string LogsOutputFile string TempDir string diff --git a/pkg/collectors/contexts/contexts.go index 3f22fcc3..662b964c 100644 --- a/pkg/collectors/contexts/contexts.go +++ b/pkg/collectors/contexts/contexts.go @@ -20,8 +20,8 @@ const ( NetlinkDebugContainerImage = "quay.io/redhat-partner-solutions/dpll-debug:0.1" ) -func GetPTPDaemonContext(clientset *clients.Clientset) (clients.ExecContext, error) { - ctx, err := clients.NewContainerContext(clientset, PTPNamespace, PTPPodNamePrefix, PTPContainer) +func GetPTPDaemonContext(clientset *clients.Clientset, ptpNodeName string) (clients.ExecContext, error) { + ctx, err := clients.NewContainerContext(clientset, PTPNamespace, PTPPodNamePrefix, PTPContainer, ptpNodeName) if err != nil { return ctx, fmt.Errorf("could not create container context %w", err) } 
diff --git a/pkg/collectors/dev_info_collector.go b/pkg/collectors/dev_info_collector.go index f3210d63..058401ed 100644 --- a/pkg/collectors/dev_info_collector.go +++ b/pkg/collectors/dev_info_collector.go @@ -151,7 +151,7 @@ func verify(ptpDevInfo *devices.PTPDeviceInfo, constructor *CollectionConstructo // Returns a new DevInfoCollector from the CollectionConstuctor Factory func NewDevInfoCollector(constructor *CollectionConstructor) (Collector, error) { // Build DPPInfoFetcher ahead of time call to GetPTPDeviceInfo will build the other - ctx, err := contexts.GetPTPDaemonContext(constructor.Clientset) + ctx, err := contexts.GetPTPDaemonContext(constructor.Clientset, constructor.PTPNodeName) if err != nil { return &DevInfoCollector{}, fmt.Errorf("failed to create DevInfoCollector: %w", err) } diff --git a/pkg/collectors/devices/device_info_test.go b/pkg/collectors/devices/device_info_test.go index 16dbbe71..2e47309d 100644 --- a/pkg/collectors/devices/device_info_test.go +++ b/pkg/collectors/devices/device_info_test.go @@ -40,6 +40,9 @@ var testPod = &v1.Pod{ Namespace: "TestNamespace", Annotations: map[string]string{}, }, + Spec: v1.PodSpec{ + NodeName: "TestNodeName", + }, } var _ = Describe("NewContainerContext", func() { @@ -84,7 +87,7 @@ var _ = Describe("NewContainerContext", func() { response[expectedInput] = []byte(expectedOutput) - ctx, err := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer") + ctx, err := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer", "TestNodeName") Expect(err).NotTo(HaveOccurred()) info, err := devices.GetPTPDeviceInfo("aFakeInterface", ctx) Expect(err).NotTo(HaveOccurred()) diff --git a/pkg/collectors/devices/dpll_fs_test.go b/pkg/collectors/devices/dpll_fs_test.go index 9215a72e..967db476 100644 --- a/pkg/collectors/devices/dpll_fs_test.go +++ b/pkg/collectors/devices/dpll_fs_test.go @@ -54,7 +54,7 @@ var _ = Describe("NewContainerContext", func() { 
response[expectedInput] = []byte(expectedOutput) - ctx, err := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer") + ctx, err := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer", "TestNodeName") Expect(err).NotTo(HaveOccurred()) info, err := devices.GetDevDPLLFilesystemInfo(ctx, "aFakeInterface") Expect(err).NotTo(HaveOccurred()) diff --git a/pkg/collectors/devices/dpll_netlink.go b/pkg/collectors/devices/dpll_netlink.go index 51018a93..2c495415 100644 --- a/pkg/collectors/devices/dpll_netlink.go +++ b/pkg/collectors/devices/dpll_netlink.go @@ -187,8 +187,8 @@ func postProcessDPLLNetlinkClockID(result map[string]string) (map[string]any, er } type NetlinkClockID struct { - ClockID *big.Int `fetcherKey:"clockID" json:"clockId"` - Timestamp string `fetcherKey:"date" json:"timestamp"` + ClockID *big.Int `fetcherKey:"clockID" json:"clockId"` + Timestamp string `fetcherKey:"date" json:"timestamp"` } func GetClockID(ctx clients.ExecContext, interfaceName string) (NetlinkClockID, error) { diff --git a/pkg/collectors/devices/gps_ubx_test.go b/pkg/collectors/devices/gps_ubx_test.go index 547c7f8a..90dcfe96 100644 --- a/pkg/collectors/devices/gps_ubx_test.go +++ b/pkg/collectors/devices/gps_ubx_test.go @@ -65,7 +65,7 @@ var _ = Describe("GetGPSNav", func() { }, "\n") response[expectedInput] = []byte(expectedOutput) - ctx, err := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer") + ctx, err := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer", "TestNodeName") Expect(err).NotTo(HaveOccurred()) gpsInfo, err := devices.GetGPSNav(ctx) diff --git a/pkg/collectors/devices/gps_ubx_ver_test.go b/pkg/collectors/devices/gps_ubx_ver_test.go index 913712d1..09ac3097 100644 --- a/pkg/collectors/devices/gps_ubx_ver_test.go +++ b/pkg/collectors/devices/gps_ubx_ver_test.go @@ -72,7 +72,7 @@ var _ = Describe("GetGPSNav", func() { }, "\n") response[expectedInput] = 
[]byte(expectedOutput) - ctx, err := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer") + ctx, err := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer", "TestNodeName") Expect(err).NotTo(HaveOccurred()) gpsInfo, err := devices.GetGPSVersions(ctx) diff --git a/pkg/collectors/devices/pmc_test.go b/pkg/collectors/devices/pmc_test.go index 516b759f..553410e5 100644 --- a/pkg/collectors/devices/pmc_test.go +++ b/pkg/collectors/devices/pmc_test.go @@ -64,7 +64,7 @@ var _ = Describe("GetPMC", func() { }, "\n") response[expectedInput] = []byte(expectedOutput) - ctx, err := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer") + ctx, err := clients.NewContainerContext(clientset, "TestNamespace", "Test", "TestContainer", "TestNodeName") Expect(err).NotTo(HaveOccurred()) pmcInfo, err := devices.GetPMC(ctx) diff --git a/pkg/collectors/dpll_collector.go b/pkg/collectors/dpll_collector.go index 1503ea44..4effc0fb 100644 --- a/pkg/collectors/dpll_collector.go +++ b/pkg/collectors/dpll_collector.go @@ -17,7 +17,7 @@ const ( // Returns a new DPLLCollector from the CollectionConstuctor Factory func NewDPLLCollector(constructor *CollectionConstructor) (Collector, error) { - ctx, err := contexts.GetPTPDaemonContext(constructor.Clientset) + ctx, err := contexts.GetPTPDaemonContext(constructor.Clientset, constructor.PTPNodeName) if err != nil { return &DPLLNetlinkCollector{}, fmt.Errorf("failed to create DPLLCollector: %w", err) } diff --git a/pkg/collectors/dpll_collector_fs.go b/pkg/collectors/dpll_collector_fs.go index c36d83ec..1925aeb9 100644 --- a/pkg/collectors/dpll_collector_fs.go +++ b/pkg/collectors/dpll_collector_fs.go @@ -67,7 +67,7 @@ func (dpll *DPLLFilesystemCollector) CleanUp() error { // Returns a new DPLLFilesystemCollector from the CollectionConstuctor Factory func NewDPLLFilesystemCollector(constructor *CollectionConstructor) (Collector, error) { - ctx, err := 
contexts.GetPTPDaemonContext(constructor.Clientset) + ctx, err := contexts.GetPTPDaemonContext(constructor.Clientset, constructor.PTPNodeName) if err != nil { return &DPLLFilesystemCollector{}, fmt.Errorf("failed to create DPLLFilesystemCollector: %w", err) } diff --git a/pkg/collectors/gps_ubx_collector.go b/pkg/collectors/gps_ubx_collector.go index 8086dbc1..c793f017 100644 --- a/pkg/collectors/gps_ubx_collector.go +++ b/pkg/collectors/gps_ubx_collector.go @@ -54,7 +54,7 @@ func (gps *GPSCollector) Poll(resultsChan chan PollResult, wg *utils.WaitGroupCo // Returns a new GPSCollector based on values in the CollectionConstructor func NewGPSCollector(constructor *CollectionConstructor) (Collector, error) { - ctx, err := contexts.GetPTPDaemonContext(constructor.Clientset) + ctx, err := contexts.GetPTPDaemonContext(constructor.Clientset, constructor.PTPNodeName) if err != nil { return &GPSCollector{}, fmt.Errorf("failed to create DPLLCollector: %w", err) } diff --git a/pkg/collectors/log_follower.go b/pkg/collectors/log_follower.go index 16db7a9b..de3fd6bd 100644 --- a/pkg/collectors/log_follower.go +++ b/pkg/collectors/log_follower.go @@ -63,6 +63,7 @@ type LogsCollector struct { client *clients.Clientset sliceQuit chan os.Signal logsOutputFileName string + nodeName string lastPoll loglines.GenerationalLockedTime wg sync.WaitGroup withTimeStamps bool @@ -200,7 +201,7 @@ func processStream(stream io.ReadCloser, expectedEndtime time.Time) ([]*loglines } func (logs *LogsCollector) poll() error { - podName, err := logs.client.FindPodNameFromPrefix(contexts.PTPNamespace, contexts.PTPPodNamePrefix) + podName, err := logs.client.FindPodNameFromPrefix(contexts.PTPNamespace, contexts.PTPPodNamePrefix, logs.nodeName) if err != nil { return fmt.Errorf("failed to poll: %w", err) } @@ -282,6 +283,7 @@ func NewLogsCollector(constructor *CollectionConstructor) (Collector, error) { Store: make(map[uint32][]*loglines.LineSlice), Dumper: 
loglines.NewGenerationDumper(constructor.TempDir, constructor.KeepDebugFiles), }, + nodeName: constructor.PTPNodeName, } return &collector, nil } diff --git a/pkg/collectors/pmc_collector.go b/pkg/collectors/pmc_collector.go index e9e33c10..c13d584a 100644 --- a/pkg/collectors/pmc_collector.go +++ b/pkg/collectors/pmc_collector.go @@ -53,7 +53,7 @@ func (pmc *PMCCollector) Poll(resultsChan chan PollResult, wg *utils.WaitGroupCo // Returns a new PMCCollector based on values in the CollectionConstructor func NewPMCCollector(constructor *CollectionConstructor) (Collector, error) { - ctx, err := contexts.GetPTPDaemonContext(constructor.Clientset) + ctx, err := contexts.GetPTPDaemonContext(constructor.Clientset, constructor.PTPNodeName) if err != nil { return &PMCCollector{}, fmt.Errorf("failed to create PMCCollector: %w", err) } diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index cc5d8e9a..2f174d0a 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -63,6 +63,7 @@ func NewCollectorRunner(selectedCollectors []string) *CollectorRunner { func (runner *CollectorRunner) initialise( //nolint:funlen // allow a slightly long function callback callbacks.Callback, ptpInterface string, + ptpNodeName string, clientset *clients.Clientset, pollInterval int, requestedDuration time.Duration, @@ -79,6 +80,7 @@ func (runner *CollectorRunner) initialise( //nolint:funlen // allow a slightly l constructor := &collectors.CollectionConstructor{ Callback: callback, PTPInterface: ptpInterface, + PTPNodeName: ptpNodeName, Clientset: clientset, PollInterval: pollInterval, DevInfoAnnouceInterval: devInfoAnnouceInterval, @@ -220,6 +222,7 @@ func (runner *CollectorRunner) cleanUpAll() { func (runner *CollectorRunner) Run( //nolint:funlen // allow a slightly long function kubeConfig string, outputFile string, + nodeName string, requestedDuration time.Duration, pollInterval int, devInfoAnnouceInterval int, @@ -243,6 +246,7 @@ func (runner *CollectorRunner) Run( //nolint:funlen 
// allow a slightly long fun runner.initialise( callback, ptpInterface, + nodeName, clientset, pollInterval, requestedDuration, diff --git a/pkg/verify/verify.go b/pkg/verify/verify.go index 4d61d7dd..fdb1cb28 100644 --- a/pkg/verify/verify.go +++ b/pkg/verify/verify.go @@ -27,8 +27,9 @@ const ( func getDevInfoValidations( clientset *clients.Clientset, interfaceName string, + ptpNodeName string, ) []validations.Validation { - ctx, err := contexts.GetPTPDaemonContext(clientset) + ctx, err := contexts.GetPTPDaemonContext(clientset, ptpNodeName) utils.IfErrorExitOrPanic(err) devInfo, err := devices.GetPTPDeviceInfo(interfaceName, ctx) utils.IfErrorExitOrPanic(err) @@ -40,8 +41,9 @@ func getDevInfoValidations( func getGPSVersionValidations( clientset *clients.Clientset, + ptpNodeName string, ) []validations.Validation { - ctx, err := contexts.GetPTPDaemonContext(clientset) + ctx, err := contexts.GetPTPDaemonContext(clientset, ptpNodeName) utils.IfErrorExitOrPanic(err) gnssVersions, err := devices.GetGPSVersions(ctx) utils.IfErrorExitOrPanic(err) @@ -56,8 +58,9 @@ func getGPSVersionValidations( func getGPSStatusValidation( clientset *clients.Clientset, + ptpNodeName string, ) []validations.Validation { - ctx, err := contexts.GetPTPDaemonContext(clientset) + ctx, err := contexts.GetPTPDaemonContext(clientset, ptpNodeName) utils.IfErrorExitOrPanic(err) // If we need to do this for more validations then consider a generic @@ -80,13 +83,13 @@ func getGPSStatusValidation( } } -func getValidations(interfaceName, kubeConfig string) []validations.Validation { +func getValidations(interfaceName, ptpNodeName, kubeConfig string) []validations.Validation { checks := make([]validations.Validation, 0) clientset, err := clients.GetClientset(kubeConfig) utils.IfErrorExitOrPanic(err) - checks = append(checks, getDevInfoValidations(clientset, interfaceName)...) - checks = append(checks, getGPSVersionValidations(clientset)...) 
- checks = append(checks, getGPSStatusValidation(clientset)...) + checks = append(checks, getDevInfoValidations(clientset, interfaceName, ptpNodeName)...) + checks = append(checks, getGPSVersionValidations(clientset, ptpNodeName)...) + checks = append(checks, getGPSStatusValidation(clientset, ptpNodeName)...) checks = append( checks, validations.NewIsGrandMaster(clientset), @@ -165,8 +168,8 @@ func report(results []*ValidationResult, useAnalyserJSON bool) { } } -func Verify(interfaceName, kubeConfig string, useAnalyserJSON bool) { - checks := getValidations(interfaceName, kubeConfig) +func Verify(interfaceName, kubeConfig string, useAnalyserJSON bool, nodeName string) { + checks := getValidations(interfaceName, nodeName, kubeConfig) results := make([]*ValidationResult, 0) for _, check := range checks {