diff --git a/pkg/virtualkubelet/config.go b/pkg/virtualkubelet/config.go index 4f615e9b..11345504 100644 --- a/pkg/virtualkubelet/config.go +++ b/pkg/virtualkubelet/config.go @@ -2,24 +2,25 @@ package virtualkubelet // Config holds the whole configuration type Config struct { - InterlinkURL string `yaml:"InterlinkURL"` - Interlinkport string `yaml:"InterlinkPort"` - KubernetesAPIAddr string `yaml:"KubernetesApiAddr"` - KubernetesAPIPort string `yaml:"KubernetesApiPort"` - KubernetesAPICaCrt string `yaml:"KubernetesApiCaCrt"` - VKConfigPath string `yaml:"VKConfigPath"` - VKTokenFile string `yaml:"VKTokenFile"` - ServiceAccount string `yaml:"ServiceAccount"` - Namespace string `yaml:"Namespace"` - PodIP string `yaml:"PodIP"` - VerboseLogging bool `yaml:"VerboseLogging"` - ErrorsOnlyLogging bool `yaml:"ErrorsOnlyLogging"` - HTTP HTTP `yaml:"HTTP"` - KubeletHTTP HTTP `yaml:"KubeletHTTP"` - CPU string `yaml:"CPU,omitempty"` - Memory string `yaml:"Memory,omitempty"` - Pods string `yaml:"Pods,omitempty"` - GPU string `yaml:"nvidia.com/gpu,omitempty"` + InterlinkURL string `yaml:"InterlinkURL"` + Interlinkport string `yaml:"InterlinkPort"` + KubernetesAPIAddr string `yaml:"KubernetesApiAddr"` + KubernetesAPIPort string `yaml:"KubernetesApiPort"` + KubernetesAPICaCrt string `yaml:"KubernetesApiCaCrt"` + DisableProjectedVolumes bool `yaml:"DisableProjectedVolumes"` + VKConfigPath string `yaml:"VKConfigPath"` + VKTokenFile string `yaml:"VKTokenFile"` + ServiceAccount string `yaml:"ServiceAccount"` + Namespace string `yaml:"Namespace"` + PodIP string `yaml:"PodIP"` + VerboseLogging bool `yaml:"VerboseLogging"` + ErrorsOnlyLogging bool `yaml:"ErrorsOnlyLogging"` + HTTP HTTP `yaml:"HTTP"` + KubeletHTTP HTTP `yaml:"KubeletHTTP"` + CPU string `yaml:"CPU,omitempty"` + Memory string `yaml:"Memory,omitempty"` + Pods string `yaml:"Pods,omitempty"` + GPU string `yaml:"nvidia.com/gpu,omitempty"` } type HTTP struct { diff --git a/pkg/virtualkubelet/execute.go 
b/pkg/virtualkubelet/execute.go index 074ec6d7..82c07a30 100644 --- a/pkg/virtualkubelet/execute.go +++ b/pkg/virtualkubelet/execute.go @@ -28,9 +28,9 @@ import ( const PodPhaseInitialize = "Initializing" const PodPhaseCompleted = "Completed" -func failedMount(ctx context.Context, failedAndWait *bool, name string, pod *v1.Pod, p *Provider) error { +func failedMount(ctx context.Context, failedAndWait *bool, name string, pod *v1.Pod, p *Provider, err error) error { *failedAndWait = true - log.G(ctx).Warning("Unable to find ConfigMap " + name + " for pod " + pod.Name + ". Waiting for it to be initialized") + log.G(ctx).Warningf("Unable to find ConfigMap or Secret %s for pod %s. Waiting for it to be initialized. Error was: %v. Current phase: %s", name, pod.Name, err, pod.Status.Phase) if pod.Status.Phase != PodPhaseInitialize { pod.Status.Phase = PodPhaseInitialize err := p.UpdatePod(ctx, pod) @@ -609,8 +609,8 @@ func remoteExecutionHandleProjectedSource( func remoteExecutionHandleVolumes(ctx context.Context, p *Provider, pod *v1.Pod, req *types.PodCreateRequests) error { startTime := time.Now() + endTime := startTime.Add(5 * time.Minute) - timeNow := time.Now() _, err := p.clientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) if err != nil { log.G(ctx).Warning("Deleted Pod before actual creation") @@ -625,12 +625,12 @@ func remoteExecutionHandleVolumes(ctx context.Context, p *Provider, pod *v1.Pod, log.G(ctx).Debug("Looking at volume ", volume) for { failedAndWait = false - if timeNow.Sub(startTime).Seconds() < time.Hour.Minutes()*5 { + if time.Now().Before(endTime) { switch { case volume.ConfigMap != nil: cfgmap, err := p.clientSet.CoreV1().ConfigMaps(pod.Namespace).Get(ctx, volume.ConfigMap.Name, metav1.GetOptions{}) if err != nil { - err = failedMount(ctx, &failedAndWait, volume.ConfigMap.Name, pod, p) + err = failedMount(ctx, &failedAndWait, volume.ConfigMap.Name, pod, p, err) if err != nil { return err } @@ -641,6 +641,12 @@ func
remoteExecutionHandleVolumes(ctx context.Context, p *Provider, pod *v1.Pod, case volume.Projected != nil: // The service account token uses the projected volume in K8S >= 1.24. + if p.config.DisableProjectedVolumes { + // This flag disables any handling of Projected Volumes. + log.G(ctx).Warning("Flag DisableProjectedVolumes set to true, so not handling Projected Volume: ", volume) + break + } + var projectedVolume v1.ConfigMap projectedVolume.Name = volume.Name projectedVolume.Data = make(map[string]string) @@ -659,7 +665,7 @@ func remoteExecutionHandleVolumes(ctx context.Context, p *Provider, pod *v1.Pod, case volume.Secret != nil: scrt, err := p.clientSet.CoreV1().Secrets(pod.Namespace).Get(ctx, volume.Secret.SecretName, metav1.GetOptions{}) if err != nil { - err = failedMount(ctx, &failedAndWait, volume.Secret.SecretName, pod, p) + err = failedMount(ctx, &failedAndWait, volume.Secret.SecretName, pod, p, err) if err != nil { return err } @@ -675,7 +681,8 @@ func remoteExecutionHandleVolumes(ctx context.Context, p *Provider, pod *v1.Pod, } if failedAndWait { - log.G(ctx).Warningf("volume %s not ready, sleeping 2s, elapsed %f / 5 minutes max", volume.Name, time.Since(startTime).Minutes()) + time.Sleep(2 * time.Second) continue } pod.Status.Phase = v1.PodPending @@ -695,7 +702,7 @@ func remoteExecutionHandleVolumes(ctx context.Context, p *Provider, pod *v1.Pod, if err != nil { return err } - return errors.New("unable to retrieve ConfigMaps or Secrets. Check logs") + return errors.New("unable to retrieve ConfigMaps or Secrets after 5m. Check logs") } } return nil