diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go
index 5622969446f86..433659a39d6e3 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_container.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go
@@ -219,7 +219,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
 		if handlerErr != nil {
 			m.recordContainerEvent(pod, container, kubeContainerID.ID, v1.EventTypeWarning, events.FailedPostStartHook, msg)
 			if err := m.killContainer(pod, kubeContainerID, container.Name, "FailedPostStartHook", 0); err != nil {
-				glog.Errorf("Failed to kill container %q(id=%q) in pod %q: %v, %v",
+				klog.Errorf("Failed to kill container %q(id=%q) in pod %q: %v, %v",
 					container.Name, kubeContainerID.String(), format.Pod(pod), ErrPostStartHook, err)
 			}
 			return msg, fmt.Errorf("%s: %v", ErrPostStartHook, handlerErr)
@@ -669,7 +669,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *v1.Pod, ru
 	containerResults := make(chan *kubecontainer.SyncResult, len(runningPod.Containers))
 	// non-sidecars first
 	start := time.Now()
-	glog.Infof("Pod: %s, killContainersWithSyncResult: killing %d non-sidecars, %s termination period", runningPod.Name, len(nonSidecars), gracePeriodDuration)
+	klog.Infof("Pod: %s, killContainersWithSyncResult: killing %d non-sidecars, %s termination period", runningPod.Name, len(nonSidecars), gracePeriodDuration)
 	nonSidecarsWg := sync.WaitGroup{}
 	nonSidecarsWg.Add(len(nonSidecars))
 	for _, container := range nonSidecars {
@@ -691,7 +691,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *v1.Pod, ru
 	}
 
 	// then sidecars
-	glog.Infof("Pod: %s, killContainersWithSyncResult: killing %d sidecars, %s left", runningPod.Name, len(sidecars), gracePeriodDuration)
+	klog.Infof("Pod: %s, killContainersWithSyncResult: killing %d sidecars, %s left", runningPod.Name, len(sidecars), gracePeriodDuration)
 	wg := sync.WaitGroup{}
 	wg.Add(len(sidecars))
 	for _, container := range sidecars {
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go
index da32f584490a9..57102497450e0 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go
@@ -479,7 +479,7 @@ func containerSucceeded(c *v1.Container, podStatus *kubecontainer.PodStatus) boo
 
 func isSidecar(pod *v1.Pod, containerName string) bool {
 	if pod == nil {
-		glog.V(5).Infof("isSidecar: pod is nil, so returning false")
+		klog.V(5).Infof("isSidecar: pod is nil, so returning false")
 		return false
 	}
 	return pod.Annotations[fmt.Sprintf("sidecars.lyft.net/container-lifecycle-%s", containerName)] == "Sidecar"
@@ -506,21 +506,9 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
 		}
 	}
 
-	// Determine if there are any non sidecar containers that are running or need restarting
-	// if there are none, we can kill the remaining sidecars
-	sidecarsOnly := true
-	for _, container := range pod.Spec.Containers {
-		containerStatus := podStatus.FindContainerStatusByName(container.Name)
-		if !isSidecar(pod, container.Name) {
-			if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) || (containerStatus != nil && containerStatus.State == kubecontainer.ContainerStateRunning) {
-				sidecarsOnly = false
-			}
-		}
-	}
-
 	// determine sidecar status
 	sidecarStatus := status.GetSidecarsStatus(pod)
-	glog.Infof("Pod: %s, sidecars: %s, status: Present=%v,Ready=%v,ContainerWaiting=%v", format.Pod(pod), sidecarNames, sidecarStatus.SidecarsPresent, sidecarStatus.SidecarsReady, sidecarStatus.ContainersWaiting)
+	klog.Infof("Pod: %s, sidecars: %s, status: Present=%v,Ready=%v,ContainersWaiting=%v", format.Pod(pod), sidecarNames, sidecarStatus.SidecarsPresent, sidecarStatus.SidecarsReady, sidecarStatus.ContainersWaiting)
 
 	// If we need to (re-)create the pod sandbox, everything will need to be
 	// killed and recreated, and init containers should be purged.
@@ -613,7 +601,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
 		// - the non-sidecars have run before (i.e. they are not in a Waiting state) OR
 		// - the sidecars are ready (we're starting them for the first time)
 		if !isSidecar(pod, container.Name) && sidecarStatus.SidecarsPresent && sidecarStatus.ContainersWaiting && !sidecarStatus.SidecarsReady {
-			glog.Infof("Pod: %s, Container: %s, sidecar=%v skipped: Present=%v,Ready=%v,ContainerWaiting=%v", format.Pod(pod), container.Name, isSidecar(pod, container.Name), sidecarStatus.SidecarsPresent, sidecarStatus.SidecarsReady, sidecarStatus.ContainersWaiting)
+			klog.Infof("Pod: %s, Container: %s, sidecar=%v skipped: Present=%v,Ready=%v,ContainerWaiting=%v", format.Pod(pod), container.Name, isSidecar(pod, container.Name), sidecarStatus.SidecarsPresent, sidecarStatus.SidecarsReady, sidecarStatus.ContainersWaiting)
 			continue
 		}
 
@@ -632,10 +620,6 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
 		// If container does not exist, or is not running, check whether we
 		// need to restart it.
 		if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {
-			if sidecarsOnly && isSidecar(pod, container.Name) {
-				glog.Infof("Pod: %s: %s: is sidecar, sidecars only, so not restarting", format.Pod(pod), container.Name)
-				continue
-			}
 			if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
 				message := fmt.Sprintf("%s: Container %+v is dead, but RestartPolicy says that we should restart it.", pod.Name, container)
 				klog.V(3).Infof(message)
@@ -662,17 +646,6 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
 			// Restart regardless of the restart policy because the container
 			// spec changed.
 			restart = true
-		} else if sidecarsOnly && isSidecar(pod, container.Name) {
-			// in this case, the container is a sidecar, but no
-			// non-sidecars are ever going to run again. we don't need
-			// the sidecars, so we kill it as well
-			reason = "Non-sidecars have completed. Container will be killed."
-			// we are not planning to restart this container.
-			restart = false
-			// keepCount set to avoid killing pod right away - we should only
-			// kill pod once all containers have exited and we get back into this
-			// loop.
-			keepCount += 1
 		} else if liveness, found := m.livenessManager.Get(containerStatus.ID); found && liveness == proberesults.Failure {
 			// If the container failed the liveness probe, we should kill it.
 			message = fmt.Sprintf("Container %s failed liveness probe", container.Name)
@@ -702,7 +675,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
 	}
 
 	if keepCount == 0 && len(changes.ContainersToStart) == 0 {
-		glog.Infof("Pod: %s: KillPod=true", format.Pod(pod))
+		klog.Infof("Pod: %s: KillPod=true", format.Pod(pod))
 		changes.KillPod = true
 	}
 
@@ -951,10 +924,10 @@ func (m *kubeGenericRuntimeManager) KillPod(pod *v1.Pod, runningPod kubecontaine
 	// grace period and also the sidecar status.
 	if pod == nil {
 		for _, container := range runningPod.Containers {
-			glog.Infof("Pod: %s, KillPod: pod nil, trying to restore from container %s", runningPod.Name, container.ID)
+			klog.Infof("Pod: %s, KillPod: pod nil, trying to restore from container %s", runningPod.Name, container.ID)
 			podSpec, _, err := m.restoreSpecsFromContainerLabels(container.ID)
 			if err != nil {
-				glog.Errorf("Pod: %s, KillPod: couldn't restore: %s", runningPod.Name, container.ID)
+				klog.Errorf("Pod: %s, KillPod: couldn't restore: %s", runningPod.Name, container.ID)
 				continue
 			}
 			pod = podSpec
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
index d715c80c82807..a0e1390578e9f 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
@@ -1336,72 +1336,11 @@ func TestComputePodActionsWithSidecar(t *testing.T) {
 		mutateStatusFn func(*kubecontainer.PodStatus)
 		actions        podActions
 	}{
-		"Kill sidecars if all non-sidecars are terminated": {
-			mutatePodFn: func(pod *v1.Pod) {
-				pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
-			},
-			mutateStatusFn: func(status *kubecontainer.PodStatus) {
-				for i := range status.ContainerStatuses {
-					if i == 1 {
-						continue
-					}
-					status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited
-					status.ContainerStatuses[i].ExitCode = 0
-				}
-			},
-			actions: podActions{
-				SandboxID:         baseStatus.SandboxStatuses[0].Id,
-				ContainersToKill:  getKillMap(basePod, baseStatus, []int{1}),
-				ContainersToStart: []int{},
-			},
-		},
-		"Kill pod if all sidecars and non-sidecars are terminated": {
-			mutatePodFn: func(pod *v1.Pod) {
-				pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
-			},
-			mutateStatusFn: func(status *kubecontainer.PodStatus) {
-				for i := range status.ContainerStatuses {
-					status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited
-					if i == 1 {
-						status.ContainerStatuses[i].ExitCode = 1
-					} else {
-						status.ContainerStatuses[i].ExitCode = 0
-					}
-				}
-			},
-			actions: podActions{
-				KillPod:           true,
-				SandboxID:         baseStatus.SandboxStatuses[0].Id,
-				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
-				ContainersToStart: []int{},
-			},
-		},
-		"Don't restart sidecars if all non-sidecars are terminated": {
-			mutatePodFn: func(pod *v1.Pod) {
-				pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
-			},
-			mutateStatusFn: func(status *kubecontainer.PodStatus) {
-				for i := range status.ContainerStatuses {
-					status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited
-					if i == 1 {
-						status.ContainerStatuses[i].ExitCode = 1
-					} else {
-						status.ContainerStatuses[i].ExitCode = 0
-					}
-				}
-			},
-			actions: podActions{
-				KillPod:           true,
-				SandboxID:         baseStatus.SandboxStatuses[0].Id,
-				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
-				ContainersToStart: []int{},
-			},
-		},
 		"Start sidecar containers before non-sidecars when creating a new pod": {
 			mutateStatusFn: func(status *kubecontainer.PodStatus) {
 				// No container or sandbox exists.
 				status.SandboxStatuses = []*runtimeapi.PodSandboxStatus{}
-				status.ContainerStatuses = []*kubecontainer.ContainerStatus{}
+				status.ContainerStatuses = []*kubecontainer.Status{}
 			},
 			actions: podActions{
 				KillPod:           true,
diff --git a/pkg/kubelet/status/status_manager.go b/pkg/kubelet/status/status_manager.go
index 0b35a6a3f43ae..838e70627dfa5 100644
--- a/pkg/kubelet/status/status_manager.go
+++ b/pkg/kubelet/status/status_manager.go
@@ -24,7 +24,6 @@ import (
 	"time"
 
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/klog/glog"
 
 	v1 "k8s.io/api/core/v1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
@@ -755,11 +754,11 @@ type SidecarsStatus struct {
 // GetSidecarsStatus returns the SidecarsStatus for the given pod
 func GetSidecarsStatus(pod *v1.Pod) SidecarsStatus {
 	if pod == nil {
-		glog.Infof("Pod was nil, returning empty sidecar status")
+		klog.Infof("Pod was nil, returning empty sidecar status")
 		return SidecarsStatus{}
 	}
 	if pod.Spec.Containers == nil || pod.Status.ContainerStatuses == nil {
-		glog.Infof("Pod Containers or Container status was nil, returning empty sidecar status")
+		klog.Infof("Pod Containers or Container status was nil, returning empty sidecar status")
 		return SidecarsStatus{}
 	}
 	sidecarsStatus := SidecarsStatus{SidecarsPresent: false, SidecarsReady: true, ContainersWaiting: false}
@@ -769,14 +768,14 @@ func GetSidecarsStatus(pod *v1.Pod) SidecarsStatus {
 		if pod.Annotations[fmt.Sprintf("sidecars.lyft.net/container-lifecycle-%s", container.Name)] == "Sidecar" {
 			sidecarsStatus.SidecarsPresent = true
 			if !status.Ready {
-				glog.Infof("Pod %s: %s: sidecar not ready", format.Pod(pod), container.Name)
+				klog.Infof("Pod %s: %s: sidecar not ready", format.Pod(pod), container.Name)
 				sidecarsStatus.SidecarsReady = false
 			} else {
-				glog.Infof("Pod %s: %s: sidecar is ready", format.Pod(pod), container.Name)
+				klog.Infof("Pod %s: %s: sidecar is ready", format.Pod(pod), container.Name)
 			}
 		} else if status.State.Waiting != nil {
 			// check if non-sidecars have started
-			glog.Infof("Pod: %s: %s: non-sidecar waiting", format.Pod(pod), container.Name)
+			klog.Infof("Pod: %s: %s: non-sidecar waiting", format.Pod(pod), container.Name)
 			sidecarsStatus.ContainersWaiting = true
 		}
 	}