Improve error handling
parent 0b0db0ee8f
commit 7f10c6a2c2

@@ -12,6 +12,7 @@ import (
 	"time"
 
 	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/watch"

@@ -165,11 +166,10 @@ func copyPodLogs(ctx context.Context, wg *sync.WaitGroup, clientset KubernetesCl
 	req := clientset.CoreV1().Pods(p.Namespace).GetLogs(p.Name, &podLogOpts)
 	logs, err := req.Stream(ctx)
 
-	// If one pod or container is in a non-running state, we don't want to quit.
-	// Checking the response string avoids the risk of a race condition but
-	// obviously feels a bit brittle too.
-	// TODO: introspect error
-	if err != nil && strings.Contains(err.Error(), "is waiting to start") {
+	// If one container is still being created, do not treat this as a fatal error.
+	// We try to verify the error type as strictly as possible.
+	var statusErr *apierrors.StatusError
+	if errors.As(err, &statusErr) && statusErr.Status().Reason == metav1.StatusReasonBadRequest && strings.Contains(statusErr.Error(), "ContainerCreating") {
 		return &streamError{err: err, podName: p.Name, recoverable: true}
 	} else if err != nil {
 		return &streamError{err: err, podName: p.Name}
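
The new branch relies on errors.As to unwrap a *apierrors.StatusError and only marks the error recoverable when the API server reports a BadRequest mentioning ContainerCreating. A minimal standalone sketch of that classification pattern; classifyStreamError is a name invented for this sketch, and streamError is copied from the diff:

package main

import (
	"errors"
	"fmt"
	"strings"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type streamError struct {
	err         error
	podName     string
	recoverable bool
}

// classifyStreamError mirrors the branch added above: a BadRequest
// StatusError mentioning ContainerCreating is recoverable; any other
// non-nil error is fatal. (Hypothetical helper, not in the original code.)
func classifyStreamError(err error, podName string) *streamError {
	if err == nil {
		return nil
	}
	var statusErr *apierrors.StatusError
	if errors.As(err, &statusErr) &&
		statusErr.Status().Reason == metav1.StatusReasonBadRequest &&
		strings.Contains(statusErr.Error(), "ContainerCreating") {
		return &streamError{err: err, podName: podName, recoverable: true}
	}
	return &streamError{err: err, podName: podName}
}

func main() {
	// Simulate the error the API server returns while a container starts.
	err := apierrors.NewBadRequest(`container "app" in pod "web-0" is waiting to start: ContainerCreating`)
	fmt.Println(classifyStreamError(err, "web-0").recoverable) // true
}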

@@ -78,9 +78,9 @@ func (w *Watcher) Watch(ctx context.Context) error {
 	// Check if the deployment exists before we commence watching, to allow us to
 	// return an error if needed.
 	_, err := deploymentsClient.Get(ctx, w.deployName, metav1.GetOptions{})
-	var statusError *apierrors.StatusError
-	if errors.As(err, &statusError) && statusError.Status().Reason == metav1.StatusReasonNotFound {
-		if !w.allowNonExistentDeployment {
+	var statusErr *apierrors.StatusError
+	if errors.As(err, &statusErr) && statusErr.Status().Reason == metav1.StatusReasonNotFound {
+		if w.strictExist {
 			return err
 		}
 		log.Printf(`deployment "%s" does not exist, waiting`, w.deployName)
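
The same reason check exists upstream as the helper apierrors.IsNotFound, which matches StatusReasonNotFound after unwrapping. A sketch of the existence check using that helper, assuming a hypothetical ensureDeployment wrapper around a client-go clientset (strictExist mirrors the renamed field above):

package watcher

import (
	"context"
	"log"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ensureDeployment is a hypothetical helper for this sketch: it fails on a
// missing Deployment only when strictExist is set; otherwise it logs and
// lets the caller start watching for the Deployment to appear.
func ensureDeployment(ctx context.Context, clientset kubernetes.Interface, namespace, name string, strictExist bool) error {
	_, err := clientset.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		if strictExist {
			return err
		}
		log.Printf(`deployment "%s" does not exist, waiting`, name)
		return nil
	}
	// Any other error (including nil) is passed through unchanged.
	return err
}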