podWatcher: handle removed pods

Rob Watson 2022-06-12 15:26:27 +02:00
parent 397708336d
commit bf49105619
2 changed files with 124 additions and 36 deletions
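
The core of the change: each streamed pod now gets a stream handle carrying a done channel, so the watcher can actively stop log streaming when a pod is removed instead of merely forgetting about it. As a standalone sketch of the underlying Go idiom (illustrative names only, not code from this commit): closing a channel acts as a broadcast that any goroutine selecting on it will observe.

package main

import (
	"fmt"
	"time"
)

func main() {
	done := make(chan struct{})
	lines := make(chan string)

	// Worker: forwards log lines until told to stop.
	go func() {
		for {
			select {
			case l := <-lines:
				fmt.Println(l)
			case <-done:
				return // stop streaming once the pod is removed
			}
		}
	}()

	lines <- "[foo] some log line"
	close(done) // a closed channel is immediately readable by every receiver
	time.Sleep(10 * time.Millisecond)
}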

View File

@@ -20,9 +20,23 @@ import (
 const nl = "\n"
 
+type stream struct {
+	podName   string
+	namespace string
+	done      chan struct{}
+}
+
+func newStream(podName, namespace string) stream {
+	return stream{podName: podName, namespace: namespace, done: make(chan struct{})}
+}
+
+func (s stream) close() {
+	close(s.done)
+}
+
 type streamError struct {
 	err         error
-	podName     string
+	stream      stream
 	recoverable bool
 }
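
A side note on the close method above (a property of Go channels, not something spelled out in the diff): close panics if the channel is already closed, so each stream must be closed exactly once. That is presumably why removePod, further down, closes the stream and deletes its map entry in the same step, and why the updated Close doc comment says it should be called at most once. A minimal illustration:

package main

import "fmt"

func main() {
	done := make(chan struct{})
	close(done)

	// A second close panics with "close of closed channel"; recover is used
	// here only to demonstrate the behaviour.
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("panic:", r)
		}
	}()
	close(done)
}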
@@ -30,12 +44,12 @@ func (re *streamError) Error() string {
 	return re.err.Error()
 }
 
-func newStreamError(err error, podName string) *streamError {
-	return &streamError{err: err, podName: podName}
+func newStreamError(err error, stream stream) *streamError {
+	return &streamError{err: err, stream: stream}
 }
 
-func newRecoverableError(err error, podName string) *streamError {
-	return &streamError{err: err, podName: podName, recoverable: true}
+func newRecoverableError(err error, stream stream) *streamError {
+	return &streamError{err: err, stream: stream, recoverable: true}
 }
 
 // PodWatcher consumes and merges the logs for a specified set of pods.
@@ -44,7 +58,7 @@ type PodWatcher struct {
 	container     string
 	labelSelector labels.Selector
 	spec          map[string]*corev1.Pod
-	status        map[string]bool
+	status        map[string]stream
 	streamResults chan error
 	dst           io.Writer
 	closeChan     chan struct{}
@@ -58,7 +72,7 @@ func NewPodWatcher(client KubernetesClient, container string, labelSelector labe
 		container:     container,
 		labelSelector: labelSelector,
 		spec:          make(map[string]*corev1.Pod),
-		status:        make(map[string]bool),
+		status:        make(map[string]stream),
 		streamResults: make(chan error),
 		dst:           dst,
 		closeChan:     make(chan struct{}),
@@ -82,12 +96,13 @@ func (pw *PodWatcher) WatchPods(ctx context.Context) error {
 const tickerInterval = time.Millisecond * 250
 
-// Close terminates the PodWatcher.
+// Close terminates the PodWatcher and should be called at most once.
 func (pw *PodWatcher) Close() {
 	pw.closeChan <- struct{}{}
 }
 
 func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
+	// TODO: pass namespace
 	podsClient := pw.client.Typed.CoreV1().Pods(corev1.NamespaceDefault)
 
 	// Returns a watcher which notifies of changes in the relevant pods.
@@ -102,7 +117,7 @@ func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
 	defer ticker.Stop()
 
 	logStream := make(chan string)
-	streamErrors := make(chan *streamError)
+	streamErrors := make(chan error)
 	resultChan := watcher.ResultChan()
 
 	for {
@@ -111,29 +126,32 @@ func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
 			return ctx.Err()
 
 		case <-pw.closeChan:
+			for _, stream := range pw.status {
+				stream.close()
+			}
 			return nil
 
 		case <-ticker.C:
 			// Iterate through the desired state (w.spec) and launch goroutines to
 			// process the logs of any missing pods.
 			for podName, pod := range pw.spec {
 				pod := pod
 				if _, ok := pw.status[podName]; !ok {
 					pw.logger.Printf("[PodWatcher] adding pod, name = %s", pod.Name)
-					pw.status[pod.Name] = true
+					s := newStream(pod.Name, pod.Namespace)
+					pw.status[pod.Name] = s
 					wg.Add(1)
 					go func() {
-						if err := copyPodLogs(ctx, wg, pw.client, pod, pw.container, logStream); err != nil {
+						if err := copyPodLogs(ctx, wg, pw.client, s, pw.container, logStream); err != nil {
 							streamErrors <- err
 						}
 					}()
 				}
 			}
 
 			// For any pods which no longer exist, remove the pod.
-			// TODO: stop the log streaming.
-			for podName := range pw.status {
+			for podName, stream := range pw.status {
 				if _, ok := pw.spec[podName]; !ok {
-					pw.removePod(podName)
+					pw.removePod(stream)
 				}
 			}
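
The ticker branch above is a small reconcile loop: pw.spec holds the desired pods and pw.status the streams actually running; each tick starts streams for pods that are missing and, with this commit, also closes streams for pods that have gone away. A reduced standalone sketch of that diffing pattern (generic names, not the project's API):

package main

import "fmt"

// reconcile starts anything in spec that is not yet running and stops
// anything running that is no longer in spec.
func reconcile(spec map[string]struct{}, running map[string]func()) {
	for name := range spec {
		if _, ok := running[name]; !ok {
			name := name // capture per iteration (pre-Go 1.22 idiom, cf. pod := pod above)
			fmt.Println("starting", name)
			running[name] = func() { fmt.Println("stopping", name) } // stand-in for stream.close
		}
	}
	for name, stop := range running {
		if _, ok := spec[name]; !ok {
			stop()
			delete(running, name) // deleting while ranging is allowed in Go
		}
	}
}

func main() {
	spec := map[string]struct{}{"foo": {}}
	running := map[string]func(){}
	reconcile(spec, running) // starts foo
	delete(spec, "foo")
	reconcile(spec, running) // stops foo
}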
@@ -144,11 +162,12 @@ func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
 			}
 
 		// streamErrors is never closed
-		case streamErr := <-streamErrors:
-			if streamErr.recoverable {
+		case err := <-streamErrors:
+			var streamErr *streamError
+			if errors.As(err, &streamErr) && streamErr.recoverable {
 				// if the error is recoverable, we just remove the pod from the status
 				// map. It will be recreated and retried on the next iteration.
-				pw.removePod(streamErr.podName)
+				pw.removePod(streamErr.stream)
 			} else {
 				return fmt.Errorf("error streaming logs: %w", streamErr)
 			}
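
streamErrors is now a plain chan error, and the handler recovers the concrete type with errors.As; returning the error interface from copyPodLogs also sidesteps the classic pitfall where a nil *streamError stored in an error variable compares as non-nil (that motivation is inferred, not stated in the commit). A self-contained illustration of the unwrapping:

package main

import (
	"errors"
	"fmt"
)

type streamError struct {
	err         error
	recoverable bool
}

func (e *streamError) Error() string { return e.err.Error() }

func handle(err error) {
	var streamErr *streamError
	if errors.As(err, &streamErr) && streamErr.recoverable {
		fmt.Println("recoverable, will retry:", err)
		return
	}
	fmt.Println("fatal:", err)
}

func main() {
	handle(&streamError{err: errors.New("ContainerCreating"), recoverable: true})
	handle(errors.New("connection refused"))
}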
@@ -171,33 +190,50 @@ func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
 	}
 }
 
-func (pw *PodWatcher) removePod(podName string) {
-	pw.logger.Printf("[PodWatcher] removing pod, name = %s", podName)
-	delete(pw.status, podName)
+func (pw *PodWatcher) removePod(stream stream) {
+	pw.logger.Printf("[PodWatcher] removing pod, name = %s", stream.podName)
+	stream.close()
+	delete(pw.status, stream.podName)
 }
 
-func copyPodLogs(ctx context.Context, wg *sync.WaitGroup, client KubernetesClient, pod *corev1.Pod, container string, logStream chan string) *streamError {
+func copyPodLogs(ctx context.Context, wg *sync.WaitGroup, client KubernetesClient, stream stream, container string, logStream chan string) error {
 	defer wg.Done()
 
-	req := client.Typed.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{Follow: true, Container: container})
+	req := client.Typed.CoreV1().Pods(stream.namespace).GetLogs(stream.podName, &corev1.PodLogOptions{Follow: true, Container: container})
 	logs, err := req.Stream(ctx)
 	// If one container is still being created, do not treat this as a fatal error.
 	var statusErr *apierrors.StatusError
 	if errors.As(err, &statusErr) && statusErr.Status().Reason == metav1.StatusReasonBadRequest && strings.Contains(statusErr.Error(), "ContainerCreating") {
-		return newRecoverableError(err, pod.Name)
+		return newRecoverableError(err, stream)
 	} else if err != nil {
-		return newStreamError(err, pod.Name)
+		return newStreamError(err, stream)
 	}
 
+	// Closing the reader ensures that the goroutine below is not leaked.
 	defer func() { _ = logs.Close() }()
 
-	scanner := bufio.NewScanner(logs)
-	for scanner.Scan() {
-		logStream <- "[" + pod.Name + "] " + scanner.Text() + nl
-	}
-
-	if err := scanner.Err(); err != nil {
-		return newStreamError(fmt.Errorf("error scanning logs: %v", err), pod.Name)
-	}
-
-	return nil
+	done := make(chan error, 1)
+	go func() {
+		scanner := bufio.NewScanner(logs)
+		for scanner.Scan() {
+			logStream <- "[" + stream.podName + "] " + scanner.Text() + nl
+		}
+		if err := scanner.Err(); err != nil {
+			done <- newStreamError(fmt.Errorf("error scanning logs: %v", err), stream)
+			return
+		}
+		done <- nil
+	}()
+
+	for {
+		select {
+		case err := <-done:
+			return err
+		case <-stream.done:
+			return nil
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	}
 }
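
The reworked copyPodLogs moves the blocking bufio.Scanner loop into its own goroutine and waits in a select on the scan result, the stream's done channel and the context; the added comment notes that closing the response body is what unblocks a Scan that is waiting on a read. The same shape, reduced to a standalone sketch over an arbitrary io.ReadCloser (illustrative only, not the project's function):

package main

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"strings"
)

// copyLines forwards lines from r until it is exhausted, done is closed, or
// the context is cancelled. The deferred Close unblocks the scanning
// goroutine when we return early, so it is not leaked.
func copyLines(ctx context.Context, r io.ReadCloser, done <-chan struct{}, out chan<- string) error {
	defer r.Close()

	scanErr := make(chan error, 1)
	go func() {
		scanner := bufio.NewScanner(r)
		for scanner.Scan() {
			out <- scanner.Text()
		}
		scanErr <- scanner.Err()
	}()

	select {
	case err := <-scanErr:
		return err
	case <-done:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	out := make(chan string, 4)
	err := copyLines(context.Background(), io.NopCloser(strings.NewReader("a\nb\n")), make(chan struct{}), out)
	fmt.Println("err:", err, "lines:", len(out))
}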

View File

@@ -34,7 +34,7 @@ type mockClientset struct {
 	podsWatcher       watch.Interface
 	getLogsStatusCode int
-	getLogsRespBody   string
+	getLogsReaderFunc func() io.ReadCloser
 	getLogsErr        error
 }
@@ -52,7 +52,7 @@ func (m *mockClientset) GetLogs(podName string, _ *corev1.PodLogOptions) *rest.R
 			}
 			return &http.Response{
 				StatusCode: m.getLogsStatusCode,
-				Body:       io.NopCloser(strings.NewReader(m.getLogsRespBody)),
+				Body:       m.getLogsReaderFunc(),
 			}, nil
 		}),
 		NegotiatedSerializer: scheme.Codecs.WithoutConversion(),
@@ -65,7 +65,7 @@ func TestPodWatcherClose(t *testing.T) {
 	podsWatcher := watch.NewFake()
 	clientset := mockClientset{
-		getLogsRespBody:   "it worked",
+		getLogsReaderFunc: func() io.ReadCloser { return io.NopCloser(strings.NewReader("it worked")) },
 		getLogsStatusCode: http.StatusOK,
 		podsWatcher:       podsWatcher,
 	}
@@ -90,6 +90,58 @@ func TestPodWatcherClose(t *testing.T) {
 	assert.Equal(t, "[foo] it worked\n", buf.String())
 }
 
+type logReaderIterator struct {
+	rcs []io.ReadCloser
+}
+
+func (f *logReaderIterator) next() io.ReadCloser {
+	var rc io.ReadCloser
+	rc, f.rcs = f.rcs[0], f.rcs[1:]
+	return rc
+}
+
+func TestPodWatcherRemovedPod(t *testing.T) {
+	podsWatcher := watch.NewFake()
+
+	r1, w1 := io.Pipe()
+	r2, w2 := io.Pipe()
+	it := logReaderIterator{[]io.ReadCloser{r1, r2}}
+
+	clientset := mockClientset{
+		getLogsReaderFunc: it.next,
+		getLogsStatusCode: http.StatusOK,
+		podsWatcher:       podsWatcher,
+	}
+	clientset.PrependWatchReactor("pods", k8stest.DefaultWatchReactor(podsWatcher, nil))
+	client := logs.KubernetesClient{Typed: &clientset}
+	selector := labels.SelectorFromSet(map[string]string{"foo": "bar"})
+	var buf bytes.Buffer
+	pw := logs.NewPodWatcher(client, "mycontainer", selector, &buf, discardLogger())
+
+	go func() {
+		defer pw.Close()
+
+		podsWatcher.Add(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, Status: corev1.PodStatus{Phase: corev1.PodRunning}})
+		time.Sleep(time.Millisecond * 500)
+		w1.Write([]byte("should be logged\n"))
+		podsWatcher.Delete(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, Status: corev1.PodStatus{Phase: corev1.PodRunning}})
+		time.Sleep(time.Millisecond * 500)
+		w1.Write([]byte("should not be logged\n"))
+		time.Sleep(time.Millisecond * 500)
+		podsWatcher.Add(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "default"}, Status: corev1.PodStatus{Phase: corev1.PodRunning}})
+		time.Sleep(time.Millisecond * 500)
+		w2.Write([]byte("should be logged\n"))
+		time.Sleep(time.Millisecond * 500)
+	}()
+
+	err := pw.WatchPods(context.Background())
+	require.NoError(t, err)
+
+	assert.Equal(t, "[foo] should be logged\n[bar] should be logged\n", buf.String())
+}
+
 func TestPodWatcher(t *testing.T) {
 	testCases := []struct {
 		name              string
@@ -135,7 +187,7 @@ func TestPodWatcher(t *testing.T) {
 			podsWatcher := watch.NewFake()
 			clientset := mockClientset{
-				getLogsRespBody:   tc.getLogsRespBody,
+				getLogsReaderFunc: func() io.ReadCloser { return io.NopCloser(strings.NewReader(tc.getLogsRespBody)) },
 				getLogsStatusCode: tc.getLogsStatusCode,
 				getLogsErr:        tc.getLogsErr,
 				podsWatcher:       podsWatcher,
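
In the new TestPodWatcherRemovedPod above, getLogsReaderFunc hands out one io.Pipe reader per GetLogs call, so the test controls exactly when each fake pod produces a log line and can assert that nothing written after the pod's deletion reaches the output. A compact illustration of io.Pipe as a hand-driven log source (standalone, not using the project's mocks):

package main

import (
	"bufio"
	"fmt"
	"io"
)

func main() {
	r, w := io.Pipe()

	// Consumer: reads lines until the writer side is closed (EOF).
	lines := make(chan string)
	go func() {
		scanner := bufio.NewScanner(r)
		for scanner.Scan() {
			lines <- scanner.Text()
		}
		close(lines)
	}()

	// Producer: decides exactly when each line becomes readable.
	go func() {
		w.Write([]byte("should be logged\n"))
		w.Close() // EOF on the reader side ends the stream
	}()

	for l := range lines {
		fmt.Println("[foo]", l)
	}
}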