podWatcher: handle removed pods

Rob Watson 2022-06-12 15:26:27 +02:00
parent 397708336d
commit bf49105619
2 changed files with 124 additions and 36 deletions

Changed file 1 of 2: the PodWatcher implementation

@ -20,9 +20,23 @@ import (
 const nl = "\n"
 
+type stream struct {
+	podName   string
+	namespace string
+	done      chan struct{}
+}
+
+func newStream(podName, namespace string) stream {
+	return stream{podName: podName, namespace: namespace, done: make(chan struct{})}
+}
+
+func (s stream) close() {
+	close(s.done)
+}
+
 type streamError struct {
 	err         error
-	podName     string
+	stream      stream
 	recoverable bool
 }
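The new stream type leans on a standard Go idiom: closing a channel is a one-shot broadcast, so a receive on a closed channel never blocks and every goroutine selecting on s.done unblocks at once, while closing the same channel twice panics, which is why each stream is closed only when it is removed or on shutdown. A tiny standalone illustration of that behaviour (not project code):

package main

import (
	"fmt"
	"sync"
)

func main() {
	done := make(chan struct{})
	var wg sync.WaitGroup

	// Several receivers wait on the same channel.
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-done // a receive on a closed channel returns immediately
			fmt.Println("receiver", id, "unblocked")
		}(i)
	}

	close(done) // one close releases every receiver; closing again would panic
	wg.Wait()
}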
@ -30,12 +44,12 @@ func (re *streamError) Error() string {
 	return re.err.Error()
 }
 
-func newStreamError(err error, podName string) *streamError {
-	return &streamError{err: err, podName: podName}
+func newStreamError(err error, stream stream) *streamError {
+	return &streamError{err: err, stream: stream}
 }
 
-func newRecoverableError(err error, podName string) *streamError {
-	return &streamError{err: err, podName: podName, recoverable: true}
+func newRecoverableError(err error, stream stream) *streamError {
+	return &streamError{err: err, stream: stream, recoverable: true}
 }
 
 // PodWatcher consumes and merges the logs for a specified set of pods.
@ -44,7 +58,7 @@ type PodWatcher struct {
 	container     string
 	labelSelector labels.Selector
 	spec          map[string]*corev1.Pod
-	status        map[string]bool
+	status        map[string]stream
 	streamResults chan error
 	dst           io.Writer
 	closeChan     chan struct{}
@ -58,7 +72,7 @@ func NewPodWatcher(client KubernetesClient, container string, labelSelector labe
 		container:     container,
 		labelSelector: labelSelector,
 		spec:          make(map[string]*corev1.Pod),
-		status:        make(map[string]bool),
+		status:        make(map[string]stream),
 		streamResults: make(chan error),
 		dst:           dst,
 		closeChan:     make(chan struct{}),
@ -82,12 +96,13 @@ func (pw *PodWatcher) WatchPods(ctx context.Context) error {
 const tickerInterval = time.Millisecond * 250
 
-// Close terminates the PodWatcher.
+// Close terminates the PodWatcher and should be called at most once.
 func (pw *PodWatcher) Close() {
 	pw.closeChan <- struct{}{}
 }
 
 func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
+	// TODO: pass namespace
 	podsClient := pw.client.Typed.CoreV1().Pods(corev1.NamespaceDefault)
 
 	// Returns a watcher which notifies of changes in the relevant pods.
@ -102,7 +117,7 @@ func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
 	defer ticker.Stop()
 
 	logStream := make(chan string)
-	streamErrors := make(chan *streamError)
+	streamErrors := make(chan error)
 	resultChan := watcher.ResultChan()
 
 	for {
@ -111,29 +126,32 @@ func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
 			return ctx.Err()
 		case <-pw.closeChan:
+			for _, stream := range pw.status {
+				stream.close()
+			}
 			return nil
 		case <-ticker.C:
 			// Iterate through the desired state (w.spec) and launch goroutines to
 			// process the logs of any missing pods.
 			for podName, pod := range pw.spec {
 				pod := pod
 				if _, ok := pw.status[podName]; !ok {
 					pw.logger.Printf("[PodWatcher] adding pod, name = %s", pod.Name)
-					pw.status[pod.Name] = true
+					s := newStream(pod.Name, pod.Namespace)
+					pw.status[pod.Name] = s
 
 					wg.Add(1)
 					go func() {
-						if err := copyPodLogs(ctx, wg, pw.client, pod, pw.container, logStream); err != nil {
+						if err := copyPodLogs(ctx, wg, pw.client, s, pw.container, logStream); err != nil {
 							streamErrors <- err
 						}
 					}()
 				}
 			}
 
 			// For any pods which no longer exist, remove the pod.
-			// TODO: stop the log streaming.
-			for podName := range pw.status {
+			for podName, stream := range pw.status {
 				if _, ok := pw.spec[podName]; !ok {
-					pw.removePod(podName)
+					pw.removePod(stream)
 				}
 			}
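The ticker case is effectively a small reconcile loop between the desired pods (pw.spec) and the streams currently running (pw.status). Stripped of the WaitGroup and channel plumbing, the shape is roughly this (simplified sketch with hypothetical names, not the exact code above):

package main

import "fmt"

type stream struct{ done chan struct{} }

// reconcile starts a stream for each desired pod that has none yet, and stops
// the stream of each pod that is no longer desired.
func reconcile(spec map[string]bool, status map[string]stream) {
	for name := range spec {
		if _, ok := status[name]; !ok {
			status[name] = stream{done: make(chan struct{})}
			fmt.Println("started stream for", name)
			// the real code also launches a copyPodLogs goroutine here
		}
	}
	for name, s := range status {
		if _, ok := spec[name]; !ok {
			close(s.done) // tells the copier goroutine to stop
			delete(status, name)
			fmt.Println("stopped stream for", name)
		}
	}
}

func main() {
	status := make(map[string]stream)
	reconcile(map[string]bool{"foo": true}, status) // starts foo
	reconcile(map[string]bool{"bar": true}, status) // stops foo, starts bar
}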
@ -144,11 +162,12 @@ func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
 			}
 
 		// streamErrors is never closed
-		case streamErr := <-streamErrors:
-			if streamErr.recoverable {
+		case err := <-streamErrors:
+			var streamErr *streamError
+			if errors.As(err, &streamErr) && streamErr.recoverable {
 				// if the error is recoverable, we just remove the pod from the status
 				// map. It will be recreated and retried on the next iteration.
-				pw.removePod(streamErr.podName)
+				pw.removePod(streamErr.stream)
 			} else {
 				return fmt.Errorf("error streaming logs: %w", streamErr)
 			}
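Because streamErrors now carries plain error values, the recoverable flag has to be recovered with errors.As. For reference, the unwrapping pattern on its own looks like this (standalone illustration, not project code):

package main

import (
	"errors"
	"fmt"
)

type streamError struct {
	err         error
	recoverable bool
}

func (e *streamError) Error() string { return e.err.Error() }

func main() {
	var err error = &streamError{err: errors.New("pod is not ready"), recoverable: true}

	var streamErr *streamError
	if errors.As(err, &streamErr) && streamErr.recoverable {
		fmt.Println("recoverable:", streamErr) // retry on the next tick
	} else {
		fmt.Println("fatal:", err) // propagate and stop
	}
}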
@ -171,33 +190,50 @@ func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
 	}
 }
 
-func (pw *PodWatcher) removePod(podName string) {
-	pw.logger.Printf("[PodWatcher] removing pod, name = %s", podName)
-	delete(pw.status, podName)
+func (pw *PodWatcher) removePod(stream stream) {
+	pw.logger.Printf("[PodWatcher] removing pod, name = %s", stream.podName)
+	stream.close()
+	delete(pw.status, stream.podName)
 }
 
-func copyPodLogs(ctx context.Context, wg *sync.WaitGroup, client KubernetesClient, pod *corev1.Pod, container string, logStream chan string) *streamError {
+func copyPodLogs(ctx context.Context, wg *sync.WaitGroup, client KubernetesClient, stream stream, container string, logStream chan string) error {
 	defer wg.Done()
 
-	req := client.Typed.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{Follow: true, Container: container})
+	req := client.Typed.CoreV1().Pods(stream.namespace).GetLogs(stream.podName, &corev1.PodLogOptions{Follow: true, Container: container})
 	logs, err := req.Stream(ctx)
 
 	// If one container is still being created, do not treat this as a fatal error.
 	var statusErr *apierrors.StatusError
 	if errors.As(err, &statusErr) && statusErr.Status().Reason == metav1.StatusReasonBadRequest && strings.Contains(statusErr.Error(), "ContainerCreating") {
-		return newRecoverableError(err, pod.Name)
+		return newRecoverableError(err, stream)
 	} else if err != nil {
-		return newStreamError(err, pod.Name)
+		return newStreamError(err, stream)
 	}
 
 	// Closing the reader ensures that the goroutine below is not leaked.
 	defer func() { _ = logs.Close() }()
 
+	done := make(chan error, 1)
+	go func() {
 		scanner := bufio.NewScanner(logs)
 		for scanner.Scan() {
-			logStream <- "[" + pod.Name + "] " + scanner.Text() + nl
+			logStream <- "[" + stream.podName + "] " + scanner.Text() + nl
 		}
 		if err := scanner.Err(); err != nil {
-			return newStreamError(fmt.Errorf("error scanning logs: %v", err), pod.Name)
+			done <- newStreamError(fmt.Errorf("error scanning logs: %v", err), stream)
+			return
 		}
+		done <- nil
+	}()
+
+	for {
+		select {
+		case err := <-done:
+			return err
+		case <-stream.done:
+			return nil
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	}
 }
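The reworked copyPodLogs moves the blocking bufio.Scanner loop into a goroutine and then selects on the ways out: the scan finishing, the stream being removed, or the context ending; closing the log reader is what unblocks the scanner goroutine so it is not leaked. A stripped-down, self-contained version of that pattern (assumed simplification, without the context handling):

package main

import (
	"bufio"
	"fmt"
	"io"
	"time"
)

// follow forwards lines from r until stop is closed. Because Scanner.Scan
// blocks, the scanning runs in a goroutine; closing r is what actually
// unblocks it, so the goroutine is not leaked (same idea as copyPodLogs).
func follow(r io.ReadCloser, stop <-chan struct{}, out chan<- string) error {
	defer r.Close()

	done := make(chan error, 1)
	go func() {
		scanner := bufio.NewScanner(r)
		for scanner.Scan() {
			out <- scanner.Text()
		}
		done <- scanner.Err()
	}()

	select {
	case err := <-done: // source finished (or failed)
		return err
	case <-stop: // caller asked us to stop
		return nil
	}
}

func main() {
	r, w := io.Pipe()
	out := make(chan string, 4)
	stop := make(chan struct{})

	go follow(r, stop, out)

	w.Write([]byte("hello\n"))
	fmt.Println(<-out)

	close(stop) // follow returns and closes the pipe reader
	time.Sleep(10 * time.Millisecond)
}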

Changed file 2 of 2: the PodWatcher tests

@ -34,7 +34,7 @@ type mockClientset struct {
 	podsWatcher       watch.Interface
 	getLogsStatusCode int
-	getLogsRespBody   string
+	getLogsReaderFunc func() io.ReadCloser
 	getLogsErr        error
 }
@ -52,7 +52,7 @@ func (m *mockClientset) GetLogs(podName string, _ *corev1.PodLogOptions) *rest.R
 			}
 			return &http.Response{
 				StatusCode: m.getLogsStatusCode,
-				Body:       io.NopCloser(strings.NewReader(m.getLogsRespBody)),
+				Body:       m.getLogsReaderFunc(),
 			}, nil
 		}),
 		NegotiatedSerializer: scheme.Codecs.WithoutConversion(),
@ -65,7 +65,7 @@ func TestPodWatcherClose(t *testing.T) {
 	podsWatcher := watch.NewFake()
 	clientset := mockClientset{
-		getLogsRespBody:   "it worked",
+		getLogsReaderFunc: func() io.ReadCloser { return io.NopCloser(strings.NewReader("it worked")) },
 		getLogsStatusCode: http.StatusOK,
 		podsWatcher:       podsWatcher,
 	}
@ -90,6 +90,58 @@ func TestPodWatcherClose(t *testing.T) {
 	assert.Equal(t, "[foo] it worked\n", buf.String())
 }
 
+type logReaderIterator struct {
+	rcs []io.ReadCloser
+}
+
+func (f *logReaderIterator) next() io.ReadCloser {
+	var rc io.ReadCloser
+	rc, f.rcs = f.rcs[0], f.rcs[1:]
+	return rc
+}
+
+func TestPodWatcherRemovedPod(t *testing.T) {
+	podsWatcher := watch.NewFake()
+	r1, w1 := io.Pipe()
+	r2, w2 := io.Pipe()
+	it := logReaderIterator{[]io.ReadCloser{r1, r2}}
+	clientset := mockClientset{
+		getLogsReaderFunc: it.next,
+		getLogsStatusCode: http.StatusOK,
+		podsWatcher:       podsWatcher,
+	}
+
+	clientset.PrependWatchReactor("pods", k8stest.DefaultWatchReactor(podsWatcher, nil))
+	client := logs.KubernetesClient{Typed: &clientset}
+	selector := labels.SelectorFromSet(map[string]string{"foo": "bar"})
+
+	var buf bytes.Buffer
+	pw := logs.NewPodWatcher(client, "mycontainer", selector, &buf, discardLogger())
+
+	go func() {
+		defer pw.Close()
+
+		podsWatcher.Add(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, Status: corev1.PodStatus{Phase: corev1.PodRunning}})
+		time.Sleep(time.Millisecond * 500)
+		w1.Write([]byte("should be logged\n"))
+		podsWatcher.Delete(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, Status: corev1.PodStatus{Phase: corev1.PodRunning}})
+		time.Sleep(time.Millisecond * 500)
+		w1.Write([]byte("should not be logged\n"))
+		time.Sleep(time.Millisecond * 500)
+		podsWatcher.Add(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "default"}, Status: corev1.PodStatus{Phase: corev1.PodRunning}})
+		time.Sleep(time.Millisecond * 500)
+		w2.Write([]byte("should be logged\n"))
+		time.Sleep(time.Millisecond * 500)
+	}()
+
+	err := pw.WatchPods(context.Background())
+	require.NoError(t, err)
+
+	assert.Equal(t, "[foo] should be logged\n[bar] should be logged\n", buf.String())
+}
+
 func TestPodWatcher(t *testing.T) {
 	testCases := []struct {
 		name              string
@ -135,7 +187,7 @@ func TestPodWatcher(t *testing.T) {
 			podsWatcher := watch.NewFake()
 			clientset := mockClientset{
-				getLogsRespBody:   tc.getLogsRespBody,
+				getLogsReaderFunc: func() io.ReadCloser { return io.NopCloser(strings.NewReader(tc.getLogsRespBody)) },
 				getLogsStatusCode: tc.getLogsStatusCode,
 				getLogsErr:        tc.getLogsErr,
 				podsWatcher:       podsWatcher,