kubectl-persistent-logger/logs/pod_watcher.go

package logs

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"io"
	"log"
	"strings"
	"sync"
	"time"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/watch"
)

const nl = "\n"

// logStream represents the log stream from an individual pod.
type logStream struct {
	podName   string
	container string
	namespace string
	done      chan struct{}
}

// newStream constructs a logStream for the given pod and container. The done
// channel is closed by the PodWatcher to signal that streaming should stop.
func newStream(podName, container, namespace string) logStream {
	return logStream{
		podName:   podName,
		container: container,
		namespace: namespace,
		done:      make(chan struct{}),
	}
}

// logError wraps an error with the logStream it originated from, and records
// whether the error is recoverable.
type logError struct {
	err         error
	logStream   logStream
	recoverable bool
}

func (re *logError) Error() string {
	return re.err.Error()
}

// newLogError returns a fatal logError for the given stream.
func newLogError(err error, stream logStream) *logError {
	return &logError{err: err, logStream: stream}
}

// newRecoverableLogError returns a recoverable logError for the given stream.
func newRecoverableLogError(err error, stream logStream) *logError {
	return &logError{err: err, logStream: stream, recoverable: true}
}

// PodWatcher consumes and merges the logs for a specified set of pods.
type PodWatcher struct {
	client        KubernetesClient
	namespace     string
	container     string
	labelSelector labels.Selector
	spec          map[string]*corev1.Pod // desired state: pods whose logs should be streamed
	status        map[string]logStream   // current state: pods whose logs are being streamed
	streamResults chan error
	dst           io.Writer
	closeChan     chan struct{}
	logger        *log.Logger
}

// NewPodWatcher initializes a new PodWatcher.
func NewPodWatcher(client KubernetesClient, namespace string, container string, labelSelector labels.Selector, dst io.Writer, logger *log.Logger) PodWatcherInterface {
	return &PodWatcher{
		client:        client,
		namespace:     namespace,
		container:     container,
		labelSelector: labelSelector,
		spec:          make(map[string]*corev1.Pod),
		status:        make(map[string]logStream),
		streamResults: make(chan error),
		dst:           dst,
		closeChan:     make(chan struct{}),
		logger:        logger,
	}
}

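// A typical invocation, as a minimal sketch (the KubernetesClient value, the
// label selector, the context and the logger are assumed to be constructed
// by the caller):
//
//	pw := NewPodWatcher(client, "default", "mycontainer", selector, os.Stdout, logger)
//	defer pw.Close()
//	if err := pw.WatchPods(ctx); err != nil {
//		log.Fatal(err)
//	}
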
// WatchPods blocks while it watches the pods which match the Selector of the
// provided resource.
func (pw *PodWatcher) WatchPods(ctx context.Context) error {
	var wg sync.WaitGroup
	err := pw.watchPods(ctx, &wg)

	wg.Wait()
	pw.logger.Println("[PodWatcher] all goroutines exited, exiting")

	return err
}

// tickerInterval is how often the watcher reconciles its current state
// (status) against its desired state (spec).
const tickerInterval = time.Millisecond * 250

// Close terminates the PodWatcher.
func (pw *PodWatcher) Close() {
	close(pw.closeChan)
}

func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
	podsClient := pw.client.Typed.CoreV1().Pods(pw.namespace)

	// podsClient.Watch returns a watcher which notifies of changes to the
	// relevant pods. We don't defer Stop() on the returned value because the
	// sender is the Kubernetes SDK, and that may introduce a
	// send-on-closed-channel panic.
	watcher, err := podsClient.Watch(ctx, metav1.ListOptions{LabelSelector: pw.labelSelector.String()})
	if err != nil {
		return err
	}

	ticker := time.NewTicker(tickerInterval)
	defer ticker.Stop()

	logLines := make(chan string)
	logErrorChan := make(chan error)
	resultChan := watcher.ResultChan()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()

		case <-pw.closeChan:
			for _, stream := range pw.status {
				pw.removePod(stream)
			}
			return nil

		case <-ticker.C:
			// Iterate through the desired state (pw.spec) and launch goroutines
			// to process the logs of any missing pods.
			for podName, pod := range pw.spec {
				if _, ok := pw.status[podName]; !ok {
					pw.logger.Printf("[PodWatcher] adding pod, name = %s, container = %s, namespace = %s", pod.Name, pw.container, pod.Namespace)
					stream := newStream(pod.Name, pw.container, pod.Namespace)
					pw.status[pod.Name] = stream

					wg.Add(1)
					go func() {
						if err := copyLogStream(ctx, wg, pw.client, stream, logLines); err != nil {
							logErrorChan <- err
						}
					}()
				}
			}
			// Remove any pods which no longer exist in the desired state.
			for podName, stream := range pw.status {
				if _, ok := pw.spec[podName]; !ok {
					pw.removePod(stream)
				}
			}

		// logLines is never closed, so we don't check the second return value.
		case line := <-logLines:
			if _, err := pw.dst.Write([]byte(line)); err != nil {
				return fmt.Errorf("error writing logs: %v", err)
			}

		// logErrorChan is never closed, so we don't check the second return value.
		case err := <-logErrorChan:
			var logErr *logError
			if errors.As(err, &logErr) && logErr.recoverable {
				// If the error is recoverable, we just remove the pod from the
				// status map. It will be recreated and retried on the next
				// iteration.
				pw.removePod(logErr.logStream)
			} else {
				return fmt.Errorf("error streaming logs: %w", err)
			}

		case evt, ok := <-resultChan:
			if !ok {
				return nil
			}
			switch evt.Type {
			case watch.Added, watch.Modified:
				pod := evt.Object.(*corev1.Pod)
				if pod.Status.Phase == corev1.PodRunning {
					pw.spec[pod.Name] = pod
				}
			case watch.Deleted:
				pod := evt.Object.(*corev1.Pod)
				delete(pw.spec, pod.Name)
			}
		}
	}
}

// removePod signals to the goroutine streaming the given pod's logs that it
// should stop, and removes the pod from the status map.
func (pw *PodWatcher) removePod(stream logStream) {
	pw.logger.Printf("[PodWatcher] removing pod, name = %s", stream.podName)
	close(stream.done)
	delete(pw.status, stream.podName)
}

// copyLogStream copies log lines from the given pod to the logLines channel,
// returning when the stream's done channel is closed, the context is
// cancelled, or an error occurs.
func copyLogStream(ctx context.Context, wg *sync.WaitGroup, client KubernetesClient, stream logStream, logLines chan string) error {
	defer wg.Done()

	req := client.Typed.CoreV1().Pods(stream.namespace).GetLogs(stream.podName, &corev1.PodLogOptions{Follow: true, Container: stream.container})
	logReader, err := req.Stream(ctx)

	// If the container is still being created, do not treat this as a fatal error.
	var statusErr *apierrors.StatusError
	if errors.As(err, &statusErr) && statusErr.Status().Reason == metav1.StatusReasonBadRequest && strings.Contains(statusErr.Error(), "ContainerCreating") {
		return newRecoverableLogError(err, stream)
	} else if err != nil {
		return newLogError(err, stream)
	}

	// Closing the reader ensures that the goroutine below is not leaked.
	defer func() { _ = logReader.Close() }()

	errch := make(chan error, 1)
	go func() {
		defer close(errch)
		scanner := bufio.NewScanner(logReader)
		for scanner.Scan() {
			logLines <- "[" + stream.podName + "] " + scanner.Text() + nl
		}
		if err := scanner.Err(); err != nil {
			errch <- newLogError(fmt.Errorf("error scanning logs: %v", err), stream)
			return
		}
	}()

	for {
		select {
		case err := <-errch:
			return err
		case <-stream.done:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
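
// Note: bufio.Scanner enforces a maximum token size (bufio.MaxScanTokenSize,
// 64KiB by default), so very long log lines would surface as bufio.ErrTooLong
// from scanner.Err() and terminate the stream. A sketch of how the scanner in
// copyLogStream could be given a larger buffer, should that ever prove
// necessary:
//
//	scanner := bufio.NewScanner(logReader)
//	scanner.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // allow lines up to 1MiB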