kubectl-persistent-logger/logs/watcher.go

package logs

import (
	"context"
	"io"
	"log"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
)
// KubernetesClient wraps a Kubernetes clientset.
type KubernetesClient struct {
	kubernetes.Interface
}
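
// A minimal construction sketch, assuming a standard client-go setup; the
// kubeconfig path below is illustrative, not part of this package:
//
//	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
//	if err != nil {
//		log.Fatal(err)
//	}
//	clientset, err := kubernetes.NewForConfig(config)
//	if err != nil {
//		log.Fatal(err)
//	}
//	client := KubernetesClient{Interface: clientset}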

// Watcher watches a deployment and tails the logs for its currently active
// pods.
type Watcher struct {
	deployName string
	clientset  KubernetesClient
	spec       map[string]*corev1.Pod // desired state: running pods whose logs should be tailed
	status     map[string]*Stream     // current state: active log streams, keyed by pod name
	dst        io.Writer
}

// NewWatcher creates a new Watcher.
func NewWatcher(deployName string, clientset KubernetesClient, dst io.Writer) *Watcher {
	return &Watcher{
		deployName: deployName,
		clientset:  clientset,
		spec:       make(map[string]*corev1.Pod),
		status:     make(map[string]*Stream),
		dst:        dst,
	}
}
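
// Example usage (a sketch; assumes the caller provides a ctx and a client,
// and the deployment name is illustrative):
//
//	w := NewWatcher("mydeployment", client, os.Stdout)
//	if err := w.Watch(ctx); err != nil {
//		log.Fatal(err)
//	}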

// Watch watches a deployment, keeping a log stream open for each of its
// running pods until ctx is cancelled or an unrecoverable error occurs.
func (w *Watcher) Watch(ctx context.Context) error {
	dst := NewConcurrentWriter(w.dst)

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	deploymentsClient := w.clientset.AppsV1().Deployments(corev1.NamespaceDefault)

	var opts metav1.GetOptions
	deployment, err := deploymentsClient.Get(ctx, w.deployName, opts)
	if err != nil {
		return err
	}

	podsClient := w.clientset.CoreV1().Pods(corev1.NamespaceDefault)

	labelsMap, err := metav1.LabelSelectorAsMap(deployment.Spec.Selector)
	if err != nil {
		return err
	}
	watcher, err := podsClient.Watch(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labelsMap).String()})
	if err != nil {
		return err
	}
	defer watcher.Stop()

	// podsWithErrors receives the name of any pod which experiences an
	// unexpected error in the logging flow. Such pods are removed from the
	// current state and then recreated if they still exist.
	podsWithErrors := make(chan string)

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			// Iterate through the desired state (w.spec) and launch goroutines to
			// process the logs of any missing pods.
			for podName, pod := range w.spec {
				if _, ok := w.status[podName]; !ok {
					w.addPod(ctx, pod, dst, podsWithErrors)
				}
			}
			// Remove any pods which are no longer part of the desired state.
			// TODO: check this is needed when a pod's labels change to no longer
			// match the deployment's selector.
			for podName := range w.status {
				if _, ok := w.spec[podName]; !ok {
					w.removePod(podName)
				}
			}
		case podName := <-podsWithErrors:
			w.removePod(podName)
		case evt := <-watcher.ResultChan():
			switch evt.Type {
			case watch.Added, watch.Modified:
				pod := evt.Object.(*corev1.Pod)
				log.Printf("event rcvd, type = %s, pod name = %s, phase = %s", evt.Type, pod.Name, pod.Status.Phase)
				if pod.Status.Phase == corev1.PodRunning {
					w.spec[pod.Name] = pod
				}
			case watch.Deleted:
				pod := evt.Object.(*corev1.Pod)
				delete(w.spec, pod.Name)
				log.Printf("event rcvd, type = DELETED, pod name = %s", pod.Name)
			case watch.Error:
				// TODO: error handling
				log.Fatalf("event rcvd, type = ERROR, object = %+v", evt.Object)
			}
		}
	}
}
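
// ConcurrentWriter is defined elsewhere in this package. A minimal sketch of
// the assumed behavior (serializing writes from multiple pod streams onto a
// single io.Writer with a mutex); the field names here are illustrative:
//
//	type ConcurrentWriter struct {
//		mu sync.Mutex
//		w  io.Writer
//	}
//
//	func (cw *ConcurrentWriter) Write(p []byte) (int, error) {
//		cw.mu.Lock()
//		defer cw.mu.Unlock()
//		return cw.w.Write(p)
//	}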

// addPod registers a Stream for the given pod and launches a goroutine which
// copies its logs to dst. Any error during copying causes the pod's name to
// be sent on errChan.
func (w *Watcher) addPod(ctx context.Context, p *corev1.Pod, dst io.Writer, errChan chan string) {
	log.Printf("adding pod, name = %s", p.Name)
	ls := NewStream(w.clientset, p, dst)
	w.status[p.Name] = ls

	go func() {
		if err := ls.Copy(ctx); err != nil {
			log.Printf("error copying logs: %v", err)
			errChan <- p.Name
		}
	}()
}
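
// Stream and NewStream are defined elsewhere in this package. Copy is assumed
// to follow the pod's logs until ctx is cancelled, roughly along these lines
// (a sketch using the standard client-go log API, not necessarily the
// package's actual implementation):
//
//	req := clientset.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name,
//		&corev1.PodLogOptions{Follow: true})
//	rc, err := req.Stream(ctx)
//	if err != nil {
//		return err
//	}
//	defer rc.Close()
//	_, err = io.Copy(dst, rc)
//	return err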

// removePod removes the named pod from the current state.
func (w *Watcher) removePod(podName string) {
	log.Printf("removing pod, name = %s", podName)
	delete(w.status, podName)
}