refactor: container logic

parent f34e16b034
commit 9c7989018b
@@ -1,7 +1,6 @@
 package container
 
 import (
-	"cmp"
 	"context"
 	"encoding/json"
 	"errors"
@@ -22,36 +21,22 @@ import (
 	"github.com/google/uuid"
 )
 
-var (
-	stopTimeout     = 10 * time.Second
-	defaultChanSize = 64
-)
+// stopTimeout is the timeout for stopping a container.
+var stopTimeout = 10 * time.Second
 
-type action func()
-
-// Actor is an actor that provides a thin wrapper around the Docker API client,
-// and provides additional functionality such as exposing container stats.
-type Actor struct {
+// Client provides a thin wrapper around the Docker API client, and provides
+// additional functionality such as exposing container stats.
+type Client struct {
 	id        uuid.UUID
 	ctx       context.Context
 	cancel    context.CancelFunc
-	ch        chan action
 	wg        sync.WaitGroup
 	apiClient *client.Client
 	logger    *slog.Logger
-
-	// mutable state
-	containers map[string]*domain.Container
 }
 
-// NewActorParams are the parameters for creating a new Actor.
-type NewActorParams struct {
-	ChanSize int
-	Logger   *slog.Logger
-}
-
-// NewActor creates a new Actor.
-func NewActor(ctx context.Context, params NewActorParams) (*Actor, error) {
+// NewClient creates a new Client.
+func NewClient(ctx context.Context, logger *slog.Logger) (*Client, error) {
 	apiClient, err := client.NewClientWithOpts(client.FromEnv)
 	if err != nil {
 		return nil, err
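A minimal usage sketch of the simplified constructor (hypothetical caller code, not part of this diff): the params struct and channel sizing are gone, so a caller passes only a context and logger, and still calls Close to release the Docker API client.

	ctx := context.Background()
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	client, err := container.NewClient(ctx, logger)
	if err != nil {
		return err
	}
	defer client.Close() // also stops and removes the client's containers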
@@ -59,45 +44,34 @@ func NewActor(ctx context.Context, params NewActorParams) (*Actor, error) {
 
 	ctx, cancel := context.WithCancel(ctx)
 
-	client := &Actor{
+	client := &Client{
 		id:     uuid.New(),
 		ctx:    ctx,
 		cancel: cancel,
-		ch:         make(chan action, cmp.Or(params.ChanSize, defaultChanSize)),
 		apiClient: apiClient,
-		logger:     params.Logger,
-		containers: make(map[string]*domain.Container),
+		logger:    logger,
 	}
 
-	client.wg.Add(1)
-	go func() {
-		defer client.wg.Done()
-
-		client.dockerEventLoop()
-	}()
-
-	go client.actorLoop()
-
 	return client, nil
 }
 
-// actorLoop is the main loop for the client.
-//
-// It continues to run until the internal channel is closed, which only happens
-// when [Close] is called. This means that it is not reliant on any context
-// remaining open, and any method calls made after [Close] may deadlock or
-// fail.
-func (a *Actor) actorLoop() {
-	for action := range a.ch {
-		action()
-	}
-}
+// stats is a struct to hold container stats.
+type stats struct {
+	cpuPercent       float64
+	memoryUsageBytes uint64
+}
 
-func (a *Actor) handleStats(id string) {
-	statsReader, err := a.apiClient.ContainerStats(a.ctx, id, true)
+// getStats returns a channel that will receive container stats. The channel is
+// never closed, but the spawned goroutine will exit when the context is
+// cancelled.
+func (a *Client) getStats(containerID string) <-chan stats {
+	ch := make(chan stats)
+
+	go func() {
+		statsReader, err := a.apiClient.ContainerStats(a.ctx, containerID, true)
 		if err != nil {
 			// TODO: error handling?
-			a.logger.Error("Error getting container stats", "err", err, "id", shortID(id))
+			a.logger.Error("Error getting container stats", "err", err, "id", shortID(containerID))
 			return
 		}
 		defer statsReader.Body.Close()
@@ -107,96 +81,74 @@ func (a *Actor) handleStats(id string) {
 			n, err := statsReader.Body.Read(buf)
 			if err != nil {
 				if errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) {
-					return
+					break
 				}
-				a.logger.Error("Error reading stats", "err", err, "id", shortID(id))
-				return
+				a.logger.Error("Error reading stats", "err", err, "id", shortID(containerID))
+				break
 			}
 
 			var statsResp container.StatsResponse
 			if err = json.Unmarshal(buf[:n], &statsResp); err != nil {
-				a.logger.Error("Error unmarshalling stats", "err", err, "id", shortID(id))
-				continue
+				a.logger.Error("Error unmarshalling stats", "err", err, "id", shortID(containerID))
+				break
 			}
 
-			a.ch <- func() {
-				ctr, ok := a.containers[id]
-				if !ok {
-					return
-				}
-
 			// https://stackoverflow.com/a/30292327/62871
 			cpuDelta := float64(statsResp.CPUStats.CPUUsage.TotalUsage - statsResp.PreCPUStats.CPUUsage.TotalUsage)
 			systemDelta := float64(statsResp.CPUStats.SystemUsage - statsResp.PreCPUStats.SystemUsage)
-				ctr.CPUPercent = (cpuDelta / systemDelta) * float64(statsResp.CPUStats.OnlineCPUs) * 100
-				ctr.MemoryUsageBytes = statsResp.MemoryStats.Usage
+			ch <- stats{
+				cpuPercent:       (cpuDelta / systemDelta) * float64(statsResp.CPUStats.OnlineCPUs) * 100,
+				memoryUsageBytes: statsResp.MemoryStats.Usage,
 			}
 		}
+	}()
+
+	return ch
 }
 
-func (a *Actor) dockerEventLoop() {
-	for {
-		ch, errCh := a.apiClient.Events(a.ctx, events.ListOptions{
-			Filters: filters.NewArgs(
-				filters.Arg("label", "app=termstream"),
-				filters.Arg("label", "app-id="+a.id.String()),
-			),
-		})
-
-		select {
-		case <-a.ctx.Done():
-			return
-		case evt := <-ch:
-			a.handleDockerEvent(evt)
-		case err := <-errCh:
-			if a.ctx.Err() != nil {
-				return
-			}
-
-			a.logger.Warn("Error receiving Docker events", "err", err)
-			time.Sleep(1 * time.Second)
-		}
-	}
-}
-
-func (a *Actor) handleDockerEvent(evt events.Message) {
-	a.ch <- func() {
-		ctr, ok := a.containers[evt.ID]
-		if !ok {
-			return
-		}
-
-		if strings.HasPrefix(string(evt.Action), "health_status") {
-			a.logger.Info("Event: health status changed", "type", evt.Type, "action", evt.Action, "id", shortID(evt.ID))
-
-			switch evt.Action {
-			case events.ActionHealthStatusRunning:
-				ctr.HealthState = "running"
-			case events.ActionHealthStatusHealthy:
-				ctr.HealthState = "healthy"
-			case events.ActionHealthStatusUnhealthy:
-				ctr.HealthState = "unhealthy"
-			default:
-				a.logger.Warn("Unknown health status", "action", evt.Action)
-				ctr.HealthState = "unknown"
-			}
-		}
-	}
-}
-
-// GetContainerState returns a copy of the current state of all containers.
-func (a *Actor) GetContainerState() map[string]domain.Container {
-	resultChan := make(chan map[string]domain.Container)
-
-	a.ch <- func() {
-		result := make(map[string]domain.Container, len(a.containers))
-		for id, ctr := range a.containers {
-			result[id] = *ctr
-		}
-		resultChan <- result
-	}
-
-	return <-resultChan
-}
+// getEvents returns a channel that will receive container events. The channel is
+// never closed, but the spawned goroutine will exit when the context is
+// cancelled.
+func (a *Client) getEvents(containerID string) <-chan events.Message {
+	sendC := make(chan events.Message)
+
+	getEvents := func() (bool, error) {
+		recvC, errC := a.apiClient.Events(a.ctx, events.ListOptions{
+			Filters: filters.NewArgs(
+				filters.Arg("container", containerID),
+				filters.Arg("type", "container"),
+			),
+		})
+
+		for {
+			select {
+			case <-a.ctx.Done():
+				return false, a.ctx.Err()
+			case evt := <-recvC:
+				sendC <- evt
+			case err := <-errC:
+				if a.ctx.Err() != nil || errors.Is(err, io.EOF) {
+					return false, err
+				}
+
+				return true, err
+			}
+		}
+	}
+
+	go func() {
+		for {
+			shouldRetry, err := getEvents()
+			if !shouldRetry {
+				break
+			}
+
+			a.logger.Warn("Error receiving Docker events", "err", err, "id", shortID(containerID))
+			time.Sleep(2 * time.Second)
+		}
+	}()
+
+	return sendC
+}
 
 // RunContainerParams are the parameters for running a container.
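For reference, the CPU calculation retained above (per the linked Stack Overflow answer) compares the container's CPU time delta against the whole system's between two samples, then scales by the number of online CPUs. A worked example with illustrative values:

	// Hypothetical deltas between two consecutive stats samples.
	cpuDelta := float64(200_000_000)      // container CPU time used, ns
	systemDelta := float64(4_000_000_000) // total system CPU time, ns
	onlineCPUs := float64(8)

	cpuPercent := (cpuDelta / systemDelta) * onlineCPUs * 100
	// (0.05) * 8 * 100 = 40: the container used the equivalent of 40% of one CPU.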
@@ -207,7 +159,10 @@ type RunContainerParams struct {
 }
 
 // RunContainer runs a container with the given parameters.
-func (a *Actor) RunContainer(ctx context.Context, params RunContainerParams) (string, <-chan struct{}, error) {
+//
+// The returned channel will receive the current state of the container, and
+// will be closed after the container has stopped.
+func (a *Client) RunContainer(ctx context.Context, params RunContainerParams) (string, <-chan domain.ContainerState, error) {
 	pullReader, err := a.apiClient.ImagePull(ctx, params.ContainerConfig.Image, image.PullOptions{})
 	if err != nil {
 		return "", nil, fmt.Errorf("image pull: %w", err)
@@ -240,44 +195,65 @@ func (a *Actor) RunContainer(ctx context.Context, params RunContainerParams) (st
 	}
 	a.logger.Info("Started container", "id", shortID(createResp.ID))
 
-	go a.handleStats(createResp.ID)
-
-	ch := make(chan struct{}, 1)
+	containerStateC := make(chan domain.ContainerState, 1)
 	a.wg.Add(1)
 	go func() {
 		defer a.wg.Done()
+		defer close(containerStateC)
 
-		respChan, errChan := a.apiClient.ContainerWait(ctx, createResp.ID, container.WaitConditionNotRunning)
-		select {
-		case resp := <-respChan:
-			a.logger.Info("Container entered non-running state", "exit_code", resp.StatusCode, "id", shortID(createResp.ID))
-			a.ch <- func() {
-				delete(a.containers, createResp.ID)
-			}
-		case err = <-errChan:
-			// TODO: error handling?
-			if err != context.Canceled {
-				a.logger.Error("Error setting container wait", "err", err, "id", shortID(createResp.ID))
-			}
-		}
-
-		ch <- struct{}{}
+		a.runContainerLoop(ctx, createResp.ID, containerStateC)
 	}()
 
-	// Update the containers map which must be done on the actor loop, but before
-	// we return from the method.
-	done := make(chan struct{})
-	a.ch <- func() {
-		a.containers[createResp.ID] = &domain.Container{ID: createResp.ID, HealthState: "healthy"}
-		done <- struct{}{}
-	}
-	<-done
-
-	return createResp.ID, ch, nil
+	return createResp.ID, containerStateC, nil
+}
+
+// runContainerLoop is the control loop for a single container. It returns only
+// when the container exits.
+func (a *Client) runContainerLoop(ctx context.Context, containerID string, stateCh chan<- domain.ContainerState) {
+	statsC := a.getStats(containerID)
+	eventsC := a.getEvents(containerID)
+	respC, errC := a.apiClient.ContainerWait(ctx, containerID, container.WaitConditionNotRunning)
+
+	state := &domain.ContainerState{ID: containerID}
+	sendState := func() { stateCh <- *state }
+	sendState()
+
+	for {
+		select {
+		case resp := <-respC:
+			a.logger.Info("Container entered non-running state", "exit_code", resp.StatusCode, "id", shortID(containerID))
+			return
+		case err := <-errC:
+			// TODO: error handling?
+			if err != context.Canceled {
+				a.logger.Error("Error setting container wait", "err", err, "id", shortID(containerID))
+			}
+			return
+		case evt := <-eventsC:
+			if strings.Contains(string(evt.Action), "health_status") {
+				switch evt.Action {
+				case events.ActionHealthStatusRunning:
+					state.HealthState = "running"
+				case events.ActionHealthStatusHealthy:
+					state.HealthState = "healthy"
+				case events.ActionHealthStatusUnhealthy:
+					state.HealthState = "unhealthy"
+				default:
+					a.logger.Warn("Unknown health status", "status", evt.Action)
+					state.HealthState = "unknown"
+				}
+				sendState()
+			}
+		case stats := <-statsC:
+			state.CPUPercent = stats.cpuPercent
+			state.MemoryUsageBytes = stats.memoryUsageBytes
+			sendState()
+		}
+	}
 }
 
 // Close closes the client, stopping and removing all running containers.
-func (a *Actor) Close() error {
+func (a *Client) Close() error {
 	a.cancel()
 
 	ctx, cancel := context.WithTimeout(context.Background(), stopTimeout)
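A sketch of how a caller might consume the new return value (hypothetical code; the in-tree caller is the mediaserver actor further down): the channel carries health and resource updates and is closed once the container stops, so ranging over it doubles as a completion signal.

	id, stateC, err := client.RunContainer(ctx, container.RunContainerParams{ /* ... */ })
	if err != nil {
		return err
	}

	go func() {
		for state := range stateC { // closed when the container exits
			logger.Info("state", "id", id, "health", state.HealthState, "cpu", state.CPUPercent)
		}
	}()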
@@ -296,12 +272,10 @@ func (a *Actor) Close() error {
 
 	a.wg.Wait()
 
-	close(a.ch)
-
 	return a.apiClient.Close()
 }
 
-func (a *Actor) removeContainer(ctx context.Context, id string) error {
+func (a *Client) removeContainer(ctx context.Context, id string) error {
 	a.logger.Info("Stopping container", "id", shortID(id))
 	stopTimeout := int(stopTimeout.Seconds())
 	if err := a.apiClient.ContainerStop(ctx, id, container.StopOptions{Timeout: &stopTimeout}); err != nil {
@@ -317,7 +291,7 @@ func (a *Actor) removeContainer(ctx context.Context, id string) error {
 }
 
 // ContainerRunning checks if a container with the given labels is running.
-func (a *Actor) ContainerRunning(ctx context.Context, labels map[string]string) (bool, error) {
+func (a *Client) ContainerRunning(ctx context.Context, labels map[string]string) (bool, error) {
 	containers, err := a.containersMatchingLabels(ctx, labels)
 	if err != nil {
 		return false, fmt.Errorf("container list: %w", err)
@@ -333,7 +307,7 @@ func (a *Actor) ContainerRunning(ctx context.Context, labels map[string]string)
 }
 
 // RemoveContainers removes all containers with the given labels.
-func (a *Actor) RemoveContainers(ctx context.Context, labels map[string]string) error {
+func (a *Client) RemoveContainers(ctx context.Context, labels map[string]string) error {
 	containers, err := a.containersMatchingLabels(ctx, labels)
 	if err != nil {
 		return fmt.Errorf("container list: %w", err)
@@ -348,7 +322,7 @@ func (a *Actor) RemoveContainers(ctx context.Context, labels map[string]string)
 	return nil
 }
 
-func (a *Actor) containersMatchingLabels(ctx context.Context, labels map[string]string) ([]types.Container, error) {
+func (a *Client) containersMatchingLabels(ctx context.Context, labels map[string]string) ([]types.Container, error) {
 	filterArgs := filters.NewArgs(
 		filters.Arg("label", "app=termstream"),
 		filters.Arg("label", "app-id="+a.id.String()),
@@ -21,17 +21,14 @@ func TestClientStartStop(t *testing.T) {
 	containerName := "termstream-test-" + uuid.NewString()
 	component := "test-start-stop"
 
-	client, err := container.NewActor(ctx, container.NewActorParams{
-		ChanSize: 1,
-		Logger:   logger,
-	})
+	client, err := container.NewClient(ctx, logger)
 	require.NoError(t, err)
 
 	running, err := client.ContainerRunning(ctx, map[string]string{"component": component})
 	require.NoError(t, err)
 	assert.False(t, running)
 
-	containerID, _, err := client.RunContainer(ctx, container.RunContainerParams{
+	containerID, containerStateC, err := client.RunContainer(ctx, container.RunContainerParams{
 		Name: containerName,
 		ContainerConfig: &typescontainer.Config{
 			Image: "netfluxio/mediamtx-alpine:latest",
@@ -42,6 +39,8 @@ func TestClientStartStop(t *testing.T) {
 		},
 	})
 	require.NoError(t, err)
+	testhelpers.DiscardChannel(containerStateC)
+
 	assert.NotEmpty(t, containerID)
 
 	require.Eventually(
@@ -55,17 +54,6 @@ func TestClientStartStop(t *testing.T) {
 		"container not in RUNNING state",
 	)
 
-	require.Eventually(
-		t,
-		func() bool {
-			ctr, ok := client.GetContainerState()[containerID]
-			return ok && ctr.HealthState == "healthy" && ctr.CPUPercent > 0 && ctr.MemoryUsageBytes > 0
-		},
-		2*time.Second,
-		100*time.Millisecond,
-		"container state not updated",
-	)
-
 	client.Close()
 
 	running, err = client.ContainerRunning(ctx, map[string]string{"component": component})
@@ -80,14 +68,11 @@ func TestClientRemoveContainers(t *testing.T) {
 	logger := testhelpers.NewTestLogger()
 	component := "test-remove-containers"
 
-	client, err := container.NewActor(ctx, container.NewActorParams{
-		ChanSize: 1,
-		Logger:   logger,
-	})
+	client, err := container.NewClient(ctx, logger)
 	require.NoError(t, err)
 	t.Cleanup(func() { client.Close() })
 
-	_, _, err = client.RunContainer(ctx, container.RunContainerParams{
+	_, stateC, err := client.RunContainer(ctx, container.RunContainerParams{
 		ContainerConfig: &typescontainer.Config{
 			Image:  "netfluxio/mediamtx-alpine:latest",
 			Labels: map[string]string{"component": component, "group": "test1"},
@@ -95,8 +80,9 @@ func TestClientRemoveContainers(t *testing.T) {
 		HostConfig: &typescontainer.HostConfig{NetworkMode: "default"},
 	})
 	require.NoError(t, err)
+	testhelpers.DiscardChannel(stateC)
 
-	_, _, err = client.RunContainer(ctx, container.RunContainerParams{
+	_, stateC, err = client.RunContainer(ctx, container.RunContainerParams{
 		ContainerConfig: &typescontainer.Config{
 			Image:  "netfluxio/mediamtx-alpine:latest",
 			Labels: map[string]string{"component": component, "group": "test1"},
@@ -104,8 +90,9 @@ func TestClientRemoveContainers(t *testing.T) {
 		HostConfig: &typescontainer.HostConfig{NetworkMode: "default"},
 	})
 	require.NoError(t, err)
+	testhelpers.DiscardChannel(stateC)
 
-	_, _, err = client.RunContainer(ctx, container.RunContainerParams{
+	_, stateC, err = client.RunContainer(ctx, container.RunContainerParams{
 		ContainerConfig: &typescontainer.Config{
 			Image:  "netfluxio/mediamtx-alpine:latest",
 			Labels: map[string]string{"component": component, "group": "test2"},
@@ -113,7 +100,7 @@ func TestClientRemoveContainers(t *testing.T) {
 		HostConfig: &typescontainer.HostConfig{NetworkMode: "default"},
 	})
 	require.NoError(t, err)
-	require.Equal(t, 3, len(client.GetContainerState()))
+	testhelpers.DiscardChannel(stateC)
 
 	// check all containers in group 1 are running
 	require.Eventually(
@@ -154,7 +141,6 @@ func TestClientRemoveContainers(t *testing.T) {
 		100*time.Millisecond,
 		"container group 1 still in RUNNING state",
 	)
-	require.Equal(t, 1, len(client.GetContainerState()))
 
 	// check group 2 is still running
 	running, err := client.ContainerRunning(ctx, map[string]string{"group": "test2"})
@@ -4,26 +4,26 @@ package domain
 type AppState struct {
 	Source       Source
 	Destinations []Destination
-	Containers   map[string]Container
 }
 
 // Source represents the source, currently always the mediaserver.
 type Source struct {
-	ContainerID string
+	ContainerState ContainerState
 	Live           bool
 	URL            string
 }
 
 // Destination is a single destination.
 type Destination struct {
+	ContainerState ContainerState
 	URL string
 }
 
-// Container represents the current state of an individual container.
+// ContainerState represents the current state of an individual container.
 //
 // The source of truth is always the Docker daemon, this struct is used only
 // for passing asynchronous state.
-type Container struct {
+type ContainerState struct {
 	ID               string
 	HealthState      string
 	CPUPercent       float64
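With container state now embedded per owner rather than held in a shared map keyed by container ID, an AppState might look like this (a sketch with illustrative values):

	state := domain.AppState{
		Source: domain.Source{
			ContainerState: domain.ContainerState{ID: "abc123", HealthState: "healthy"},
			Live:           true,
			URL:            "rtmp://localhost:1935/live",
		},
		Destinations: []domain.Destination{
			// ContainerState stays zero-valued until a container is started.
			{URL: "rtmp://example.com/live"},
		},
	}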
main.go
@@ -42,9 +42,7 @@ func run(ctx context.Context, cfgReader io.Reader) error {
 	logger := slog.New(slog.NewTextHandler(logFile, nil))
 	logger.Info("Starting termstream", slog.Any("initial_state", state))
 
-	containerClient, err := container.NewActor(ctx, container.NewActorParams{
-		Logger: logger.With("component", "container_client"),
-	})
+	containerClient, err := container.NewClient(ctx, logger.With("component", "container_client"))
 	if err != nil {
 		return fmt.Errorf("new container client: %w", err)
 	}
@@ -80,16 +78,11 @@ func run(ctx context.Context, cfgReader io.Reader) error {
 			}
 			logger.Info("Command received", "cmd", cmd)
 		case <-uiTicker.C:
-			applyContainerState(containerClient, state)
+			// TODO: update UI with current state?
 			updateUI()
-		case serverState, ok := <-srv.C():
-			if ok {
-				applyServerState(serverState, state)
-				updateUI()
-			} else {
-				logger.Info("Source state channel closed, shutting down...")
-				return nil
-			}
+		case serverState := <-srv.C():
+			applyServerState(serverState, state)
+			updateUI()
 		}
 	}
 }
@@ -106,8 +99,3 @@ func applyConfig(cfg config.Config, appState *domain.AppState) {
 		appState.Destinations = append(appState.Destinations, domain.Destination{URL: dest.URL})
 	}
 }
-
-// applyContainerState applies the current container state to the app state.
-func applyContainerState(containerClient *container.Actor, appState *domain.AppState) {
-	appState.Containers = containerClient.GetContainerState()
-}
@@ -26,18 +26,20 @@ type action func()
 
 // Actor is responsible for managing the media server.
 type Actor struct {
-	ch              chan action
-	state           *domain.Source
-	stateChan       chan domain.Source
-	containerClient *container.Actor
+	actorC          chan action
+	stateC          chan domain.Source
+	containerClient *container.Client
 	logger          *slog.Logger
 	httpClient      *http.Client
+
+	// mutable state
+	state *domain.Source
 }
 
 // StartActorParams contains the parameters for starting a new media server
 // actor.
 type StartActorParams struct {
-	ContainerClient *container.Actor
+	ContainerClient *container.Client
 	ChanSize        int
 	Logger          *slog.Logger
 }
@@ -53,22 +55,22 @@ func StartActor(ctx context.Context, params StartActorParams) (*Actor, error) {
 	chanSize := cmp.Or(params.ChanSize, defaultChanSize)
 
 	actor := &Actor{
-		ch:              make(chan action, chanSize),
+		actorC:          make(chan action, chanSize),
 		state:           new(domain.Source),
-		stateChan:       make(chan domain.Source, chanSize),
+		stateC:          make(chan domain.Source, chanSize),
 		containerClient: params.ContainerClient,
 		logger:          params.Logger,
 		httpClient:      &http.Client{Timeout: httpClientTimeout},
 	}
 
-	containerID, containerDone, err := params.ContainerClient.RunContainer(
+	containerID, containerStateC, err := params.ContainerClient.RunContainer(
 		ctx,
 		container.RunContainerParams{
 			Name: "server",
 			ContainerConfig: &typescontainer.Config{
 				Image: imageNameMediaMTX,
 				Env: []string{
-					"MTX_LOGLEVEL=debug",
+					"MTX_LOGLEVEL=info",
 					"MTX_API=yes",
 				},
 				Labels: map[string]string{
@@ -91,23 +93,23 @@ func StartActor(ctx context.Context, params StartActorParams) (*Actor, error) {
 		return nil, fmt.Errorf("run container: %w", err)
 	}
 
-	actor.state.ContainerID = containerID
+	actor.state.ContainerState.ID = containerID
 	actor.state.URL = "rtmp://localhost:1935/" + rtmpPath
 
-	go actor.actorLoop(containerDone)
+	go actor.actorLoop(containerStateC)
 
 	return actor, nil
 }
 
 // C returns a channel that will receive the current state of the media server.
 func (s *Actor) C() <-chan domain.Source {
-	return s.stateChan
+	return s.stateC
 }
 
 // State returns the current state of the media server.
 func (s *Actor) State() domain.Source {
 	resultChan := make(chan domain.Source)
-	s.ch <- func() {
+	s.actorC <- func() {
 		resultChan <- *s.state
 	}
 	return <-resultChan
@@ -119,32 +121,37 @@ func (s *Actor) Close() error {
 		return fmt.Errorf("remove containers: %w", err)
 	}
 
+	close(s.actorC)
+
 	return nil
 }
 
-func (s *Actor) actorLoop(containerDone <-chan struct{}) {
-	defer close(s.ch)
+// actorLoop is the main loop of the media server actor. It only exits when the
+// actor is closed.
+func (s *Actor) actorLoop(containerStateC <-chan domain.ContainerState) {
 	ticker := time.NewTicker(5 * time.Second)
 	defer ticker.Stop()
 
-	var closing bool
-	sendState := func() {
-		if !closing {
-			s.stateChan <- *s.state
-		}
-	}
+	sendState := func() { s.stateC <- *s.state }
 
 	for {
 		select {
-		case <-containerDone:
-			ticker.Stop()
-			s.state.Live = false
-			sendState()
+		case containerState, ok := <-containerStateC:
+			if !ok {
+				ticker.Stop()
+
+				if s.state.Live {
+					s.state.Live = false
+					sendState()
+				}
 
-			closing = true
-			close(s.stateChan)
+				continue
+			}
+
+			s.state.ContainerState = containerState
+			sendState()
+
+			continue
 		case <-ticker.C:
 			ingressLive, err := s.fetchIngressStateFromServer()
 			if err != nil {
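The `containerState, ok := <-containerStateC` receive above relies on the fact that a receive from a closed channel returns immediately with ok == false, which is how the actor notices the container is gone without a separate done channel. The general pattern, as a standalone sketch:

	v, ok := <-ch
	if !ok {
		// ch is closed: the producer has finished, and v is the zero value.
	}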
@@ -155,7 +162,7 @@ func (s *Actor) actorLoop(containerDone <-chan struct{}) {
 				s.state.Live = ingressLive
 				sendState()
 			}
-		case action, ok := <-s.ch:
+		case action, ok := <-s.actorC:
 			if !ok {
 				return
 			}
@@ -21,7 +21,7 @@ func TestMediaServerStartStop(t *testing.T) {
 	t.Cleanup(cancel)
 
 	logger := testhelpers.NewTestLogger()
-	containerClient, err := container.NewActor(ctx, container.NewActorParams{ChanSize: 1, Logger: logger})
+	containerClient, err := container.NewClient(ctx, logger)
 	require.NoError(t, err)
 	t.Cleanup(func() { require.NoError(t, containerClient.Close()) })
 
@@ -35,6 +35,7 @@ func TestMediaServerStartStop(t *testing.T) {
 		Logger: logger,
 	})
 	require.NoError(t, err)
+	testhelpers.DiscardChannel(mediaServer.C())
 
 	require.Eventually(
 		t,
@@ -54,10 +55,13 @@ func TestMediaServerStartStop(t *testing.T) {
 	launchFFMPEG(t, "rtmp://localhost:1935/live")
 	require.Eventually(
 		t,
-		func() bool { return mediaServer.State().Live },
+		func() bool {
+			currState := mediaServer.State()
+			return currState.Live && currState.ContainerState.HealthState == "healthy"
+		},
 		5*time.Second,
 		250*time.Millisecond,
-		"actor not in LIVE state",
+		"actor not healthy and/or in LIVE state",
 	)
 
 	mediaServer.Close()
@@ -135,23 +135,25 @@ func (a *Actor) redrawFromState(state domain.AppState) {
 	setHeaderRow := func(tableView *tview.Table) {
 		tableView.SetCell(0, 0, tview.NewTableCell("[grey]URL").SetAlign(tview.AlignLeft).SetExpansion(7).SetSelectable(false))
 		tableView.SetCell(0, 1, tview.NewTableCell("[grey]Status").SetAlign(tview.AlignLeft).SetExpansion(1).SetSelectable(false))
-		tableView.SetCell(0, 2, tview.NewTableCell("[grey]CPU %").SetAlign(tview.AlignLeft).SetExpansion(1).SetSelectable(false))
-		tableView.SetCell(0, 3, tview.NewTableCell("[grey]Mem used (MB)").SetAlign(tview.AlignLeft).SetExpansion(1).SetSelectable(false))
-		tableView.SetCell(0, 4, tview.NewTableCell("[grey]Actions").SetAlign(tview.AlignLeft).SetExpansion(2).SetSelectable(false))
+		tableView.SetCell(0, 2, tview.NewTableCell("[grey]Health").SetAlign(tview.AlignLeft).SetExpansion(1).SetSelectable(false))
+		tableView.SetCell(0, 3, tview.NewTableCell("[grey]CPU %").SetAlign(tview.AlignLeft).SetExpansion(1).SetSelectable(false))
+		tableView.SetCell(0, 4, tview.NewTableCell("[grey]Mem used (MB)").SetAlign(tview.AlignLeft).SetExpansion(1).SetSelectable(false))
+		tableView.SetCell(0, 5, tview.NewTableCell("[grey]Actions").SetAlign(tview.AlignLeft).SetExpansion(2).SetSelectable(false))
 	}
 
 	a.sourceView.Clear()
 	setHeaderRow(a.sourceView)
-	sourceContainer := state.Containers[state.Source.ContainerID]
 	a.sourceView.SetCell(1, 0, tview.NewTableCell(state.Source.URL))
 
 	if state.Source.Live {
 		a.sourceView.SetCell(1, 1, tview.NewTableCell("[green]on-air"))
 	} else {
 		a.sourceView.SetCell(1, 1, tview.NewTableCell("[yellow]off-air"))
 	}
-	a.sourceView.SetCell(1, 2, tview.NewTableCell("[white]"+fmt.Sprintf("%.1f", sourceContainer.CPUPercent)))
-	a.sourceView.SetCell(1, 3, tview.NewTableCell("[white]"+fmt.Sprintf("%.1f", float64(sourceContainer.MemoryUsageBytes)/1024/1024)))
-	a.sourceView.SetCell(1, 4, tview.NewTableCell(""))
+	a.sourceView.SetCell(1, 2, tview.NewTableCell("[white]"+cmp.Or(state.Source.ContainerState.HealthState, "starting")))
+	a.sourceView.SetCell(1, 3, tview.NewTableCell("[white]"+fmt.Sprintf("%.1f", state.Source.ContainerState.CPUPercent)))
+	a.sourceView.SetCell(1, 4, tview.NewTableCell("[white]"+fmt.Sprintf("%.1f", float64(state.Source.ContainerState.MemoryUsageBytes)/1024/1024)))
+	a.sourceView.SetCell(1, 5, tview.NewTableCell(""))
 
 	a.destView.Clear()
 	setHeaderRow(a.destView)
@@ -161,7 +163,8 @@ func (a *Actor) redrawFromState(state domain.AppState) {
 		a.destView.SetCell(i+1, 1, tview.NewTableCell("[yellow]off-air"))
 		a.destView.SetCell(i+1, 2, tview.NewTableCell("[white]-"))
 		a.destView.SetCell(i+1, 3, tview.NewTableCell("[white]-"))
-		a.destView.SetCell(i+1, 4, tview.NewTableCell("[green]Tab to go live"))
+		a.destView.SetCell(i+1, 4, tview.NewTableCell("[white]-"))
+		a.destView.SetCell(i+1, 5, tview.NewTableCell("[green]Tab to go live"))
 	}
 
 	a.app.Draw()
@@ -16,3 +16,12 @@ func NewNopLogger() *slog.Logger {
 func NewTestLogger() *slog.Logger {
 	return slog.New(slog.NewTextHandler(os.Stderr, nil))
 }
+
+// DiscardChannel consumes a channel and discards all values.
+func DiscardChannel[T any](ch <-chan T) {
+	go func() {
+		for range ch {
+			// no-op
+		}
+	}()
+}
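Typical use of the new helper, as in the updated tests above: draining a state channel keeps the producing goroutine from blocking when a test does not care about the updates.

	_, stateC, err := client.RunContainer(ctx, params)
	require.NoError(t, err)
	testhelpers.DiscardChannel(stateC) // drain updates so the producer never blocks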