129 changes: 79 additions & 50 deletions cli/command/container/stats.go
@@ -11,14 +11,14 @@ import (
"time"

"github.com/containerd/errdefs"
"github.com/containerd/log"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/completion"
"github.com/docker/cli/cli/command/formatter"
flagsHelper "github.com/docker/cli/cli/flags"
"github.com/moby/moby/api/types/events"
"github.com/moby/moby/client"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

@@ -113,8 +113,10 @@ func RunStats(ctx context.Context, dockerCLI command.Cli, options *StatsOptions)

// waitFirst is a WaitGroup to wait first stat data's reach for each container
waitFirst := &sync.WaitGroup{}
// closeChan is a non-buffered channel used to collect errors from goroutines.
closeChan := make(chan error)
// closeChan is used to collect errors from goroutines. It uses a small buffer
// to avoid blocking sends when sends occur after closeChan is set to nil or
// after the reader has exited, preventing deadlocks.
closeChan := make(chan error, 4)
cStats := stats{}

showAll := len(options.Containers) == 0
@@ -139,24 +141,34 @@ func RunStats(ctx context.Context, dockerCLI command.Cli, options *StatsOptions)
eh := newEventHandler()
if options.All {
eh.setHandler(events.ActionCreate, func(e events.Message) {
s := NewStats(e.Actor.ID)
if cStats.add(s) {
if s := NewStats(e.Actor.ID); cStats.add(s) {
waitFirst.Add(1)
log.G(ctx).WithFields(map[string]any{
"event": e.Action,
"container": e.Actor.ID,
}).Debug("collecting stats for container")
go collect(ctx, s, apiClient, !options.NoStream, waitFirst)
}
})
}

eh.setHandler(events.ActionStart, func(e events.Message) {
s := NewStats(e.Actor.ID)
if cStats.add(s) {
if s := NewStats(e.Actor.ID); cStats.add(s) {
waitFirst.Add(1)
log.G(ctx).WithFields(map[string]any{
"event": e.Action,
"container": e.Actor.ID,
}).Debug("collecting stats for container")
go collect(ctx, s, apiClient, !options.NoStream, waitFirst)
}
})

if !options.All {
eh.setHandler(events.ActionDie, func(e events.Message) {
log.G(ctx).WithFields(map[string]any{
"event": e.Action,
"container": e.Actor.ID,
}).Debug("stop collecting stats for container")
cStats.remove(e.Actor.ID)
})
}
@@ -182,10 +194,17 @@ func RunStats(ctx context.Context, dockerCLI command.Cli, options *StatsOptions)
select {
case <-stopped:
return
case <-ctx.Done():
return
case event := <-eventChan:
c <- event
case err := <-errChan:
closeChan <- err
// Prevent blocking if closeChan is full or unread
select {
case closeChan <- err:
default:
// drop if not read; avoids deadlock
}
return
}
}
@@ -209,9 +228,11 @@ func RunStats(ctx context.Context, dockerCLI command.Cli, options *StatsOptions)
return err
}
for _, ctr := range cs {
s := NewStats(ctr.ID)
if cStats.add(s) {
if s := NewStats(ctr.ID); cStats.add(s) {
waitFirst.Add(1)
log.G(ctx).WithFields(map[string]any{
"container": ctr.ID,
}).Debug("collecting stats for container")
go collect(ctx, s, apiClient, !options.NoStream, waitFirst)
}
}
@@ -230,15 +251,18 @@ func RunStats(ctx context.Context, dockerCLI command.Cli, options *StatsOptions)
// Create the list of containers, and start collecting stats for all
// containers passed.
for _, ctr := range options.Containers {
s := NewStats(ctr)
if cStats.add(s) {
if s := NewStats(ctr); cStats.add(s) {
waitFirst.Add(1)
log.G(ctx).WithFields(map[string]any{
"container": ctr,
}).Debug("collecting stats for container")
go collect(ctx, s, apiClient, !options.NoStream, waitFirst)
}
}

// We don't expect any asynchronous errors: closeChan can be closed.
// We don't expect any asynchronous errors: closeChan can be closed and disabled.
close(closeChan)
closeChan = nil

// make sure each container get at least one valid stat data
waitFirst.Wait()
@@ -257,7 +281,7 @@ func RunStats(ctx context.Context, dockerCLI command.Cli, options *StatsOptions)
}

format := options.Format
if len(format) == 0 {
if format == "" {
if len(dockerCLI.ConfigFile().StatsFormat) > 0 {
format = dockerCLI.ConfigFile().StatsFormat
} else {
@@ -274,63 +298,68 @@ func RunStats(ctx context.Context, dockerCLI command.Cli, options *StatsOptions)
Format: NewStatsFormat(format, daemonOSType),
}

var err error
ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
var ccStats []StatsEntry
if options.NoStream {
cStats.mu.RLock()
ccStats := make([]StatsEntry, 0, len(cStats.cs))
for _, c := range cStats.cs {
ccStats = append(ccStats, c.GetStatistics())
}
cStats.mu.RUnlock()

if !options.NoStream {
if len(ccStats) == 0 {
return nil
}
if err := statsFormatWrite(statsCtx, ccStats, daemonOSType, !options.NoTrunc); err != nil {
return err
}
_, _ = fmt.Fprint(dockerCLI.Out(), statsTextBuffer.String())
return nil
}

ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-ticker.C:
cStats.mu.RLock()
ccStats := make([]StatsEntry, 0, len(cStats.cs))
for _, c := range cStats.cs {
ccStats = append(ccStats, c.GetStatistics())
}
cStats.mu.RUnlock()

// Start by moving the cursor to the top-left
_, _ = fmt.Fprint(&statsTextBuffer, "\033[H")
}

if err = statsFormatWrite(statsCtx, ccStats, daemonOSType, !options.NoTrunc); err != nil {
break
}
if err := statsFormatWrite(statsCtx, ccStats, daemonOSType, !options.NoTrunc); err != nil {
return err
}

if !options.NoStream {
for _, line := range strings.Split(statsTextBuffer.String(), "\n") {
// In case the new text is shorter than the one we are writing over,
// we'll append the "erase line" escape sequence to clear the remaining text.
_, _ = fmt.Fprintln(&statsTextBuffer, line, "\033[K")
}

// We might have fewer containers than before, so let's clear the remaining text
_, _ = fmt.Fprint(&statsTextBuffer, "\033[J")
}

_, _ = fmt.Fprint(dockerCLI.Out(), statsTextBuffer.String())
statsTextBuffer.Reset()
_, _ = fmt.Fprint(dockerCLI.Out(), statsTextBuffer.String())
statsTextBuffer.Reset()

if len(cStats.cs) == 0 && !showAll {
break
}
if options.NoStream {
break
}
select {
if len(ccStats) == 0 && !showAll {
return nil
}
case err, ok := <-closeChan:
if ok {
if err != nil {
// Suppress "unexpected EOF" errors in the CLI so that
// it shuts down cleanly when the daemon restarts.
if errors.Is(err, io.ErrUnexpectedEOF) {
return nil
}
return err
}
if !ok || err == nil || errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
// Suppress "unexpected EOF" errors in the CLI so that
// it shuts down cleanly when the daemon restarts.
return nil
}
default:
// just skip
return err
case <-ctx.Done():
return ctx.Err()
}
}
return err
}
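
Aside: the render loop above repaints the terminal in place with ANSI escape sequences: "\033[H" homes the cursor, each row is followed by "\033[K" (erase to end of line) so shorter new rows fully overwrite longer old ones, and "\033[J" clears any leftover rows below. A minimal, self-contained sketch of that repaint technique follows; the frame contents and the 500ms interval are illustrative, not taken from this PR.

package main

import (
	"fmt"
	"os"
	"time"
)

func main() {
	var frame []byte
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()

	for i := 0; i < 5; i++ {
		<-ticker.C

		// Move the cursor to the top-left before rewriting the table.
		frame = append(frame, "\033[H"...)

		rows := []string{"CONTAINER   CPU %", fmt.Sprintf("demo        %d.0%%", i)}
		for _, row := range rows {
			// Append "erase line" so the tail of any longer, older row is cleared.
			frame = append(frame, fmt.Sprintln(row, "\033[K")...)
		}

		// There may be fewer rows than last time; clear everything below.
		frame = append(frame, "\033[J"...)

		_, _ = os.Stdout.Write(frame)
		frame = frame[:0]
	}
}
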

// newEventHandler initializes and returns an eventHandler
@@ -357,11 +386,11 @@ func (eh *eventHandler) watch(c <-chan events.Message) {
continue
}
if e.Actor.ID == "" {
logrus.WithField("event", e).Errorf("event handler: received %s event with empty ID", e.Action)
log.G(context.TODO()).WithField("event", e).Errorf("event handler: received %s event with empty ID", e.Action)
continue
}

logrus.WithField("event", e).Debugf("event handler: received %s event for: %s", e.Action, e.Actor.ID)
log.G(context.TODO()).WithField("event", e).Debugf("event handler: received %s event for: %s", e.Action, e.Actor.ID)
go h(e)
}
}
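
Aside: a minimal, self-contained sketch of the closeChan pattern introduced above, not code from this PR. The small buffer plus a select with a default branch keeps late error reports from blocking forever, and assigning nil to the channel variable after close() disables any receive case that still refers to it, since a receive from a nil channel never proceeds.

package main

import (
	"errors"
	"fmt"
)

func main() {
	errCh := make(chan error, 4) // a small buffer absorbs a handful of late sends

	// Producer side: report an error without ever blocking, even if nobody reads.
	report := func(err error) {
		select {
		case errCh <- err:
		default:
			// Buffer full or reader gone: drop the error instead of deadlocking.
		}
	}
	report(errors.New("asynchronous failure"))

	// Consumer side: drain what is there, then close the channel and set the
	// variable to nil; a receive from a nil channel blocks forever, so the
	// first case in the select below can never fire again.
	fmt.Println("got:", <-errCh)
	close(errCh)
	errCh = nil

	select {
	case err := <-errCh:
		fmt.Println("unreachable:", err)
	default:
		fmt.Println("errCh is disabled")
	}
}
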
18 changes: 11 additions & 7 deletions cli/command/container/stats_helpers.go
@@ -10,7 +10,6 @@ import (

"github.com/moby/moby/api/types/container"
"github.com/moby/moby/client"
"github.com/sirupsen/logrus"
)

type stats struct {
@@ -50,8 +49,7 @@ func (s *stats) isKnownContainer(cid string) (int, bool) {
return -1, false
}

func collect(ctx context.Context, s *Stats, cli client.ContainerAPIClient, streamStats bool, waitFirst *sync.WaitGroup) {
logrus.Debugf("collecting stats for %s", s.Container)
func collect(ctx context.Context, s *Stats, cli client.ContainerAPIClient, streamStats bool, waitFirst *sync.WaitGroup) { //nolint:gocyclo
var (
getFirst bool
previousCPU uint64
@@ -72,11 +70,14 @@ func collect(ctx context.Context, s *Stats, cli client.ContainerAPIClient, strea
s.SetError(err)
return
}
defer response.Body.Close()

dec := json.NewDecoder(response.Body)
go func() {
defer response.Body.Close()
dec := json.NewDecoder(response.Body)
for {
if ctx.Err() != nil {
return
}
var (
v *container.StatsResponse
memPercent, cpuPercent float64
@@ -143,8 +144,8 @@ func collect(ctx context.Context, s *Stats, cli client.ContainerAPIClient, strea
}
case err := <-u:
s.SetError(err)
if err == io.EOF {
break
if errors.Is(err, io.EOF) {
return
}
if err != nil {
continue
@@ -154,6 +155,9 @@ func collect(ctx context.Context, s *Stats, cli client.ContainerAPIClient, strea
getFirst = true
waitFirst.Done()
}
case <-ctx.Done():
s.SetError(ctx.Err())
return
}
if !streamStats {
return
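
Aside: a self-contained sketch of the collection pattern used in the stats_helpers.go hunks above, with a placeholder stream and record type standing in for the Docker API client, not code from this PR. The decode goroutine checks ctx.Err() on every iteration, and the consumer matches stream-end errors with errors.Is (which also catches wrapped errors) instead of a plain == comparison.

package main

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strings"
	"time"
)

// sample mirrors the shape of one streamed stats record (illustrative only).
type sample struct {
	ID string `json:"id"`
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	body := io.NopCloser(strings.NewReader(`{"id":"abc"}{"id":"def"}`))
	results := make(chan sample)
	errs := make(chan error, 1)

	go func() {
		defer body.Close()
		dec := json.NewDecoder(body)
		for {
			// Stop decoding as soon as the caller's context is cancelled.
			if ctx.Err() != nil {
				return
			}
			var s sample
			if err := dec.Decode(&s); err != nil {
				errs <- err
				return
			}
			results <- s
		}
	}()

	for {
		select {
		case s := <-results:
			fmt.Println("sample:", s.ID)
		case err := <-errs:
			// errors.Is handles wrapped errors, unlike a plain == comparison.
			if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
				fmt.Println("stream ended cleanly")
				return
			}
			fmt.Println("stream error:", err)
			return
		case <-ctx.Done():
			fmt.Println("cancelled:", ctx.Err())
			return
		}
	}
}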