Refresh containerd remotes on containerd restarted #36173

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged · 1 commit · Feb 8, 2018
31 changes: 23 additions & 8 deletions libcontainerd/client_daemon.go
@@ -114,8 +114,21 @@ type client struct {
 	containers map[string]*container
 }
 
+func (c *client) setRemote(remote *containerd.Client) {
+	c.Lock()
+	c.remote = remote
+	c.Unlock()
+}
+
+func (c *client) getRemote() *containerd.Client {
+	c.RLock()
+	remote := c.remote
+	c.RUnlock()
+	return remote
+}
+
 func (c *client) Version(ctx context.Context) (containerd.Version, error) {
-	return c.remote.Version(ctx)
+	return c.getRemote().Version(ctx)
 }
 
 func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallback) (alive bool, pid int, err error) {
@@ -187,7 +200,7 @@ func (c *client) Create(ctx context.Context, id string, ociSpec *specs.Spec, run
 
 	c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")
 
-	cdCtr, err := c.remote.NewContainer(ctx, id,
+	cdCtr, err := c.getRemote().NewContainer(ctx, id,
 		containerd.WithSpec(ociSpec),
 		// TODO(mlaventure): when containerd support lcow, revisit runtime value
 		containerd.WithRuntime(fmt.Sprintf("io.containerd.runtime.v1.%s", runtime.GOOS), runtimeOptions))
@@ -230,7 +243,7 @@ func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin
 	// remove the checkpoint when we're done
 	defer func() {
 		if cp != nil {
-			err := c.remote.ContentStore().Delete(context.Background(), cp.Digest)
+			err := c.getRemote().ContentStore().Delete(context.Background(), cp.Digest)
 			if err != nil {
 				c.logger.WithError(err).WithFields(logrus.Fields{
 					"ref": checkpointDir,
@@ -528,14 +541,14 @@ func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDi
 	}
 	// Whatever happens, delete the checkpoint from containerd
 	defer func() {
-		err := c.remote.ImageService().Delete(context.Background(), img.Name())
+		err := c.getRemote().ImageService().Delete(context.Background(), img.Name())
 		if err != nil {
 			c.logger.WithError(err).WithField("digest", img.Target().Digest).
 				Warnf("failed to delete checkpoint image")
 		}
 	}()
 
-	b, err := content.ReadBlob(ctx, c.remote.ContentStore(), img.Target().Digest)
+	b, err := content.ReadBlob(ctx, c.getRemote().ContentStore(), img.Target().Digest)
 	if err != nil {
 		return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
 	}
@@ -555,7 +568,7 @@ func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDi
 		return errdefs.System(errors.Wrapf(err, "invalid checkpoint"))
 	}
 
-	rat, err := c.remote.ContentStore().ReaderAt(ctx, cpDesc.Digest)
+	rat, err := c.getRemote().ContentStore().ReaderAt(ctx, cpDesc.Digest)
 	if err != nil {
 		return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
 	}
@@ -708,7 +721,7 @@ func (c *client) processEventStream(ctx context.Context) {
 		}
 	}()
 
-	eventStream, err = c.remote.EventService().Subscribe(ctx, &eventsapi.SubscribeRequest{
+	eventStream, err = c.getRemote().EventService().Subscribe(ctx, &eventsapi.SubscribeRequest{
 		Filters: []string{
 			// Filter on both namespace *and* topic. To create an "and" filter,
 			// this must be a single, comma-separated string
@@ -719,6 +732,8 @@
 		return
 	}
 
+	c.logger.WithField("namespace", c.namespace).Debug("processing event stream")
+
 	var oomKilled bool
 	for {
 		ev, err = eventStream.Recv()
@@ -822,7 +837,7 @@ func (c *client) processEventStream(ctx context.Context) {
 }
 
 func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
-	writer, err := c.remote.ContentStore().Writer(ctx, ref, 0, "")
+	writer, err := c.getRemote().ContentStore().Writer(ctx, ref, 0, "")
 	if err != nil {
 		return nil, err
 	}
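The heart of this file's change is mechanical: every direct read of c.remote becomes the lock-guarded getRemote(), so the connection monitor can later swap in a fresh *containerd.Client via setRemote() without racing in-flight calls. Below is a minimal, self-contained sketch of that RWMutex-guarded accessor pattern; the Conn type and the socket addresses are hypothetical stand-ins for moby's real types, not the actual implementation.

package main

import (
	"fmt"
	"sync"
)

// Conn is a stand-in for *containerd.Client in this sketch.
type Conn struct{ addr string }

func (c *Conn) Close() error { return nil }

// Client mirrors the pattern from the diff: an embedded RWMutex
// guards a swappable remote connection.
type Client struct {
	sync.RWMutex
	remote *Conn
}

// setRemote installs a fresh connection under the write lock,
// as the connection monitor does after restarting containerd.
func (c *Client) setRemote(remote *Conn) {
	c.Lock()
	c.remote = remote
	c.Unlock()
}

// getRemote snapshots the current connection under the read lock;
// call sites use the snapshot instead of touching the field directly.
func (c *Client) getRemote() *Conn {
	c.RLock()
	remote := c.remote
	c.RUnlock()
	return remote
}

func main() {
	c := &Client{remote: &Conn{addr: "unix:///old.sock"}}
	fmt.Println(c.getRemote().addr)

	// What the monitor does once a restarted containerd is reachable:
	c.getRemote().Close()
	c.setRemote(&Conn{addr: "unix:///new.sock"})
	fmt.Println(c.getRemote().addr)
}

Each call site holds its snapshot only for the duration of one API call, so a swap mid-call just means the old, now-closed connection returns an error that flows through the caller's normal error handling and the monitor's retry path.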
86 changes: 58 additions & 28 deletions libcontainerd/remote_daemon.go
@@ -260,7 +260,7 @@ func (r *remote) startContainerd() error {
 	return nil
 }
 
-func (r *remote) monitorConnection(client *containerd.Client) {
+func (r *remote) monitorConnection(monitor *containerd.Client) {
 	var transientFailureCount = 0
 
 	ticker := time.NewTicker(500 * time.Millisecond)
@@ -269,7 +269,7 @@ func (r *remote) monitorConnection(client *containerd.Client) {
 	for {
 		<-ticker.C
 		ctx, cancel := context.WithTimeout(r.shutdownContext, healthCheckTimeout)
-		_, err := client.IsServing(ctx)
+		_, err := monitor.IsServing(ctx)
 		cancel()
 		if err == nil {
 			transientFailureCount = 0
@@ -279,39 +279,69 @@
 		select {
 		case <-r.shutdownContext.Done():
 			r.logger.Info("stopping healthcheck following graceful shutdown")
-			client.Close()
+			monitor.Close()
 			return
 		default:
 		}
 
 		r.logger.WithError(err).WithField("binary", binaryName).Debug("daemon is not responding")
 
-		if r.daemonPid != -1 {
-			transientFailureCount++
-			if transientFailureCount >= maxConnectionRetryCount || !system.IsProcessAlive(r.daemonPid) {
-				transientFailureCount = 0
-				if system.IsProcessAlive(r.daemonPid) {
-					r.logger.WithField("pid", r.daemonPid).Info("killing and restarting containerd")
-					// Try to get a stack trace
-					syscall.Kill(r.daemonPid, syscall.SIGUSR1)
-					<-time.After(100 * time.Millisecond)
-					system.KillProcess(r.daemonPid)
-				}
-				<-r.daemonWaitCh
-				var err error
-				client.Close()
-				os.Remove(r.GRPC.Address)
-				if err = r.startContainerd(); err != nil {
-					r.logger.WithError(err).Error("failed restarting containerd")
-				} else {
-					newClient, err := containerd.New(r.GRPC.Address)
-					if err != nil {
-						r.logger.WithError(err).Error("failed connect to containerd")
-					} else {
-						client = newClient
-					}
-				}
-			}
+		if r.daemonPid == -1 {
+			continue
+		}
+
+		transientFailureCount++
+		if transientFailureCount < maxConnectionRetryCount || system.IsProcessAlive(r.daemonPid) {
+			continue
+		}
+
+		transientFailureCount = 0
+		if system.IsProcessAlive(r.daemonPid) {
+			r.logger.WithField("pid", r.daemonPid).Info("killing and restarting containerd")
+			// Try to get a stack trace
+			syscall.Kill(r.daemonPid, syscall.SIGUSR1)
+			<-time.After(100 * time.Millisecond)
+			system.KillProcess(r.daemonPid)
+		}
+		<-r.daemonWaitCh
+
+		monitor.Close()
+		os.Remove(r.GRPC.Address)
+		if err := r.startContainerd(); err != nil {
+			r.logger.WithError(err).Error("failed restarting containerd")
+			continue
+		}
+
+		newMonitor, err := containerd.New(r.GRPC.Address)
+		if err != nil {
+			r.logger.WithError(err).Error("failed connect to containerd")
+			continue
+		}
+
+		monitor = newMonitor
+		var wg sync.WaitGroup
+
+		for _, c := range r.clients {
+			wg.Add(1)
+
+			go func(c *client) {
+				defer wg.Done()
+				c.logger.WithField("namespace", c.namespace).Debug("creating new containerd remote client")
+				c.remote.Close()
+
+				remote, err := containerd.New(r.GRPC.Address, containerd.WithDefaultNamespace(c.namespace))
+				if err != nil {
+					r.logger.WithError(err).Error("failed to connect to containerd")
+					// TODO: Better way to handle this?
+					// This *shouldn't* happen, but this could wind up where the daemon
+					// is not able to communicate with an eventually up containerd
+					return
+				}
+
+				c.setRemote(remote)
+			}(c)
+
+			wg.Wait()
 		}
 	}
 }
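The rewritten monitorConnection above follows a supervise-and-redial shape: poll health on a ticker, tolerate a bounded number of transient failures, then kill and restart the daemon, rebuild the monitor's own connection, and hand every namespaced client a fresh remote via setRemote. Here is a condensed, runnable sketch of that control flow; remoteAPI, restart, and refresh are hypothetical stand-ins for the containerd client surface, the kill/re-exec/redial logic, and the per-client refresh fan-out, with timeouts shortened for the demo.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// remoteAPI is a hypothetical stand-in for the *containerd.Client
// surface the monitor needs.
type remoteAPI interface {
	IsServing(ctx context.Context) (bool, error)
	Close() error
}

// monitorLoop condenses the control flow of monitorConnection: poll health
// on a ticker, tolerate transient failures up to maxRetries, then close the
// stale connection, restart and redial, and refresh dependent clients.
func monitorLoop(ctx context.Context, monitor remoteAPI, maxRetries int,
	restart func() (remoteAPI, error), refresh func()) {

	ticker := time.NewTicker(50 * time.Millisecond) // 500ms in the real loop
	defer ticker.Stop()

	failures := 0
	for {
		<-ticker.C

		hcCtx, cancel := context.WithTimeout(ctx, time.Second) // healthCheckTimeout
		_, err := monitor.IsServing(hcCtx)
		cancel()
		if err == nil {
			failures = 0
			continue
		}

		select {
		case <-ctx.Done(): // graceful shutdown: stop health-checking
			monitor.Close()
			return
		default:
		}

		if failures++; failures < maxRetries {
			continue
		}
		failures = 0

		monitor.Close()
		newMonitor, err := restart() // kill + re-exec + redial in the real code
		if err != nil {
			continue // keep trying on the next tick
		}
		monitor = newMonitor
		refresh() // hand each namespaced client a fresh connection
	}
}

// fakeRemote fails health checks until "restarted".
type fakeRemote struct{ healthy bool }

func (f *fakeRemote) IsServing(ctx context.Context) (bool, error) {
	if err := ctx.Err(); err != nil {
		return false, err
	}
	if !f.healthy {
		return false, errors.New("not serving")
	}
	return true, nil
}

func (f *fakeRemote) Close() error { return nil }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	monitorLoop(ctx, &fakeRemote{healthy: false}, 3,
		func() (remoteAPI, error) {
			fmt.Println("restarting daemon and redialing")
			return &fakeRemote{healthy: true}, nil
		},
		func() { fmt.Println("refreshing namespaced clients") })
}

One detail visible in the real diff: wg.Wait() sits inside the loop over r.clients, so even though each reconnect runs in its own goroutine, the per-namespace refreshes effectively proceed one at a time.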