forked from kata-containers/runtime
-
Notifications
You must be signed in to change notification settings - Fork 0
/
start.go
137 lines (112 loc) · 3.19 KB
/
start.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package containerdshim
import (
"context"
"fmt"
"github.com/containerd/containerd/api/types/task"
"github.com/kata-containers/runtime/pkg/katautils"
"github.com/sirupsen/logrus"
)
// startContainer starts either the sandbox itself or a regular container
// inside an already-started sandbox, wires up the container's I/O streams,
// runs the post-start OCI hooks, and spawns the wait goroutine.
//
// It returns a non-nil error if the container cannot be started; post-start
// hook failures are logged and tolerated, per the OCI runtime spec.
func startContainer(ctx context.Context, s *service, c *container) error {
	// Guard clauses: both the container type and the sandbox must have been
	// established during create; hitting either branch indicates a shim bug.
	if c.cType == "" {
		return fmt.Errorf("Bug, the container %s type is empty", c.id)
	}
	if s.sandbox == nil {
		return fmt.Errorf("Bug, the sandbox hasn't been created for this container %s", c.id)
	}

	if c.cType.IsSandbox() {
		if err := s.sandbox.Start(); err != nil {
			return err
		}
		// Start monitor after starting sandbox
		var err error
		s.monitor, err = s.sandbox.Monitor()
		if err != nil {
			return err
		}
		go watchSandbox(s)
		// We don't rely on the context passed to startContainer as it can be cancelled after
		// this rpc call.
		go watchOOMEvents(s.ctx, s)
	} else if _, err := s.sandbox.StartContainer(c.id); err != nil {
		return err
	}

	// Run post-start OCI hooks.
	hookErr := katautils.EnterNetNS(s.sandbox.GetNetNs(), func() error {
		return katautils.PostStartHooks(ctx, *c.spec, s.sandbox.ID(), c.bundle)
	})
	if hookErr != nil {
		// log warning and continue, as defined in oci runtime spec
		// https://github.com/opencontainers/runtime-spec/blob/master/runtime.md#lifecycle
		logrus.WithError(hookErr).Warn("Failed to run post-start hooks")
	}

	c.status = task.StatusRunning

	stdin, stdout, stderr, err := s.sandbox.IOStream(c.id, c.id)
	if err != nil {
		return err
	}
	c.stdinPipe = stdin

	hasIO := c.stdin != "" || c.stdout != "" || c.stderr != ""
	if hasIO {
		tty, ttyErr := newTtyIO(ctx, c.stdin, c.stdout, c.stderr, c.terminal)
		if ttyErr != nil {
			return ttyErr
		}
		c.ttyio = tty
		go ioCopy(c.exitIOch, c.stdinCloser, tty, stdin, stdout, stderr)
	} else {
		//close the io exit channel, since there is no io for this container,
		//otherwise the following wait goroutine will hang on this channel.
		close(c.exitIOch)
		//close the stdin closer channel to notify that it's safe to close process's
		// io.
		close(c.stdinCloser)
	}

	go wait(s, c, "")
	return nil
}
// startExec starts a previously registered exec process inside the container
// identified by containerID, applies the requested terminal size, wires up
// the process I/O streams, and spawns the copy/wait goroutines.
//
// It returns the exec descriptor on success, or a non-nil error otherwise.
func startExec(ctx context.Context, s *service, containerID, execID string) (*exec, error) {
	//start an exec
	c, err := s.getContainer(containerID)
	if err != nil {
		return nil, err
	}
	execs, err := c.getExec(execID)
	if err != nil {
		return nil, err
	}

	_, proc, err := s.sandbox.EnterContainer(containerID, *execs.cmds)
	if err != nil {
		// Wrap with %w (instead of shadowing err and formatting with %s) so
		// callers can still inspect the underlying error with errors.Is/As.
		return nil, fmt.Errorf("cannot enter container %s, with err %w", containerID, err)
	}
	execs.id = proc.Token

	execs.status = task.StatusRunning

	// Apply the initial window size only when the caller requested one.
	if execs.tty.height != 0 && execs.tty.width != 0 {
		err = s.sandbox.WinsizeProcess(c.id, execs.id, execs.tty.height, execs.tty.width)
		if err != nil {
			return nil, err
		}
	}

	stdin, stdout, stderr, err := s.sandbox.IOStream(c.id, execs.id)
	if err != nil {
		return nil, err
	}
	execs.stdinPipe = stdin

	tty, err := newTtyIO(ctx, execs.tty.stdin, execs.tty.stdout, execs.tty.stderr, execs.tty.terminal)
	if err != nil {
		return nil, err
	}
	execs.ttyio = tty

	// Pump process I/O and reap the exec when it exits.
	go ioCopy(execs.exitIOch, execs.stdinCloser, tty, stdin, stdout, stderr)
	go wait(s, c, execID)

	return execs, nil
}