This repository has been archived by the owner on Jul 18, 2019. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 2
/
util.go
173 lines (153 loc) · 4.77 KB
/
util.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
package builder
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
s2iapi "github.com/openshift/source-to-image/pkg/api"
)
var (
// procCGroupPattern is a regular expression that parses the entries in /proc/self/cgroup
procCGroupPattern = regexp.MustCompile(`\d+:([a-z_,]+):/.*/(docker-|)([a-z0-9]+).*`)
)
// readNetClsCGroup parses /proc/self/cgroup in order to determine the container id that can be used
// the network namespace that this process is running on.
func readNetClsCGroup(reader io.Reader) string {
cgroups := make(map[string]string)
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
if match := procCGroupPattern.FindStringSubmatch(scanner.Text()); match != nil {
list := strings.Split(match[1], ",")
containerId := match[3]
if len(list) > 0 {
for _, key := range list {
cgroups[key] = containerId
}
} else {
cgroups[match[1]] = containerId
}
}
}
names := []string{"net_cls", "cpu"}
for _, group := range names {
if value, ok := cgroups[group]; ok {
return value
}
}
return ""
}
// getDockerNetworkMode determines whether the builder is running as a container
// by examining /proc/self/cgroup. This context is then passed to source-to-image.
// It returns the empty mode when the cgroup file cannot be opened or when no
// container id can be extracted from it.
func getDockerNetworkMode() s2iapi.DockerNetworkMode {
	f, err := os.Open("/proc/self/cgroup")
	if err != nil {
		return ""
	}
	defer f.Close()

	id := readNetClsCGroup(f)
	if id == "" {
		return ""
	}
	return s2iapi.NewDockerNetworkModeContainer(id)
}
// GetCGroupLimits returns a struct populated with cgroup limit values gathered
// from the local /sys/fs/cgroup filesystem. Overflow values are set to
// math.MaxInt64.
func GetCGroupLimits() (*s2iapi.CGroupLimits, error) {
	byteLimit, err := readInt64("/sys/fs/cgroup/memory/memory.limit_in_bytes")
	if err != nil {
		return nil, fmt.Errorf("cannot determine cgroup limits: %v", err)
	}
	// math.MaxInt64 seems to give cgroups trouble; this cap is still
	// 92 terabytes, so it ought to be sufficiently large for our purposes.
	const memoryLimitCap = 92233720368547
	if byteLimit > memoryLimitCap {
		byteLimit = memoryLimitCap
	}

	// Different docker versions use different cgroup directories; probe the
	// known candidates in order, last existing one wins.
	//   - /sys/fs/cgroup/cpuacct,cpu: seen on rhel systems (the default)
	//   - /sys/fs/cgroup/cpu,cpuacct: seen on fedora systems with docker 1.9.
	//     A /sys/fs/cgroup/cpu symlink also exists there, but we prefer the
	//     real directory over the symlink.
	//   - /sys/fs/cgroup/cpu: seen on debian systems with docker 1.10
	cpuDir := "/sys/fs/cgroup/cpuacct,cpu"
	for _, candidate := range []string{"/sys/fs/cgroup/cpu,cpuacct", "/sys/fs/cgroup/cpu"} {
		if _, statErr := os.Stat(candidate); statErr == nil {
			cpuDir = candidate
		}
	}

	// readCPUValue reads one cpu cgroup file, wrapping failures uniformly.
	readCPUValue := func(name string) (int64, error) {
		v, readErr := readInt64(filepath.Join(cpuDir, name))
		if readErr != nil {
			return 0, fmt.Errorf("cannot determine cgroup limits: %v", readErr)
		}
		return v, nil
	}

	cpuQuota, err := readCPUValue("cpu.cfs_quota_us")
	if err != nil {
		return nil, err
	}
	cpuPeriod, err := readCPUValue("cpu.cfs_period_us")
	if err != nil {
		return nil, err
	}
	cpuShares, err := readCPUValue("cpu.shares")
	if err != nil {
		return nil, err
	}

	return &s2iapi.CGroupLimits{
		CPUShares:        cpuShares,
		CPUPeriod:        cpuPeriod,
		CPUQuota:         cpuQuota,
		MemoryLimitBytes: byteLimit,
		// Set memoryswap==memorylimit, this ensures no swapping occurs.
		// see: https://docs.docker.com/engine/reference/run/#runtime-constraints-on-cpu-and-memory
		MemorySwap: byteLimit,
	}, nil
}
func readInt64(filePath string) (int64, error) {
data, err := ioutil.ReadFile(filePath)
if err != nil {
return -1, err
}
s := strings.TrimSpace(string(data))
val, err := strconv.ParseInt(s, 10, 64)
// overflow errors are ok, we'll get return a math.MaxInt64 value which is more
// than enough anyway. For underflow we'll return MinInt64 and the error.
if err != nil && err.(*strconv.NumError).Err == strconv.ErrRange {
if s[0] == '-' {
return math.MinInt64, err
}
return math.MaxInt64, nil
} else if err != nil {
return -1, err
}
return val, nil
}
// MergeEnv will take an existing environment and merge it with a new set of
// variables. For variables with the same name in both, only the one in the
// new environment will be kept. Entries from newEnv come first in the result,
// followed by the surviving entries from oldEnv in their original order.
func MergeEnv(oldEnv, newEnv []string) []string {
	// name extracts the variable name from a KEY=VALUE entry; an entry
	// without "=" is treated as a name on its own.
	name := func(entry string) string {
		if idx := strings.Index(entry, "="); idx != -1 {
			return entry[:idx]
		}
		return entry
	}

	overridden := map[string]struct{}{}
	for _, entry := range newEnv {
		overridden[name(entry)] = struct{}{}
	}

	merged := []string{}
	merged = append(merged, newEnv...)
	for _, entry := range oldEnv {
		if _, shadowed := overridden[name(entry)]; !shadowed {
			merged = append(merged, entry)
		}
	}
	return merged
}