/
main.go
139 lines (121 loc) · 3.52 KB
/
main.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
package main
import (
"context"
"fmt"
"io/ioutil"
"log"
"os"
"sync"
"github.com/docker/docker/client"
"github.com/komuw/meli/api"
"github.com/komuw/meli/cli"
"gopkg.in/yaml.v2"
)
/* DOCS:
1. https://godoc.org/github.com/moby/moby/client
2. https://docs.docker.com/engine/api/v1.31/
*/
var version string
// main parses CLI flags, reads and unmarshals the docker-compose file,
// sets up the docker client and the per-directory meli network, creates any
// top-level volumes, then starts every compose service concurrently and
// waits for all of them to finish.
func main() {
	showVersion, followLogs, dockerComposeFile := cli.Cli()
	if showVersion {
		log.Println("Meli version: ", version)
		os.Exit(0)
	}

	data, err := ioutil.ReadFile(dockerComposeFile)
	if err != nil {
		log.Fatal(err, " :unable to read docker-compose file")
	}
	var dockerCyaml api.DockerComposeConfig
	// data is already a []byte; pass it directly.
	err = yaml.Unmarshal(data, &dockerCyaml)
	if err != nil {
		log.Fatal(err, " :unable to parse docker-compose file contents")
	}

	ctx := context.Background()

	// named dockerClient (not cli) so it does not shadow the imported cli package.
	dockerClient, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err, " :unable to initialize docker client")
	}
	defer dockerClient.Close()

	currentDir, err := os.Getwd()
	if err != nil {
		log.Fatal(err, " :unable to get the current working directory")
	}
	// one network per working directory, so separate projects stay isolated.
	networkName := "meli_network_" + api.GetCwdName(currentDir)
	networkID, err := api.GetNetwork(ctx, networkName, dockerClient)
	if err != nil {
		log.Fatal(err, " :unable to create/get network")
	}
	api.GetAuth()

	// Create top level volumes, if any
	if len(dockerCyaml.Volumes) > 0 {
		for k := range dockerCyaml.Volumes {
			// TODO we need to synchronise here else we'll get a race
			// but I think we can get away for now because:
			// 1. there are on average a lot more containers in a compose file
			// than volumes, so the sync in the for loop for containers is enough
			// 2. since we intend to stream logs as containers run(see; issues/24);
			// then meli will be up long enough for the volume creation goroutines to have finished.
			go api.CreateDockerVolume(ctx, dockerClient, "meli_"+k, "local", os.Stdout)
		}
	}

	var wg sync.WaitGroup
	for k, v := range dockerCyaml.Services {
		wg.Add(1)
		// label each container so meli can find its own containers later.
		v.Labels = append(v.Labels, fmt.Sprintf("meli_service=meli_%s", k))
		dc := &api.DockerContainer{
			ServiceName:       k,
			ComposeService:    v,
			NetworkID:         networkID,
			NetworkName:       networkName,
			FollowLogs:        followLogs,
			DockerComposeFile: dockerComposeFile,
			LogMedium:         os.Stdout}
		go startComposeServices(ctx, dockerClient, &wg, dc)
	}
	wg.Wait()
}
// startComposeServices runs the lifecycle of one compose service:
//  1. Pull Image
//  2. Create a container
//  3. Connect container to network
//  4. Start container
//  5. Stream container logs
// On any error it logs and returns early, leaving sibling goroutines
// (other services) to continue running.
func startComposeServices(ctx context.Context, cli *client.Client, wg *sync.WaitGroup, dc *api.DockerContainer) {
	defer wg.Done()

	// logFailure reports an error for this service; the leading whitespace
	// makes the error visible to a human among the streamed container logs.
	logFailure := func(err error) {
		log.Printf("\n\t service=%s error=%s", dc.ServiceName, err)
	}

	// Pull the image only when the service declares one.
	if len(dc.ComposeService.Image) > 0 {
		if err := api.PullDockerImage(ctx, cli, dc); err != nil {
			logFailure(err)
			return
		}
	}

	alreadyCreated, _, err := api.CreateContainer(ctx, cli, dc)
	if err != nil {
		logFailure(err)
		return
	}

	// Only freshly created containers get connected to the network.
	if !alreadyCreated {
		if err := api.ConnectNetwork(ctx, cli, dc); err != nil {
			logFailure(err)
			return
		}
	}

	if err := api.ContainerStart(ctx, cli, dc); err != nil {
		logFailure(err)
		return
	}

	if err := api.ContainerLogs(ctx, cli, dc); err != nil {
		logFailure(err)
		return
	}
}