Using a ConfigMap as a Container's Configuration File

Building the demo image

We'll build a small image of our own to make testing easier.

go mod init k8s-configmap-demo
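
The demo pulls in gin, pflag, and viper; one way to fetch them after initializing the module:

go get github.com/gin-gonic/gin github.com/spf13/pflag github.com/spf13/viper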

main.go

package main

import (
	"fmt"
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

var (
	conf = pflag.StringP("config", "c", "", "config filepath")
)

type Config struct {
	Name string
}

// configRun is the public entry point for loading configuration.
func configRun(cfg string) error {
	c := Config{
		Name: cfg,
	}

	return c.init()
}

func (c *Config) init() error {
	if c.Name != "" {
		viper.SetConfigFile(c.Name)
	} else {
		// The default config file is ./config.yaml.
		viper.AddConfigPath(".")
		viper.SetConfigName("config")
	}

	viper.SetConfigType("yaml")
	// Parse the config file with viper.
	if err := viper.ReadInConfig(); err != nil {
		return fmt.Errorf("fatal error config file: %w", err)
	}

	return nil
}

func main() {
	pflag.Parse()

	// Initialize configuration.
	if err := configRun(*conf); err != nil {
		panic(err)
	}

	gin.SetMode(viper.GetString("mode"))
	g := gin.New()
	g = LoadRoute(g)

	g.Run(viper.GetString("addr"))
}

func LoadRoute(g *gin.Engine) *gin.Engine {
	g.Use(gin.Recovery())
	// 404 handler.
	g.NoRoute(func(c *gin.Context) {
		c.String(http.StatusNotFound, "404 not found")
	})

	g.GET("/", Index)

	return g
}

// Response is the common API envelope.
type Response struct {
	Code    int         `json:"code"`
	Message string      `json:"message"`
	Data    interface{} `json:"data"`
}

// ApiResponse writes the common API response structure.
func ApiResponse(c *gin.Context, code int, message string, data interface{}) {
	c.JSON(http.StatusOK, Response{
		Code:    code,
		Message: message,
		Data:    data,
	})
}

func Index(c *gin.Context) {
	ApiResponse(c, 0, "success", viper.GetString("hi"))
}

The code is straightforward: it reads the config file, runs a gin HTTP server, and returns the string stored under the config key hi.

config.yaml

name: demo2
mode: debug
addr: :8080
hi: w~o~w
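
With the config file in place, the service can be smoke-tested locally before building the image (a quick sketch; assumes Go is installed and the dependencies above have been fetched):

go build -o hi .
./hi -c config.yaml
# in another terminal:
curl http://127.0.0.1:8080/
# {"code":0,"message":"success","data":"w~o~w"}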

Dockerfile

FROM golang:alpine
RUN mkdir /app
WORKDIR /app
COPY ./hi /app
COPY ./config.yaml /app
RUN chmod +x hi
CMD ["./hi"]
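
Note that the Dockerfile copies a prebuilt binary named hi instead of compiling inside the image, so the binary has to be built for Linux first (a sketch; GOARCH=amd64 is an assumption about the cluster's nodes):

CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o hi .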

Build and tag the image (the tag must match the image referenced in the Deployment below):

docker build -t k8s-configmap-demo:0.1 .
docker tag k8s-configmap-demo:0.1 13sai/k8s-configmap-demo:0.1

Push the image to Docker Hub:

docker login

docker push 13sai/k8s-configmap-demo:0.1

I manage the cluster through Kuboard, and all of the steps below are performed in the Kuboard UI.

ConfigMap configuration

Create the ConfigMap:

Go to the default namespace > Resources > ConfigMaps:

  • Name: sai
  • Data key: config.yaml
  • Value: the contents of config.yaml above
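
If you prefer the command line to the Kuboard UI, an equivalent ConfigMap can be created with kubectl (a sketch, assuming config.yaml is in the current directory):

kubectl create configmap sai --from-file=config.yaml -n default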

Create the workload

The configuration can follow the screenshot below; a few points to note (see the manifest snippet after this list):

  • For the data volume (Volume), choose ConfigMap
  • The pod's Command and the mount point
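
The essential detail is mounting the ConfigMap key as a single file via subPath, so the volume only shadows /app/config.yaml instead of replacing the whole /app directory (which would hide the hi binary). The relevant snippet from the generated manifest shown later:

volumeMounts:
- mountPath: /app/config.yaml
  name: config
  subPath: config.yaml
volumes:
- configMap:
    name: sai
  name: config

One trade-off to be aware of: files mounted with subPath do not receive automatic updates when the ConfigMap changes; the pods have to be restarted to pick up new values.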

(Screenshot: workload configuration)

Save, and the workload is deployed.
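
Once the pods are running, the mounted file can also be verified from the command line (a sketch):

kubectl get pods -n default -l k8s.kuboard.cn/name=web-test
kubectl exec -n default deploy/web-test -- cat /app/config.yaml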


Finally, here is the generated YAML:

---
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: '10'
    k8s.kuboard.cn/ingress: 'false'
    k8s.kuboard.cn/service: NodePort
    k8s.kuboard.cn/workload: web-test
  creationTimestamp: '2021-06-25T02:30:05Z'
  generation: 11
  labels:
    k8s.kuboard.cn/layer: web
    k8s.kuboard.cn/name: web-test
  managedFields:
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      'f:metadata':
        'f:annotations':
          .: {}
          'f:k8s.kuboard.cn/ingress': {}
          'f:k8s.kuboard.cn/service': {}
          'f:k8s.kuboard.cn/workload': {}
        'f:labels':
          .: {}
          'f:k8s.kuboard.cn/layer': {}
          'f:k8s.kuboard.cn/name': {}
      'f:spec':
        'f:progressDeadlineSeconds': {}
        'f:replicas': {}
        'f:revisionHistoryLimit': {}
        'f:selector':
          'f:matchLabels':
            .: {}
            'f:k8s.kuboard.cn/layer': {}
            'f:k8s.kuboard.cn/name': {}
        'f:strategy':
          'f:rollingUpdate':
            .: {}
            'f:maxSurge': {}
            'f:maxUnavailable': {}
          'f:type': {}
        'f:template':
          'f:metadata':
            'f:annotations':
              .: {}
              'f:kubectl.kubernetes.io/restartedAt': {}
            'f:labels':
              .: {}
              'f:k8s.kuboard.cn/layer': {}
              'f:k8s.kuboard.cn/name': {}
          'f:spec':
            'f:containers':
              'k:{"name":"test1"}':
                .: {}
                'f:command': {}
                'f:image': {}
                'f:imagePullPolicy': {}
                'f:lifecycle': {}
                'f:name': {}
                'f:resources': {}
                'f:terminationMessagePath': {}
                'f:terminationMessagePolicy': {}
                'f:volumeMounts':
                  .: {}
                  'k:{"mountPath":"/app/config.yaml"}':
                    .: {}
                    'f:mountPath': {}
                    'f:name': {}
                    'f:subPath': {}
            'f:dnsConfig': {}
            'f:dnsPolicy': {}
            'f:restartPolicy': {}
            'f:schedulerName': {}
            'f:securityContext':
              .: {}
              'f:seLinuxOptions': {}
            'f:serviceAccount': {}
            'f:serviceAccountName': {}
            'f:terminationGracePeriodSeconds': {}
            'f:volumes':
              .: {}
              'k:{"name":"config"}':
                .: {}
                'f:configMap':
                  .: {}
                  'f:defaultMode': {}
                  'f:name': {}
                'f:name': {}
    manager: Mozilla
    operation: Update
    time: '2021-06-25T02:52:22Z'
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      'f:metadata':
        'f:annotations':
          'f:deployment.kubernetes.io/revision': {}
      'f:status':
        'f:availableReplicas': {}
        'f:conditions':
          'k:{"type":"Available"}':
            'f:lastTransitionTime': {}
            'f:lastUpdateTime': {}
            'f:message': {}
            'f:reason': {}
            'f:status': {}
          'k:{"type":"Progressing"}':
            'f:lastUpdateTime': {}
            'f:message': {}
            'f:reason': {}
        'f:observedGeneration': {}
        'f:readyReplicas': {}
        'f:replicas': {}
        'f:updatedReplicas': {}
    manager: kube-controller-manager
    operation: Update
    time: '2021-06-25T06:56:11Z'
  name: web-test
  namespace: default
  resourceVersion: '372632'
  selfLink: /apis/apps/v1/namespaces/default/deployments/web-test
  uid: 31fbb0dc-9862-4f1a-b8c0-e6a5a307e34c
spec:
  progressDeadlineSeconds: 600
  replicas: 3
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s.kuboard.cn/layer: web
      k8s.kuboard.cn/name: web-test
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/restartedAt: '2021-06-25T10:52:22+08:00'
      creationTimestamp: null
      labels:
        k8s.kuboard.cn/layer: web
        k8s.kuboard.cn/name: web-test
    spec:
      containers:
      - command:
        - /app/hi
        image: '13sai/k8s-configmap-demo:0.1'
        imagePullPolicy: Always
        lifecycle: {}
        name: test1
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /app/config.yaml
          name: config
          subPath: config.yaml
      dnsConfig: {}
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext:
        seLinuxOptions: {}
      serviceAccount: default
      serviceAccountName: default
      terminationGracePeriodSeconds: 30
      volumes:
      - configMap:
          defaultMode: 420
          name: sai
        name: config

---
apiVersion: v1
kind: Service
metadata:
  annotations:
    k8s.kuboard.cn/workload: web-test
  creationTimestamp: '2021-06-25T02:32:38Z'
  labels:
    k8s.kuboard.cn/layer: web
    k8s.kuboard.cn/name: web-test
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      'f:metadata':
        'f:annotations':
          .: {}
          'f:k8s.kuboard.cn/workload': {}
        'f:labels':
          .: {}
          'f:k8s.kuboard.cn/layer': {}
          'f:k8s.kuboard.cn/name': {}
      'f:spec':
        'f:externalTrafficPolicy': {}
        'f:ports':
          .: {}
          'k:{"port":80,"protocol":"TCP"}':
            .: {}
            'f:name': {}
            'f:port': {}
            'f:protocol': {}
            'f:targetPort': {}
        'f:selector':
          .: {}
          'f:k8s.kuboard.cn/layer': {}
          'f:k8s.kuboard.cn/name': {}
        'f:sessionAffinity': {}
        'f:type': {}
    manager: Mozilla
    operation: Update
    time: '2021-06-25T02:37:44Z'
  name: web-test
  namespace: default
  resourceVersion: '346740'
  selfLink: /api/v1/namespaces/default/services/web-test
  uid: 21ae5d1d-3a52-496d-8636-1a96b0256ddf
spec:
  clusterIP: 10.110.30.213
  externalTrafficPolicy: Cluster
  ports:
  - name: rsxstz
    nodePort: 32286
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    k8s.kuboard.cn/layer: web
    k8s.kuboard.cn/name: web-test
  sessionAffinity: None
  type: NodePort
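
One thing worth double-checking in the generated Service: targetPort is 80, but the app listens on :8080 (the addr key in config.yaml), so NodePort traffic will not reach the container as-is. The ports block should point at the container's actual port, for example:

ports:
- name: rsxstz
  nodePort: 32286
  port: 80
  protocol: TCP
  targetPort: 8080

With that fixed, the service answers on any node at port 32286:

curl http://<node-ip>:32286/
# {"code":0,"message":"success","data":"w~o~w"}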