Skip to content

Commit ccce821

Browse files
Tomclifplk
authored and committed
Make helm charts and scripts compatible to deploy FfDL on any namespace (#110)
* make helm charts and scripts compatible to deploy FfDL on any namespace * allow users to export all the environment variables in a txt file * Update readme with new notice * Fix typo * Update static volumes config v2 namespace parameter * capitalize NAMESPACE, update Makefile, developer guide, and troubleshooting.
1 parent cc7388a commit ccce821

38 files changed

+157
-89
lines changed

Makefile

Lines changed: 16 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ CLUSTER_NAME ?= mycluster
3232
PUBLIC_IP ?= 127.0.0.1
3333
CI_MINIKUBE_VERSION ?= v0.25.1
3434
CI_KUBECTL_VERSION ?= v1.9.4
35+
NAMESPACE ?= default
3536

3637
AWS_ACCESS_KEY_ID ?= test
3738
AWS_SECRET_ACCESS_KEY ?= test
@@ -99,6 +100,7 @@ docker-push:
99100
# TODO: setup-registry
100101

101102
create-registry:
103+
@kubectl config set-context $$(kubectl config current-context) --namespace=$$NAMESPACE
102104
@kubectl create secret docker-registry regcred --docker-server=${DOCKER_REPO} --docker-username="${DOCKER_REPO_USER}" --docker-password="${DOCKER_REPO_PASS}" [email protected] ; \
103105
cd ${DOCKER_REPO_DIR} ; \
104106
docker-compose up -d
@@ -125,20 +127,21 @@ deploy-plugin:
125127
done; \
126128
fi;
127129
@existingPlugin=$$(helm list | grep ibmcloud-object-storage-plugin | awk '{print $$1}' | head -n 1);
130+
@kubectl config set-context $$(kubectl config current-context) --namespace=$$NAMESPACE
128131
@if [ "$(VM_TYPE)" = "dind" ]; then \
129132
export FFDL_PATH=$$(pwd); \
130133
./bin/s3_driver.sh; \
131134
sleep 10; \
132135
(if [ -z "$$existingPlugin" ]; then \
133-
helm install --set dind=true,cloud=false storage-plugin; \
136+
helm install --set dind=true,cloud=false,namespace=$$NAMESPACE storage-plugin; \
134137
else \
135-
helm upgrade --set dind=true,cloud=false $$existingPlugin storage-plugin; \
138+
helm upgrade --set dind=true,cloud=false,namespace=$$NAMESPACE $$existingPlugin storage-plugin; \
136139
fi) & pid=$$!; \
137140
else \
138141
(if [ -z "$$existingPlugin" ]; then \
139-
helm install storage-plugin; \
142+
helm install --set namespace=$$NAMESPACE storage-plugin; \
140143
else \
141-
helm upgrade $$existingPlugin storage-plugin; \
144+
helm upgrade --set namespace=$$NAMESPACE $$existingPlugin storage-plugin; \
142145
fi) & pid=$$!; \
143146
fi;
144147
@echo "Wait while kubectl get pvc shows static-volume-1 in state Pending"
@@ -148,6 +151,7 @@ deploy-plugin:
148151

149152
quickstart-deploy:
150153
@echo "collecting existing pods"
154+
@kubectl config set-context $$(kubectl config current-context) --namespace=$$NAMESPACE
151155
@while kubectl get pods --all-namespaces | \
152156
grep -v RESTARTS | \
153157
grep -v Running | \
@@ -161,11 +165,11 @@ quickstart-deploy:
161165
existing=$$(helm list | grep ffdl | awk '{print $$1}' | head -n 1); \
162166
(if [ -z "$$existing" ]; then \
163167
echo "Deploying the stack via Helm. This will take a while."; \
164-
helm install --set lcm.shared_volume_storage_class=$$SHARED_VOLUME_STORAGE_CLASS . ; \
168+
helm install --set lcm.shared_volume_storage_class=$$SHARED_VOLUME_STORAGE_CLASS,namespace=$$NAMESPACE . ; \
165169
sleep 10; \
166170
else \
167171
echo "Upgrading existing Helm deployment ($$existing). This will take a while."; \
168-
helm upgrade --set lcm.shared_volume_storage_class=$$SHARED_VOLUME_STORAGE_CLASS $$existing . ; \
172+
helm upgrade --set lcm.shared_volume_storage_class=$$SHARED_VOLUME_STORAGE_CLASS,namespace=$$NAMESPACE $$existing . ; \
169173
fi) & pid=$$!; \
170174
sleep 5; \
171175
while kubectl get pods --all-namespaces | \
@@ -208,6 +212,7 @@ quickstart-deploy:
208212
test-job-submit: ## Submit test training job
209213
@# make sure the buckets with training data exist
210214
@echo Downloading Docker images and test training data. This may take a while.
215+
@kubectl config set-context $$(kubectl config current-context) --namespace=$$NAMESPACE
211216
@if [ "$(VM_TYPE)" = "minikube" ]; then \
212217
eval $(minikube docker-env); docker images | grep tensorflow | grep latest > /dev/null || docker pull tensorflow/tensorflow > /dev/null; \
213218
fi
@@ -253,6 +258,7 @@ deploy: ## Deploy the services to Kubernetes
253258
sleep 3; \
254259
fi;
255260
@echo collecting existing pods
261+
@kubectl config set-context $$(kubectl config current-context) --namespace=$$NAMESPACE
256262
@while kubectl get pods --all-namespaces | \
257263
grep -v RESTARTS | \
258264
grep -v Running | \
@@ -272,9 +278,9 @@ deploy: ## Deploy the services to Kubernetes
272278
cp -rf Chart.yaml values.yaml templates ${HELM_DEPLOY_DIR}; \
273279
existing=$$(helm list | grep ffdl | awk '{print $$1}' | head -n 1); \
274280
if [ "$$CI" = "true" ]; then \
275-
export helm_params='--set lcm.shared_volume_storage_class=${SHARED_VOLUME_STORAGE_CLASS},has_static_volumes=${HAS_STATIC_VOLUMES},prometheus.deploy=false,learner.docker_namespace=${DOCKER_NAMESPACE},docker.namespace=${DOCKER_NAMESPACE},learner.tag=${IMAGE_TAG},docker.pullPolicy=${DOCKER_PULL_POLICY},docker.registry=${DOCKER_REPO},trainer.version=${IMAGE_TAG},restapi.version=${IMAGE_TAG},lcm.version=${IMAGE_TAG},trainingdata.version=${IMAGE_TAG},databroker.tag=${IMAGE_TAG},databroker.version=${IMAGE_TAG},webui.version=${IMAGE_TAG}'; \
281+
export helm_params='--set lcm.shared_volume_storage_class=${SHARED_VOLUME_STORAGE_CLASS},has_static_volumes=${HAS_STATIC_VOLUMES},namespace=${NAMESPACE},prometheus.deploy=false,learner.docker_namespace=${DOCKER_NAMESPACE},docker.namespace=${DOCKER_NAMESPACE},learner.tag=${IMAGE_TAG},docker.pullPolicy=${DOCKER_PULL_POLICY},docker.registry=${DOCKER_REPO},trainer.version=${IMAGE_TAG},restapi.version=${IMAGE_TAG},lcm.version=${IMAGE_TAG},trainingdata.version=${IMAGE_TAG},databroker.tag=${IMAGE_TAG},databroker.version=${IMAGE_TAG},webui.version=${IMAGE_TAG}'; \
276282
else \
277-
export helm_params='--set lcm.shared_volume_storage_class=${SHARED_VOLUME_STORAGE_CLASS},has_static_volumes=${HAS_STATIC_VOLUMES},learner.docker_namespace=${DOCKER_NAMESPACE},docker.namespace=${DOCKER_NAMESPACE},learner.tag=${IMAGE_TAG},docker.pullPolicy=${DOCKER_PULL_POLICY},docker.registry=${DOCKER_REPO},trainer.version=${IMAGE_TAG},restapi.version=${IMAGE_TAG},lcm.version=${IMAGE_TAG},trainingdata.version=${IMAGE_TAG},databroker.tag=${IMAGE_TAG},databroker.version=${IMAGE_TAG},webui.version=${IMAGE_TAG}'; \
283+
export helm_params='--set lcm.shared_volume_storage_class=${SHARED_VOLUME_STORAGE_CLASS},has_static_volumes=${HAS_STATIC_VOLUMES},namespace=${NAMESPACE},learner.docker_namespace=${DOCKER_NAMESPACE},docker.namespace=${DOCKER_NAMESPACE},learner.tag=${IMAGE_TAG},docker.pullPolicy=${DOCKER_PULL_POLICY},docker.registry=${DOCKER_REPO},trainer.version=${IMAGE_TAG},restapi.version=${IMAGE_TAG},lcm.version=${IMAGE_TAG},trainingdata.version=${IMAGE_TAG},databroker.tag=${IMAGE_TAG},databroker.version=${IMAGE_TAG},webui.version=${IMAGE_TAG}'; \
278284
fi; \
279285
(if [ -z "$$existing" ]; then \
280286
echo "Deploying the stack via Helm. This will take a while."; \
@@ -448,6 +454,7 @@ docker-build-logcollectors:
448454
test-push-data-s3: ## Test
449455
@# Pushes test data to S3 buckets.
450456
@echo Pushing test data.
457+
@kubectl config set-context $$(kubectl config current-context) --namespace=$$NAMESPACE
451458
@s3_ip=$$(make --no-print-directory kubernetes-ip); \
452459
s3_port=$$(kubectl get service s3 -o jsonpath='{.spec.ports[0].nodePort}'); \
453460
s3_url=http://$$s3_ip:$$s3_port; \
@@ -557,6 +564,7 @@ test-submit: ## Submit test training job
557564
@if [ "$(VM_TYPE)" = "minikube" ]; then \
558565
eval $(minikube docker-env); docker images | grep tensorflow | grep latest > /dev/null || docker pull tensorflow/tensorflow > /dev/null; \
559566
fi
567+
@kubectl config set-context $$(kubectl config current-context) --namespace=$$NAMESPACE
560568
@node_ip=$$(make --no-print-directory kubernetes-ip); \
561569
s3_ip=$$(kubectl get po/storage-0 -o=jsonpath='{.status.hostIP}'); \
562570
s3_port=$$(kubectl get service s3 -o jsonpath='{.spec.ports[0].nodePort}'); \

README.md

Lines changed: 28 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -71,13 +71,21 @@ To know more about the architectural details, please read the [design document](
7171

7272
There are multiple installation paths for installing FfDL locally ("1-click-install") or into an existing Kubernetes cluster. You can visit [Step 5](#5-detailed-installation-instructions) for more details on the deployment instructions.
7373

74+
> If you are using bash shell, you can modify the necessary environment variables in `env.txt` and export all of them using the following commands
75+
> ```shell
76+
> source env.txt
77+
> export $(cut -d= -f1 env.txt)
78+
> ```
79+
7480
### 1.1 Installation using Kubeadm-DIND
7581
7682
If you have [Kubeadm-DIND](https://github.com/kubernetes-sigs/kubeadm-dind-cluster#using-preconfigured-scripts) installed on your machine, use these commands to deploy the FfDL platform:
7783
``` shell
7884
export VM_TYPE=dind
7985
export PUBLIC_IP=localhost
8086
export SHARED_VOLUME_STORAGE_CLASS="";
87+
export NAMESPACE=default # If your namespace does not exist yet, please create the namespace `kubectl create namespace $NAMESPACE` before running the make commands below
88+
8189
make deploy-plugin
8290
make quickstart-deploy
8391
```
@@ -91,9 +99,11 @@ then deploy the platform services:
9199
``` shell
92100
export VM_TYPE=none
93101
export PUBLIC_IP=<Cluster Public IP>
102+
export NAMESPACE=default # If your namespace does not exist yet, please create the namespace `kubectl create namespace $NAMESPACE` before running the make commands below
94103

95104
# Change the storage class to what's available on your Cloud Kubernetes Cluster.
96105
export SHARED_VOLUME_STORAGE_CLASS="ibmc-file-gold";
106+
97107
make deploy-plugin
98108
make quickstart-deploy
99109
```
@@ -130,29 +140,42 @@ kubectl get pods --all-namespaces | grep tiller-deploy
130140
```
131141

132142
2. Define the necessary environment variables.
143+
> If you are using bash shell, you can modify the necessary environment variables in `env.txt` and export all of them using the following commands
144+
> ```shell
145+
> source env.txt
146+
> export $(cut -d= -f1 env.txt)
147+
> ```
148+
133149
* 2.a. For Kubeadm-DIND Cluster only
134150
```shell
135151
export FFDL_PATH=$(pwd)
136152
export SHARED_VOLUME_STORAGE_CLASS=""
153+
export VM_TYPE=dind
154+
export PUBLIC_IP=localhost
155+
export NAMESPACE=default # If your namespace does not exist yet, please create the namespace `kubectl create namespace $NAMESPACE` before proceeding to the next step
137156
```
138157
139158
* 2.b. For Cloud Kubernetes Cluster
159+
> Note: If you are using IBM Cloud Cluster, you can obtain your k8s public ip using `bx cs workers <cluster-name>`.
160+
140161
```shell
141162
# Change the storage class to what's available on your Cloud Kubernetes Cluster.
142163
export SHARED_VOLUME_STORAGE_CLASS="ibmc-file-gold"
164+
export VM_TYPE=none
165+
export PUBLIC_IP=<Cluster Public IP>
166+
export NAMESPACE=default # If your namespace does not exist yet, please create the namespace `kubectl create namespace $NAMESPACE` before proceeding to the next step
143167
```
144168

145169
3. Install the Object Storage driver using helm install.
146170
* 3.a. For Kubeadm-DIND Cluster only
147171
```shell
148-
export FFDL_PATH=$(pwd)
149172
./bin/s3_driver.sh
150-
helm install storage-plugin --set dind=true,cloud=false
173+
helm install storage-plugin --set dind=true,cloud=false,namespace=$NAMESPACE
151174
```
152175

153176
* 3.b. For Cloud Kubernetes Cluster
154177
```shell
155-
helm install storage-plugin
178+
helm install storage-plugin --set namespace=$NAMESPACE
156179
```
157180

158181
4. Create a static volume to store any metadata from FfDL.
@@ -168,13 +191,14 @@ popd
168191
5. Now let's install all the necessary FfDL components using helm install.
169192

170193
``` shell
171-
helm install . --set lcm.shared_volume_storage_class=$SHARED_VOLUME_STORAGE_CLASS
194+
helm install . --set lcm.shared_volume_storage_class=$SHARED_VOLUME_STORAGE_CLASS,namespace=$NAMESPACE
172195
```
173196
> Note: If you want to upgrade an older version of FfDL, run
174197
> `helm upgrade $(helm list | grep ffdl | awk '{print $1}' | head -n 1) .`
175198
176199
Make sure all the FfDL components are installed and running before moving to the next step.
177200
``` shell
201+
kubectl config set-context $(kubectl config current-context) --namespace=$NAMESPACE
178202
kubectl get pods
179203
# NAME READY STATUS RESTARTS AGE
180204
# alertmanager-7cf6b988b9-h9q6q 1/1 Running 0 5h
@@ -207,22 +231,7 @@ s3_port=$(kubectl get service s3 -o jsonpath='{.spec.ports[0].nodePort}')
207231
```
208232

209233
7. Run the following commands to configure Grafana for monitoring FfDL using the logging information from prometheus.
210-
* 7.a. For Kubeadm-DIND Cluster only
211234
```shell
212-
export VM_TYPE=none
213-
export PUBLIC_IP=localhost
214-
215-
./bin/grafana.init.sh
216-
```
217-
218-
219-
* 7.b. For Cloud Kubernetes Cluster.
220-
> Note: If you are using IBM Cloud Cluster, you can obtain your k8s public ip using `bx cs workers <cluster-name>`.
221-
222-
``` shell
223-
export VM_TYPE=none
224-
export PUBLIC_IP=<Cluster Public IP>
225-
226235
./bin/grafana.init.sh
227236
```
228237

bin/create_static_volumes.sh

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,13 +13,15 @@
1313
SHARED_VOLUME_STORAGE_CLASS="${SHARED_VOLUME_STORAGE_CLASS:-""}"
1414

1515
volumeNum=${1:-1}
16+
Namespace=${Namespace:-default}
1617

1718
echo "Creating persistent volume claim $volumeNum"
1819
(kubectl apply -f - <<EOF
1920
kind: PersistentVolumeClaim
2021
apiVersion: v1
2122
metadata:
2223
name: static-volume-$volumeNum
24+
namespace: $Namespace
2325
annotations:
2426
volume.beta.kubernetes.io/storage-class: "$SHARED_VOLUME_STORAGE_CLASS"
2527
labels:

bin/create_static_volumes_config.sh

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -9,17 +9,18 @@
99

1010
CONFIGMAP_NAME=static-volumes
1111
volumeType=${1:-dlaas-static-volume}
12+
NAMESPACE=${NAMESPACE:-default}
1213

1314
# Delete configmap
14-
if kubectl get cm | grep static-volumes &> /dev/null; then kubectl delete configmap ${CONFIGMAP_NAME}; else echo "No need to delete ${CONFIGMAP_NAME} since it doesn't exist."; fi
15+
if kubectl get cm -n ${NAMESPACE} | grep static-volumes &> /dev/null; then kubectl delete configmap ${CONFIGMAP_NAME} -n ${NAMESPACE}; else echo "No need to delete ${CONFIGMAP_NAME} since it doesn't exist."; fi
1516

1617
# Create new configmap
1718
echo
1819
echo "Using volumes with label type=$volumeType:"
19-
kubectl get pvc --selector type=${volumeType}
20+
kubectl get pvc --selector type=${volumeType} -n ${NAMESPACE}
2021
echo
21-
kubectl create configmap ${CONFIGMAP_NAME} --from-file=PVCs.yaml=<(
22-
kubectl get pvc --selector type=${volumeType} -o yaml
22+
kubectl create configmap ${CONFIGMAP_NAME} -n ${NAMESPACE} --from-file=PVCs.yaml=<(
23+
kubectl get pvc --selector type=${volumeType} -n ${NAMESPACE} -o yaml
2324
)
2425

2526
CONFIGMAP_NAME2=static-volumes-v2
@@ -30,9 +31,9 @@ CONFIGMAP_NAME2=static-volumes-v2
3031
# Create new configmap
3132
echo
3233
echo "Using volumes with label type=$volumeType"
33-
kubectl get pvc --selector type=${volumeType}
34+
kubectl get pvc --selector type=${volumeType} -n ${NAMESPACE}
3435
echo
3536

36-
kubectl get pvc --selector type="dlaas-static-volume" -o jsonpath='{"static-volumes-v2:"}{range .items[*]}{"\n - name: "}{.metadata.name}{"\n zlabel: "}{.metadata.name}{"\n status: active\n"}' > PVCs-v2.yaml
37+
kubectl get pvc --selector type="dlaas-static-volume" -n ${NAMESPACE} -o jsonpath='{"static-volumes-v2:"}{range .items[*]}{"\n - name: "}{.metadata.name}{"\n zlabel: "}{.metadata.name}{"\n status: active\n"}' > PVCs-v2.yaml
3738

38-
kubectl create configmap ${CONFIGMAP_NAME2} --from-file=PVCs-v2.yaml
39+
kubectl create configmap ${CONFIGMAP_NAME2} -n ${NAMESPACE} --from-file=PVCs-v2.yaml

bin/grafana.init.sh

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,8 @@ fi
1717

1818
[ -z ${node_ip} ] && echo "Can't get node_ip for grafana, \$VM_TYPE == \"$VM_TYPE\""
1919

20-
grafana_port=$(kubectl get service grafana -o jsonpath='{.spec.ports[0].nodePort}')
20+
NAMESPACE=${NAMESPACE:-default}
21+
grafana_port=$(kubectl get service grafana -o jsonpath='{.spec.ports[0].nodePort}' -n $NAMESPACE)
2122
grafana_url="http://$node_ip:$grafana_port"
2223

2324
echo "wait until the grafana service is up (grafana_url=${grafana_url})"

docs/developer-guide.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ export DOCKER_NAMESPACE=<NAMESPACE_ON_IBM_CLOUD> # Container Registry Namespace
3232
export DOCKER_PULL_POLICY=Always # Keep IfNotPresent if not pushing to registry, e.g. for Minikube
3333
export VM_TYPE=none
3434
export HAS_STATIC_VOLUMES=True
35+
export NAMESPACE=default # If your namespace does not exist yet, please create the namespace `kubectl create namespace $NAMESPACE` before proceeding to the next step
3536
```
3637

3738
Compile the code, generate certificates, and build the Docker images via:
@@ -50,6 +51,7 @@ make docker-push # Push built Docker images to registry, not used for Minikube
5051
Make sure `kubectl` points to the right target context/namespace, then deploy the services to your Kubernetes
5152
environment (using `helm`):
5253
```shell
54+
kubectl config set-context $(kubectl config current-context) --namespace=$NAMESPACE # Set your current-context to the FfDL namespace
5355
make create-volumes # Create static volumes for sharing across pods
5456
make deploy-plugin # Deploy S3 storage plugin
5557
make deploy # Deploy FfDL

docs/troubleshooting.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ Need to adapt tensorflow version in manifest to what is specified on https://git
3737

3838
# DIND
3939
## Deploy
40-
* ffdl-lcm, ffdl-restapi, ffdl-trainer, ffdl-trainingdata and ffdl-ui pods show ImagePullBackOff: See if Kubernetes secret regcred exists via `kubectl get secret | grep regcred`. If it does not (output empty), create it with `kubectl create secret docker-registry regcred --docker-server=${DOCKER_REPO} --docker-username=${DOCKER_REPO_USER} --docker-password=${DOCKER_REPO_PASS} [email protected]`.
40+
* ffdl-lcm, ffdl-restapi, ffdl-trainer, ffdl-trainingdata and ffdl-ui pods show ImagePullBackOff: See if Kubernetes secret regcred exists via `kubectl get secret | grep regcred`. If it does not (output empty), create it with `kubectl create secret docker-registry regcred --docker-server=${DOCKER_REPO} --docker-username=${DOCKER_REPO_USER} --docker-password=${DOCKER_REPO_PASS} [email protected] -n ${NAMESPACE}`.
4141

4242
## Training
43-
* If you start a job and `lhelper` and `jobmonitor` pods get to `Running` state, but the corresponding `learner` remains stuck in `ContainerCreating`, please take a look at `kubectl describe pod <learner-pod>`. It is possible that your storage configuration in your manifest is invalid and if so, you should see events that point out the issues.
43+
* If you start a job and `lhelper` and `jobmonitor` pods get to `Running` state, but the corresponding `learner` remains stuck in `ContainerCreating`, please take a look at `kubectl describe pod <learner-pod>`. It is possible that your storage configuration in your manifest is invalid and if so, you should see events that point out the issues.

env.txt

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
VM_TYPE=dind # Kubernetes deployment environment (dind or none)
2+
PUBLIC_IP=localhost # Kubernetes External IP
3+
SHARED_VOLUME_STORAGE_CLASS=""; # Storage Class available on your Kubernetes Cluster
4+
NAMESPACE=default # The namespace that you want to deploy FfDL on
5+
FFDL_PATH=$(pwd) # Current path of your FfDL directory

storage-plugin/templates/volume.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ apiVersion: v1
33
kind: PersistentVolume
44
metadata:
55
name: local-volume-1
6+
namespace: {{.Values.namespace}}
67
labels:
78
type: local
89
spec:

storage-plugin/values.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,3 +7,4 @@ image:
77
pullPolicy: IfNotPresent
88
dind: false
99
cloud: true
10+
namespace: default

templates/infrastructure/etcd.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ apiVersion: v1
22
kind: Service
33
metadata:
44
name: etcd
5+
namespace: {{.Values.namespace}}
56
spec:
67
ports:
78
- port: 2379
@@ -19,6 +20,7 @@ metadata:
1920
app: etcd
2021
etcd_node: etcd0
2122
name: etcd0
23+
namespace: {{.Values.namespace}}
2224
spec:
2325
containers:
2426
- command:
@@ -57,6 +59,7 @@ metadata:
5759
labels:
5860
etcd_node: etcd0
5961
name: etcd0
62+
namespace: {{.Values.namespace}}
6063
spec:
6164
ports:
6265
- name: client

templates/infrastructure/mongo.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ apiVersion: apps/v1beta1
22
kind: StatefulSet
33
metadata:
44
name: mongo
5+
namespace: {{.Values.namespace}}
56
spec:
67
serviceName: mongo
78
replicas: 1
@@ -34,6 +35,7 @@ apiVersion: v1
3435
kind: Service
3536
metadata:
3637
name: mongo
38+
namespace: {{.Values.namespace}}
3739
labels:
3840
environment: local
3941
spec:

templates/infrastructure/storage.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ apiVersion: v1
22
kind: Service
33
metadata:
44
name: s3
5+
namespace: {{.Values.namespace}}
56
spec:
67
{{ if .Values.services.expose_node_port }}
78
type: NodePort
@@ -19,6 +20,7 @@ apiVersion: v1
1920
kind: Service
2021
metadata:
2122
name: elasticsearch
23+
namespace: {{.Values.namespace}}
2224
labels:
2325
component: elasticsearch
2426
spec:
@@ -39,6 +41,7 @@ apiVersion: apps/v1beta1
3941
kind: StatefulSet
4042
metadata:
4143
name: storage
44+
namespace: {{.Values.namespace}}
4245
spec:
4346
serviceName: storage
4447
replicas: 1

0 commit comments

Comments
 (0)