Skip to content

Commit 008ddbb

Browse files
authored
Merge pull request #764 from Mirantis/ivan4th/block-pv-e2e
Add e2e test for block PVs
2 parents 4d3ad75 + 2ee9550 commit 008ddbb

File tree

10 files changed

+572
-195
lines changed

10 files changed

+572
-195
lines changed

.circleci/config.yml

+3
Original file line numberDiff line numberDiff line change
@@ -175,6 +175,9 @@ e2e: &e2e
175175
NO_VM_CONSOLE=1 \
176176
INJECT_LOCAL_IMAGE=1 \
177177
VIRTLET_DEMO_BRANCH=master \
178+
ENABLE_CEPH=1 \
179+
FEATURE_GATES="BlockVolume=true" \
180+
KUBELET_FEATURE_GATES="BlockVolume=true" \
178181
BASE_LOCATION="$PWD" \
179182
deploy/demo.sh
180183
- run:

docs/design-proposals/pv.md

+6-1
Original file line numberDiff line numberDiff line change
@@ -412,7 +412,12 @@ MON_IP=$(docker exec kube-master route | grep default | awk '{print $2}')
412412
CEPH_PUBLIC_NETWORK=${MON_IP}/16
413413
docker run -d --net=host -e MON_IP=${MON_IP} \
414414
-e CEPH_PUBLIC_NETWORK=${CEPH_PUBLIC_NETWORK} \
415-
--name ceph_cluster docker.io/ceph/demo
415+
-e CEPH_DEMO_UID=foo \
416+
-e CEPH_DEMO_ACCESS_KEY=foo \
417+
-e CEPH_DEMO_SECRET_KEY=foo \
418+
-e CEPH_DEMO_BUCKET=foo \
419+
-e DEMO_DAEMONS="osd mds" \
420+
--name ceph_cluster docker.io/ceph/daemon demo
416421
```
417422

418423
Create a pool there:

tests/e2e/block_pv_test.go

+95
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,95 @@
1+
/*
2+
Copyright 2018 Mirantis
3+
4+
Licensed under the Apache License, Version 2.0 (the "License");
5+
you may not use this file except in compliance with the License.
6+
You may obtain a copy of the License at
7+
8+
http://www.apache.org/licenses/LICENSE-2.0
9+
10+
Unless required by applicable law or agreed to in writing, software
11+
distributed under the License is distributed on an "AS IS" BASIS,
12+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
See the License for the specific language governing permissions and
14+
limitations under the License.
15+
*/
16+
17+
package e2e
18+
19+
import (
20+
"github.com/Mirantis/virtlet/tests/e2e/framework"
21+
. "github.com/Mirantis/virtlet/tests/e2e/ginkgo-ext"
22+
)
23+
24+
var _ = Describe("Block PVs", func() {
25+
var (
26+
vm *framework.VMInterface
27+
ssh framework.Executor
28+
)
29+
30+
Context("[Local]", func() {
31+
var (
32+
virtletNodeName string
33+
devPath string
34+
)
35+
36+
withLoopbackBlockDevice(&virtletNodeName, &devPath)
37+
38+
AfterEach(func() {
39+
if ssh != nil {
40+
ssh.Close()
41+
}
42+
if vm != nil {
43+
deleteVM(vm)
44+
}
45+
})
46+
47+
It("Should be accessible from within the VM", func() {
48+
vm = makeVMWithMountAndSymlinkScript(virtletNodeName, []framework.PVCSpec{
49+
{
50+
Name: "block-pv",
51+
Size: "10M",
52+
NodeName: virtletNodeName,
53+
Block: true,
54+
LocalPath: devPath,
55+
ContainerPath: "/dev/testpvc",
56+
},
57+
}, nil)
58+
ssh = waitSSH(vm)
59+
expectToBeUsableForFilesystem(ssh, "/dev/testpvc")
60+
})
61+
})
62+
63+
Context("[Ceph RBD]", func() {
64+
var monitorIP string
65+
withCeph(&monitorIP, nil, "ceph-admin")
66+
67+
AfterEach(func() {
68+
if ssh != nil {
69+
ssh.Close()
70+
}
71+
if vm != nil {
72+
deleteVM(vm)
73+
}
74+
})
75+
76+
// FIXME: the test is marked Disruptive because rbd
77+
// hangs on CircleCI for some reason.
78+
It("[Disruptive] Should be accessible from within the VM", func() {
79+
vm = makeVMWithMountAndSymlinkScript("", []framework.PVCSpec{
80+
{
81+
Name: "block-pv",
82+
Size: "10M",
83+
Block: true,
84+
CephRBDImage: "rbd-test-image1",
85+
CephMonitorIP: monitorIP,
86+
CephRBDPool: "libvirt-pool",
87+
CephSecretName: "ceph-admin",
88+
ContainerPath: "/dev/testpvc",
89+
},
90+
}, nil)
91+
ssh = waitSSH(vm)
92+
expectToBeUsableForFilesystem(ssh, "/dev/testpvc")
93+
})
94+
})
95+
})

tests/e2e/ceph_test.go

+16-139
Original file line numberDiff line numberDiff line change
@@ -22,30 +22,18 @@ import (
2222

2323
. "github.com/onsi/gomega"
2424
"k8s.io/api/core/v1"
25-
"k8s.io/apimachinery/pkg/api/resource"
26-
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2725

2826
"github.com/Mirantis/virtlet/tests/e2e/framework"
2927
. "github.com/Mirantis/virtlet/tests/e2e/ginkgo-ext"
3028
)
3129

32-
const cephContainerName = "ceph_cluster"
33-
3430
var _ = Describe("Ceph volumes tests", func() {
3531
var (
3632
monitorIP string
3733
secret string
3834
)
3935

40-
BeforeAll(func() {
41-
monitorIP, secret = setupCeph()
42-
})
43-
44-
AfterAll(func() {
45-
container, err := controller.DockerContainer(cephContainerName)
46-
Expect(err).NotTo(HaveOccurred())
47-
container.Delete()
48-
})
36+
withCeph(&monitorIP, &secret, "")
4937

5038
Context("RBD volumes", func() {
5139
var (
@@ -87,7 +75,7 @@ var _ = Describe("Ceph volumes tests", func() {
8775
scheduleWaitSSH(&vm, &ssh)
8876

8977
It("Must be accessible from within OS", func() {
90-
checkFilesystemAccess(ssh)
78+
expectToBeUsableForFilesystem(ssh, "/dev/vdb")
9179
})
9280
})
9381
})
@@ -98,147 +86,41 @@ var _ = Describe("Ceph volumes tests", func() {
9886
)
9987

10088
BeforeAll(func() {
101-
pv := &v1.PersistentVolume{
102-
ObjectMeta: metav1.ObjectMeta{
103-
Name: "rbd-pv-virtlet",
104-
},
105-
Spec: v1.PersistentVolumeSpec{
106-
Capacity: v1.ResourceList{
107-
v1.ResourceStorage: resource.MustParse("10M"),
108-
},
109-
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
110-
PersistentVolumeSource: v1.PersistentVolumeSource{
111-
FlexVolume: cephPersistentVolumeSource("rbd-test-image-pv", monitorIP, secret),
112-
},
113-
},
114-
}
115-
do(controller.PersistentVolumesClient().Create(pv))
116-
117-
pvc := &v1.PersistentVolumeClaim{
118-
ObjectMeta: metav1.ObjectMeta{
119-
Name: "rbd-claim",
120-
},
121-
Spec: v1.PersistentVolumeClaimSpec{
122-
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
123-
Resources: v1.ResourceRequirements{
124-
Requests: v1.ResourceList{
125-
v1.ResourceStorage: resource.MustParse("10M"),
126-
},
127-
},
128-
},
129-
}
130-
do(controller.PersistentVolumeClaimsClient().Create(pvc))
131-
13289
vm = controller.VM("cirros-vm-rbd-pv")
133-
podCustomization := func(pod *framework.PodInterface) {
134-
pod.Pod.Spec.Volumes = append(pod.Pod.Spec.Volumes, v1.Volume{
135-
Name: "test",
136-
VolumeSource: v1.VolumeSource{
137-
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
138-
ClaimName: "rbd-claim",
139-
},
90+
opts := VMOptions{
91+
PVCs: []framework.PVCSpec{
92+
{
93+
Name: "rbd-pv-virtlet",
94+
Size: "10M",
95+
FlexVolumeOptions: cephOptions("rbd-test-image-pv", monitorIP, secret),
14096
},
141-
})
142-
}
143-
144-
Expect(vm.CreateAndWait(VMOptions{}.ApplyDefaults(), time.Minute*5, podCustomization)).To(Succeed())
97+
},
98+
}.ApplyDefaults()
99+
Expect(vm.CreateAndWait(opts, time.Minute*5, nil)).To(Succeed())
145100
_ = do(vm.Pod()).(*framework.PodInterface)
146101
})
147102

148103
AfterAll(func() {
149104
deleteVM(vm)
150-
controller.PersistentVolumeClaimsClient().Delete("rbd-claim", nil)
151-
controller.PersistentVolumesClient().Delete("rbd-pv-virtlet", nil)
152105
})
153106

154107
It("Must be attached to libvirt domain", func() {
155108
out := do(vm.VirshCommand("domblklist", "<domain>")).(string)
156109
Expect(regexp.MustCompile("(?m:rbd-test-image-pv$)").MatchString(out)).To(BeTrue())
157110
})
158111

159-
Context("Mounted volumes", func() {
160-
var ssh framework.Executor
161-
scheduleWaitSSH(&vm, &ssh)
162-
163-
It("Must be accessible from within OS", func() {
164-
checkFilesystemAccess(ssh)
165-
})
112+
It("Must be accessible from within the VM", func() {
113+
ssh := waitSSH(vm)
114+
expectToBeUsableForFilesystem(ssh, "/dev/vdb")
166115
})
167116
})
168117
})
169118

170-
func checkFilesystemAccess(ssh framework.Executor) {
171-
do(framework.RunSimple(ssh, "sudo /usr/sbin/mkfs.ext2 /dev/vdb"))
172-
do(framework.RunSimple(ssh, "sudo mount /dev/vdb /mnt"))
173-
out := do(framework.RunSimple(ssh, "ls -l /mnt")).(string)
174-
Expect(out).To(ContainSubstring("lost+found"))
175-
}
176-
177-
func setupCeph() (string, string) {
178-
nodeExecutor, err := controller.DinDNodeExecutor("kube-master")
179-
Expect(err).NotTo(HaveOccurred())
180-
181-
route, err := framework.RunSimple(nodeExecutor, "route", "-n")
182-
Expect(err).NotTo(HaveOccurred())
183-
184-
match := regexp.MustCompile(`(?:default|0\.0\.0\.0)\s+([\d.]+)`).FindStringSubmatch(route)
185-
Expect(match).To(HaveLen(2))
186-
187-
monIP := match[1]
188-
cephPublicNetwork := monIP + "/16"
189-
190-
container, err := controller.DockerContainer(cephContainerName)
191-
Expect(err).NotTo(HaveOccurred())
192-
193-
container.Delete()
194-
Expect(container.PullImage("docker.io/ceph/daemon:v3.1.0-stable-3.1-mimic-centos-7")).To(Succeed())
195-
Expect(container.Run("docker.io/ceph/daemon:v3.1.0-stable-3.1-mimic-centos-7",
196-
map[string]string{
197-
"MON_IP": monIP,
198-
"CEPH_PUBLIC_NETWORK": cephPublicNetwork,
199-
"CEPH_DEMO_UID": "foo",
200-
"CEPH_DEMO_ACCESS_KEY": "foo",
201-
"CEPH_DEMO_SECRET_KEY": "foo",
202-
"CEPH_DEMO_BUCKET": "foo",
203-
"DEMO_DAEMONS": "osd mds",
204-
},
205-
"host", nil, false, "demo")).To(Succeed())
206-
207-
cephContainerExecutor := container.Executor(false, "")
208-
By("Waiting for ceph cluster")
209-
Eventually(func() error {
210-
_, err := framework.RunSimple(cephContainerExecutor, "ceph", "-s")
211-
return err
212-
}).Should(Succeed())
213-
By("Ceph cluster started")
214-
215-
var out string
216-
commands := []string{
217-
// Adjust ceph configs
218-
`echo -e "rbd default features = 1\nrbd default format = 2" >> /etc/ceph/ceph.conf`,
219-
220-
// Add rbd pool and volume
221-
`ceph osd pool create libvirt-pool 8 8`,
222-
`rbd create rbd-test-image1 --size 10M --pool libvirt-pool --image-feature layering`,
223-
`rbd create rbd-test-image2 --size 10M --pool libvirt-pool --image-feature layering`,
224-
`rbd create rbd-test-image-pv --size 10M --pool libvirt-pool --image-feature layering`,
225-
226-
// Add user for virtlet
227-
`ceph auth get-or-create client.libvirt`,
228-
`ceph auth caps client.libvirt mon "allow *" osd "allow *"`,
229-
`ceph auth get-key client.libvirt`,
230-
}
231-
for _, cmd := range commands {
232-
out = do(framework.RunSimple(cephContainerExecutor, "/bin/bash", "-c", cmd)).(string)
233-
}
234-
return monIP, out
235-
}
236-
237119
func cephOptions(volume, monitorIP, secret string) map[string]string {
238120
return map[string]string{
239121
"type": "ceph",
240122
"monitor": monitorIP + ":6789",
241-
"user": "libvirt",
123+
"user": "admin",
242124
"secret": secret,
243125
"volume": volume,
244126
"pool": "libvirt-pool",
@@ -252,9 +134,4 @@ func cephVolumeSource(volume, monitorIP, secret string) *v1.FlexVolumeSource {
252134
}
253135
}
254136

255-
func cephPersistentVolumeSource(volume, monitorIP, secret string) *v1.FlexPersistentVolumeSource {
256-
return &v1.FlexPersistentVolumeSource{
257-
Driver: "virtlet/flexvolume_driver",
258-
Options: cephOptions(volume, monitorIP, secret),
259-
}
260-
}
137+
// TODO: use client.admin instead of client.libvirt

0 commit comments

Comments (0)