diff --git a/admin/package-files/rsyslog/flocker.conf b/admin/package-files/rsyslog/flocker.conf index 5af23450b3..f511f6ac97 100644 --- a/admin/package-files/rsyslog/flocker.conf +++ b/admin/package-files/rsyslog/flocker.conf @@ -3,4 +3,3 @@ # should be discarded by rsyslog. if $programname == 'flocker-control' then ~ if $programname == 'flocker-dataset-agent' then ~ -if $programname == 'flocker-container-agent' then ~ diff --git a/admin/package-files/systemd/flocker-container-agent.service b/admin/package-files/systemd/flocker-container-agent.service deleted file mode 100644 index 9b31687c91..0000000000 --- a/admin/package-files/systemd/flocker-container-agent.service +++ /dev/null @@ -1,11 +0,0 @@ -[Unit] -Description=Flocker Container Agent -After=docker.service -Wants=docker.service - -[Service] -ExecStart=/usr/sbin/flocker-container-agent --journald -Restart=always - -[Install] -WantedBy=multi-user.target diff --git a/admin/package-files/upstart/flocker-container-agent.conf b/admin/package-files/upstart/flocker-container-agent.conf deleted file mode 100644 index 3711d12a71..0000000000 --- a/admin/package-files/upstart/flocker-container-agent.conf +++ /dev/null @@ -1,25 +0,0 @@ -# flocker-container-agent - flocker-container-agent job file - -description "Flocker Container Agent service" -author "ClusterHQ " - -start on runlevel [2345] -stop on runlevel [016] - -respawn - -pre-start script - if [ ! -r /etc/flocker/agent.yml ]; then - echo "Cannot read configuration file '/etc/flocker/agent.yml'." - exit 1 - fi - while [ ! -S /var/run/docker.sock ]; - do - echo "Cannot find Docker daemon (required)." - sleep 5 - done -end script - -script - exec /usr/sbin/flocker-container-agent --logfile=/var/log/flocker/flocker-container-agent.log -end script diff --git a/admin/packaging.py b/admin/packaging.py index 09d2227f68..696a5d6363 100644 --- a/admin/packaging.py +++ b/admin/packaging.py @@ -587,7 +587,6 @@ def __str__(self): # Upstart control files are not installed as conffiles. 'non-conffile-in-etc /etc/init/flocker-dataset-agent.conf', - 'non-conffile-in-etc /etc/init/flocker-container-agent.conf', 'non-conffile-in-etc /etc/init/flocker-control.conf', 'non-conffile-in-etc /etc/init/flocker-docker-plugin.conf', @@ -673,7 +672,6 @@ def __str__(self): # Upstart control files are not installed as conffiles. 
'file-in-etc-not-marked-as-conffile etc/init/flocker-dataset-agent.conf', # noqa - 'file-in-etc-not-marked-as-conffile etc/init/flocker-container-agent.conf', # noqa 'file-in-etc-not-marked-as-conffile etc/init/flocker-control.conf', 'file-in-etc-not-marked-as-conffile etc/init/flocker-docker-plugin.conf', # noqa @@ -998,8 +996,6 @@ def omnibus_package_builder( flocker_node_path), (FilePath('/opt/flocker/bin/flocker-control'), flocker_node_path), - (FilePath('/opt/flocker/bin/flocker-container-agent'), - flocker_node_path), (FilePath('/opt/flocker/bin/flocker-dataset-agent'), flocker_node_path), (FilePath('/opt/flocker/bin/flocker-diagnostics'), diff --git a/admin/test/test_packaging.py b/admin/test/test_packaging.py index 3653a75d30..51f0d55b77 100644 --- a/admin/test/test_packaging.py +++ b/admin/test/test_packaging.py @@ -984,8 +984,6 @@ def fake_make_dependencies( flocker_node_path), (FilePath('/opt/flocker/bin/flocker-control'), flocker_node_path), - (FilePath('/opt/flocker/bin/flocker-container-agent'), - flocker_node_path), (FilePath('/opt/flocker/bin/flocker-dataset-agent'), flocker_node_path), (FilePath('/opt/flocker/bin/flocker-diagnostics'), diff --git a/build.yaml b/build.yaml index dd89e4c5e7..7d6117f63b 100644 --- a/build.yaml +++ b/build.yaml @@ -841,51 +841,6 @@ job_type: timeout: 30 directories_to_delete: *run_trial_directories_to_delete - run_trial_on_AWS_CentOS_7_flocker.node.functional.test_docker: - # FLOC-3903: docker on centos use loop-devmapper - # by default. That makes it much slower than Ubuntu - # with aufs. It leads to timeouts, but seems to do - # a bit better on the medium instance - on_nodes_with_labels: 'aws-centos-7-SELinux-T2Medium' - module: flocker.node.functional.test_docker - with_steps: - - { type: 'shell', cli: *run_trial_cli } - archive_artifacts: *flocker_artifacts - publish_test_results: true - coverage_report: true - clean_repo: true - # Increase the timeout due to FLOC-3903 - timeout: 45 - directories_to_delete: *run_trial_directories_to_delete - - # Split out just to do the CentOS version above, - # for the reasons outlined in that section - run_trial_on_AWS_Ubuntu_Trusty_flocker.node.functional.test_docker: - on_nodes_with_labels: 'aws-ubuntu-trusty-T2Medium' - module: flocker.node.functional.test_docker - with_steps: - - { type: 'shell', cli: *run_trial_cli } - archive_artifacts: *flocker_artifacts - publish_test_results: true - coverage_report: true - clean_repo: true - timeout: 30 - directories_to_delete: *run_trial_directories_to_delete - - # Split out just to do the CentOS version above, - # for the reasons outlined in that section - run_trial_on_AWS_Ubuntu_Xenial_flocker.node.functional.test_docker: - on_nodes_with_labels: 'aws-ubuntu-xenial-T2Medium' - module: flocker.node.functional.test_docker - with_steps: - - { type: 'shell', cli: *run_trial_cli } - archive_artifacts: *flocker_artifacts - publish_test_results: true - coverage_report: true - clean_repo: true - timeout: 30 - directories_to_delete: *run_trial_directories_to_delete - run_trial_for_storage_driver: run_trial_for_ebs_storage_driver_on_CentOS_7: on_nodes_with_labels: 'aws-centos-7-SELinux-T2Medium' diff --git a/flocker/acceptance/endtoend/test_diagnostics.py b/flocker/acceptance/endtoend/test_diagnostics.py index 3496ca6332..744a229593 100644 --- a/flocker/acceptance/endtoend/test_diagnostics.py +++ b/flocker/acceptance/endtoend/test_diagnostics.py @@ -13,7 +13,6 @@ from ...common.runner import run_ssh, download from ...testtools import AsyncTestCase, async_runner from 
..testtools import require_cluster, ACCEPTANCE_TEST_TIMEOUT -from testtools.matchers import MatchesAny, Equals class DiagnosticsTests(AsyncTestCase): @@ -23,12 +22,7 @@ class DiagnosticsTests(AsyncTestCase): run_tests_with = async_runner(timeout=ACCEPTANCE_TEST_TIMEOUT) - # This only requires the container agent to check - # that its log is collected. We still care about - # that working, so we run it. We should stop - # running it for this test when we get closer - # to never running it in production. - @require_cluster(1, require_container_agent=True) + @require_cluster(1) def test_export(self, cluster): """ ``flocker-diagnostics`` creates an archive of all Flocker service logs @@ -62,20 +56,15 @@ def download_archive(remote_archive_path): def verify_archive(local_archive_path): with tarfile.open(local_archive_path.path) as f: - actual_basenames = set() + actual_filenames = set() for name in f.getnames(): basename = os.path.basename(name) if name == basename: # Ignore the directory entry continue - actual_basenames.add(basename) + actual_filenames.add(basename) - container_agent_basenames = set([ - 'flocker-container-agent_startup.gz', - 'flocker-container-agent_eliot.gz', - ]) - - expected_basenames = set([ + expected_filenames = set([ 'flocker-control_startup.gz', 'flocker-control_eliot.gz', 'flocker-dataset-agent_startup.gz', @@ -95,13 +84,11 @@ def verify_archive(local_archive_path): 'fdisk', 'lshw', ]) - self.expectThat( - actual_basenames, - MatchesAny( - Equals(expected_basenames), - Equals(expected_basenames.union( - container_agent_basenames)), - ) + # Missing expected filenames will show up as differences. + # Unexpected filenames will be ignored. + self.assertEqual( + set(), + expected_filenames.difference(actual_filenames) ) verifying = downloading.addCallback(verify_archive) diff --git a/flocker/acceptance/integration/test_mongodb.py b/flocker/acceptance/integration/test_mongodb.py index e049d28022..42789fcded 100644 --- a/flocker/acceptance/integration/test_mongodb.py +++ b/flocker/acceptance/integration/test_mongodb.py @@ -28,7 +28,7 @@ def insert_data(test_case, host, port): def got_client(client): database = client.example - database.posts.insert({u"the data": u"it moves"}) + database.posts.insert_one({u"the data": u"it moves"}) d.addCallback(got_client) return d diff --git a/flocker/acceptance/integration/testtools.py b/flocker/acceptance/integration/testtools.py index 25f4514129..9a42f6db3b 100644 --- a/flocker/acceptance/integration/testtools.py +++ b/flocker/acceptance/integration/testtools.py @@ -3,11 +3,163 @@ """ Testing infrastructure for integration tests. """ +from docker.errors import NotFound as DockerNotFound +from pyrsistent import PClass, field +from twisted.internet.defer import DeferredLock +from twisted.internet.threads import deferToThread -from ..testtools import require_cluster, create_dataset +from ..testtools import require_cluster, create_dataset, get_docker_client from ...testtools import AsyncTestCase, random_name +DOCKER_CLIENT_LOCK = DeferredLock() + + +def deferToThreadWithLock(lock, f, *args, **kwargs): + """ + Like ``deferToThread``, but acquires ``lock`` before calling ``f`` and + releases the lock when ``f`` returns or raises. 
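+    This behaves like ``lock.run(deferToThread, f, *args, **kwargs)``; the lock is released whether the threaded call succeeds or fails.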
+ """ + locking = lock.acquire() + + def locked(lock): + return deferToThread(f, *args, **kwargs) + + calling = locking.addCallback(locked) + + def called(result): + lock.release() + return result + + unlocking = calling.addBoth(called) + return unlocking + + +class Container(PClass): + """ + Perform asynchronous docker-py start and remove operations on a Docker + container that has been created. + Docker-py operations are performed in a threadpool and with a lock in case + DockerClient is not thread safe. + + :attr DockerClient client: A DockerClient connected to a specific docker + server endpoint. + :attr container_id: The unique ID of the container to operate on. + """ + client = field() + container_id = field() + + def start(self): + """ + :returns: A ``Deferred`` that fires when the docker API start call + completes. + """ + return deferToThreadWithLock( + DOCKER_CLIENT_LOCK, + self.client.start, + container=self.container_id + ) + + def remove(self): + """ + Forcefully remove the container, even if it is still running. + + :returns: A ``Deferred`` that fires when the docker API remove call + completes. + """ + return deferToThreadWithLock( + DOCKER_CLIENT_LOCK, + self.client.remove_container, + self.container_id, + force=True, + ) + + +def create_container(client, create_arguments): + """ + Create a Docker container and return a ``Container`` with which to perform + start and remove operations. + + :param DockerClient client: The DockerClient which will be used to create + the container. + :param dict create_arguments: Keyword arguments to pass to + DockerClient.create_container. + :returns: A ``Container``. + """ + container_data = client.create_container(**create_arguments) + container_id = container_data["Id"] + return Container( + client=client, + container_id=container_id, + ) + + +def stateful_container_for_test(test, cluster, node, image_name, + dataset, internal_path, internal_port, + external_port): + """ + Create and start a ``Container`` on ``node``. + Clean it up when the ``test`` has completed. + + :param TestCase test: The test. + :param Cluster cluster: The ``Cluster`` with certificates for + authenticating with the docker daemon on ``node``. + :param unicode image_name: A name of the Docker image to use. + :param Dataset dataset: The mounted Flocker dataset to bind mount into the + container. + :param FilePath internal_path: The path inside the container where + ``dataset`` will be mounted. + :param int internal_port: The port inside the container where + ``image_name`` listens. + :param int external_port: A port on the ``node`` which will be mapped to + the ``internal_port``. + :returns: A ``Deferred`` that fires with a ``Container``. + """ + client = get_docker_client( + cluster, + node.public_address + ) + arguments = { + u"name": random_name(test), + u"image": image_name, + u"host_config": client.create_host_config( + binds=[ + u"{}:{}".format( + dataset.path.path, + internal_path.path + ), + ], + port_bindings={internal_port: external_port}, + restart_policy={u'Name': u'never'}, + ), + u"ports": [internal_port], + } + + d = deferToThreadWithLock( + DOCKER_CLIENT_LOCK, + create_container, + client=client, + create_arguments=arguments + ) + + def try_cleanup(container): + d = container.remove() + # The container may have been deliberately removed in the test. 
+ d.addErrback( + lambda failure: failure.trap(DockerNotFound) + ) + return d + + def register_cleanup_and_start(container): + test.addCleanup(try_cleanup, container) + d = container.start() + d.addCallback(lambda ignored_start_result: container) + return d + + d.addCallback(register_cleanup_and_start) + return d + + def make_dataset_integration_testcase(image_name, volume_path, internal_port, insert_data, assert_inserted): """ @@ -32,39 +184,7 @@ class IntegrationTests(AsyncTestCase): Test that the given application can start and restart with Flocker datasets as volumes. """ - def _start_container(self, name, dataset_id, external_port, cluster, - cleanup=True): - """ - Start a container with a volume. - - :param unicode name: The container name. - :param UUID dataset_id: The dataset ID. - :param cluster: The ``Cluster``. - :param int external_port: External port to expose on the container. - :param bool cleanup: If true, delete container when test is over. - - :return: ``Deferred`` that fires when the container has been - started. - """ - app = { - u"name": name, - u"node_uuid": cluster.nodes[0].uuid, - u"image": image_name, - u"ports": [{u"internal": internal_port, - u"external": external_port}], - u'restart_policy': {u'name': u'never'}, - u"volumes": [{u"dataset_id": unicode(dataset_id), - u"mountpoint": volume_path.path}], - } - created = cluster.create_container(app) - if cleanup: - created.addCallback(lambda _: self.addCleanup( - cluster.remove_container, name)) - return created - - # TODO: this test doesn't actually require the container agent, it just - # uses it do to the setup. It should be ported to the docker API. - @require_cluster(1, require_container_agent=True) + @require_cluster(1) def test_start(self, cluster): """ The specified application can be started with a Docker dataset @@ -75,52 +195,108 @@ def test_start(self, cluster): containers can require a completely empty volume, or one that is writeable by non-root users, etc.. """ - host = cluster.nodes[0].public_address + node = cluster.nodes[0] port = 12345 creating_dataset = create_dataset(self, cluster) - creating_dataset.addCallback( - lambda dataset: self._start_container(random_name(self), - dataset.dataset_id, - port, cluster)) - creating_dataset.addCallback( - lambda _: insert_data(self, host, port)) - creating_dataset.addCallback( - lambda _: assert_inserted(self, host, port)) - return creating_dataset - - # TODO: this test don't actually require the container agent, it just - # uses it do to the setup. It should be ported to the docker API. - @require_cluster(1, require_container_agent=True) + + def create_container(dataset): + return stateful_container_for_test( + test=self, + cluster=cluster, + node=node, + image_name=image_name, + dataset=dataset, + internal_path=volume_path, + internal_port=internal_port, + external_port=port, + ) + creating_container = creating_dataset.addCallbacks( + create_container, + self.fail, + ) + + def begin_insert_data(container): + return insert_data(self, node.public_address, port) + inserting_data = creating_container.addCallbacks( + begin_insert_data, + self.fail, + ) + + def check(ignored): + return assert_inserted( + self, node.public_address, port + ) + checking = inserting_data.addCallback(check) + + return checking + + @require_cluster(1) def test_restart(self, cluster): """ The specified application can be started with a Docker dataset configured as its volume that has already been used by the same application previously. 
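 Data written through the first container must remain readable through a second container that mounts the same dataset on a different external port.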
""" - host = cluster.nodes[0].public_address + datasets = [] + node = cluster.nodes[0] port = 12345 another_port = 12366 - first_container = random_name(self) creating_dataset = create_dataset(self, cluster) - def created(dataset): - started = self._start_container(first_container, - dataset.dataset_id, - port, cluster, cleanup=False) - started.addCallback( - lambda _: insert_data(self, host, port)) - - restarting = started.addCallback( - lambda _: cluster.remove_container(first_container)) - restarting.addCallback( - lambda _: self._start_container(random_name(self), - dataset.dataset_id, - another_port, cluster)) - return restarting - creating_dataset.addCallback(created) - - creating_dataset.addCallback( - lambda _: assert_inserted(self, host, another_port)) - return creating_dataset + def create_container(dataset): + datasets.append(dataset) + return stateful_container_for_test( + test=self, + cluster=cluster, + node=node, + image_name=image_name, + dataset=dataset, + internal_path=volume_path, + internal_port=internal_port, + external_port=port, + ) + creating_container = creating_dataset.addCallbacks( + create_container, + self.fail, + ) + + def begin_insert_data(container): + d = insert_data(self, node.public_address, port) + d.addCallback(lambda ignored: container) + return d + inserting_data = creating_container.addCallbacks( + begin_insert_data, + self.fail, + ) + + def remove_container(container): + return container.remove() + removing_container = inserting_data.addCallbacks( + remove_container, + self.fail, + ) + + def create_another_container(ignored): + [dataset] = datasets + return stateful_container_for_test( + test=self, + cluster=cluster, + node=node, + image_name=image_name, + dataset=dataset, + internal_path=volume_path, + internal_port=internal_port, + external_port=another_port, + ) + recreating = removing_container.addCallbacks( + create_another_container, + self.fail, + ) + + def check(container): + return assert_inserted(self, node.public_address, another_port) + checking = recreating.addCallback(check) + + return checking return IntegrationTests diff --git a/flocker/acceptance/obsolete/__init__.py b/flocker/acceptance/obsolete/__init__.py deleted file mode 100644 index 3f8cdc8226..0000000000 --- a/flocker/acceptance/obsolete/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright ClusterHQ Inc. See LICENSE file for details. - -""" -Acceptance tests for ``flocker`` components that are either deprecated -or planned for deprecation in the near future. -""" diff --git a/flocker/acceptance/obsolete/test_containers.py b/flocker/acceptance/obsolete/test_containers.py deleted file mode 100644 index f37f252417..0000000000 --- a/flocker/acceptance/obsolete/test_containers.py +++ /dev/null @@ -1,361 +0,0 @@ -# Copyright ClusterHQ Inc. See LICENSE file for details. - -""" -Tests for the control service REST API. -""" - -from json import loads, dumps -from time import sleep -from datetime import timedelta - -from testtools import run_test_with - -from twisted.internet import reactor - -from ...common import loop_until -from ...testtools import AsyncTestCase, async_runner, random_name -from ..testtools import ( - require_cluster, require_moving_backend, create_dataset, - create_python_container, verify_socket, post_http_server, - assert_http_server, query_http_server, is_process_running, - ACCEPTANCE_TEST_TIMEOUT -) -from ..scripts import SCRIPTS - - -class ContainerAPITests(AsyncTestCase): - """ - Tests for the container API. 
- """ - - run_tests_with = async_runner(timeout=ACCEPTANCE_TEST_TIMEOUT) - - def _create_container(self, cluster, script): - """ - Create a container listening on port 8080. - - :return: ``Deferred`` firing with a container dictionary once the - container is up and running. - """ - d = create_python_container( - self, cluster, { - u"ports": [{u"internal": 8080, u"external": 8080}], - u"node_uuid": cluster.nodes[0].uuid, - }, script) - - def check_result(response): - dl = verify_socket(cluster.nodes[0].public_address, 8080) - dl.addCallback(lambda _: response) - return dl - - d.addCallback(check_result) - return d - - @require_cluster(1, require_container_agent=True) - def test_create_container_with_ports(self, cluster): - """ - Create a container including port mappings on a single-node cluster. - """ - return self._create_container(cluster, SCRIPTS.child(b"hellohttp.py")) - - @require_cluster(1, require_container_agent=True) - def test_create_container_restart_stopped(self, cluster): - """ - A container is restarted if it is stopped. - """ - responses = [] - - def query_and_save(): - querying = query_http_server( - cluster.nodes[0].public_address, 8080 - ) - querying.addCallback(responses.append) - return querying - - created = self._create_container( - cluster, SCRIPTS.child(b"exitinghttp.py") - ) - - # `query_http_server` will kill the server first time round. - created.addCallback(lambda ignored: query_and_save()) - - # Call it again and see that the container is running again. - created.addCallback(lambda ignored: query_and_save()) - - # Verify one of the assumptions ... That the container restarted in - # between requests. exitinghttp.py gives back a process-unique random - # value as the response body. - def check_different_response(ignored): - self.assertNotEqual( - responses[0], - responses[1], - "Responses to two requests were the same, " - "container probably did not restart.", - ) - created.addCallback(check_different_response) - - return created - - @require_cluster(1, require_container_agent=True) - def test_create_container_with_environment(self, cluster): - """ - If environment variables are specified when creating a container, - those variables are available in the container's environment. - """ - environment = {u"XBLOO": u"YBLAH", u"ZBLOO": u"ZEBRA"} - - d = create_python_container( - self, cluster, { - u"ports": [{u"internal": 8080, u"external": 8080}], - u"node_uuid": cluster.nodes[0].uuid, - u"environment": environment, - }, SCRIPTS.child(b"envhttp.py")) - - def checked(_): - host = cluster.nodes[0].public_address - d = query_http_server(host, 8080) - d.addCallback(lambda data: dict(loads(data))) - return d - d.addCallback(checked) - - d.addCallback( - lambda response: - self.assertDictContainsSubset(environment, response) - ) - return d - - @require_moving_backend - @run_test_with(async_runner(timeout=timedelta(minutes=6))) - @require_cluster(2, require_container_agent=True) - def test_move_container_with_dataset(self, cluster): - """ - Create a container with an attached dataset, issue API call - to move the container. Wait until we can connect to the running - container on the new host and verify the data has moved with it. 
- """ - data = {u"the data": u"it moves"} - post_data = {"data": dumps(data)} - node1, node2 = cluster.nodes - container_name = random_name(self) - creating_dataset = create_dataset(self, cluster) - - def create_container(dataset): - d = create_python_container( - self, cluster, { - u"name": container_name, - u"ports": [{u"internal": 8080, u"external": 8080}], - u"node_uuid": node1.uuid, - u"volumes": [{u"dataset_id": unicode(dataset.dataset_id), - u"mountpoint": u"/data"}], - }, SCRIPTS.child(b"datahttp.py"), - additional_arguments=[u"/data"], - ) - return d - creating_dataset.addCallback(create_container) - creating_dataset.addCallback( - lambda _: post_http_server( - self, node1.public_address, 8080, post_data) - ) - - def move_container(_): - moved = cluster.move_container( - container_name, node2.uuid - ) - return moved - creating_dataset.addCallback(move_container) - creating_dataset.addCallback( - lambda _: assert_http_server( - self, node2.public_address, 8080, - expected_response=post_data["data"]) - ) - - return creating_dataset - - @require_cluster(1, require_container_agent=True) - def test_create_container_with_dataset(self, cluster): - """ - Create a container with an attached dataset, write some data, - shut it down, create a new container with same dataset, make sure - the data is still there. - """ - data = {u"the data": u"sample written data"} - post_data = {"data": dumps(data)} - node = cluster.nodes[0] - container_name = random_name(self) - creating_dataset = create_dataset(self, cluster) - self.dataset_id = None - - def create_container(dataset): - self.dataset_id = unicode(dataset.dataset_id) - d = create_python_container( - self, cluster, { - u"name": container_name, - u"ports": [{u"internal": 8080, u"external": 8080}], - u"node_uuid": node.uuid, - u"volumes": [{u"dataset_id": self.dataset_id, - u"mountpoint": u"/data"}], - }, SCRIPTS.child(b"datahttp.py"), - additional_arguments=[u"/data"], - cleanup=False, - ) - return d - creating_dataset.addCallback(create_container) - creating_dataset.addCallback( - lambda _: post_http_server( - self, node.public_address, 8080, post_data) - ) - creating_dataset.addCallback( - lambda _: assert_http_server( - self, node.public_address, 8080, - expected_response=post_data["data"]) - ) - creating_dataset.addCallback( - lambda _: cluster.remove_container(container_name)) - - def create_second_container(_): - d = create_python_container( - self, cluster, { - u"ports": [{u"internal": 8080, u"external": 8081}], - u"node_uuid": node.uuid, - u"volumes": [{u"dataset_id": self.dataset_id, - u"mountpoint": u"/data"}], - }, SCRIPTS.child(b"datahttp.py"), - additional_arguments=[u"/data"], - ) - return d - creating_dataset.addCallback(create_second_container) - creating_dataset.addCallback( - lambda _: assert_http_server( - self, node.public_address, 8081, - expected_response=post_data["data"]) - ) - return creating_dataset - - @require_cluster(1, require_container_agent=True) - def test_current(self, cluster): - """ - The current container endpoint includes a currently running container. 
- """ - creating = self._create_container( - cluster, SCRIPTS.child(b"hellohttp.py") - ) - - def created(data): - data[u"running"] = True - - def in_current(): - current = cluster.current_containers() - current.addCallback(lambda result: data in result) - return current - return loop_until(reactor, in_current) - creating.addCallback(created) - return creating - - @require_cluster(1, require_container_agent=True) - def test_non_root_container_can_access_dataset(self, cluster): - """ - A container running as a user that is not root can write to a - dataset attached as a volume. - """ - node = cluster.nodes[0] - creating_dataset = create_dataset(self, cluster) - - def created_dataset(dataset): - return create_python_container( - self, cluster, { - u"ports": [{u"internal": 8080, u"external": 8080}], - u"node_uuid": node.uuid, - u"volumes": [{u"dataset_id": unicode(dataset.dataset_id), - u"mountpoint": u"/data"}], - }, SCRIPTS.child(b"nonrootwritehttp.py"), - additional_arguments=[u"/data"]) - creating_dataset.addCallback(created_dataset) - - creating_dataset.addCallback( - lambda _: assert_http_server(self, node.public_address, 8080)) - return creating_dataset - - @run_test_with(async_runner(timeout=timedelta(minutes=20))) - @require_cluster(2, require_container_agent=True) - def test_reboot(self, cluster): - """ - After a reboot the containers are only started once all datasets are - available locally. - - We disable the dataset agent during reboot in order to ensure as - much as possible that the container agent gets stale data from the - control service. - """ - # Find a node which is not running the control service. - # If the control node is rebooted, we won't get stale dataset state. - node = [node for node in cluster.nodes if - node.public_address != cluster.control_node.public_address][0] - - def query(): - d = query_http_server(node.public_address, 8080) - d.addCallback(loads) - return d - - creating_dataset = create_dataset(self, cluster, node=node) - - def created_dataset(dataset): - return create_python_container( - self, cluster, { - u"ports": [{u"internal": 8080, u"external": 8080}], - u"node_uuid": node.uuid, - u"volumes": [{u"dataset_id": unicode(dataset.dataset_id), - u"mountpoint": u"/data"}], - }, SCRIPTS.child(b"remember_boot_id.py"), - additional_arguments=[u"/data"]) - creating_dataset.addCallback(created_dataset) - - creating_dataset.addCallback(lambda _: query()) - - def got_initial_result(initial_result): - self.addCleanup(node.run_script, "enable_service", - "flocker-dataset-agent") - # Disable the service to force container agent to always get stale - # data from the container agent: - d = node.run_script("disable_service", "flocker-dataset-agent") - d.addCallback(lambda _: node.reboot()) - # Wait for reboot to be far enough along that everything - # should be shutdown: - d.addCallback(lambda _: sleep(20)) - # Wait until server is back up: - changed = d.addCallback(lambda _: - verify_socket(node.public_address, 22)) - - # Wait until container agent is back up: - changed.addCallback( - lambda _: loop_until(reactor, lambda: is_process_running( - node, b'flocker-container-agent'))) - - # Start up dataset agent so container agent can proceed: - def up_again(_): - # Give it a few seconds for container agent to get stale data - # from control service: - sleep(10) - # Now start up dataset agent again: - return node.run_script( - "enable_service", "flocker-dataset-agent") - changed.addCallback(up_again) - - changed.addCallback(lambda _: loop_until( - reactor, lambda: 
query().addCallback( - lambda result: result != initial_result))) - changed.addCallback(lambda _: query()) - - def result_changed(new_result): - self.assertEqual( - dict(current_same=(new_result["current"] == - initial_result["current"]), - written_same=(new_result["written"] == - initial_result["written"])), - dict(current_same=False, # post-reboot expect new boot_id - written_same=True, # written data survived reboot - )) - changed.addCallback(result_changed) - return changed - - creating_dataset.addCallback(got_initial_result) - return creating_dataset diff --git a/flocker/acceptance/scripts/envhttp.py b/flocker/acceptance/scripts/envhttp.py deleted file mode 100644 index c35632dba4..0000000000 --- a/flocker/acceptance/scripts/envhttp.py +++ /dev/null @@ -1,23 +0,0 @@ -""" -HTTP server that returns its environment variables as JSON. -""" - -from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer -from json import dumps -from os import environ - - -class Handler(BaseHTTPRequestHandler): - """ - Return the current environment in HTTP response. - """ - def do_GET(s): - s.send_response(200) - s.send_header("content-type", "text/json") - s.end_headers() - s.wfile.write(dumps(environ.items())) - s.wfile.close() - - -httpd = HTTPServer((b"0.0.0.0", 8080), Handler) -httpd.serve_forever() diff --git a/flocker/acceptance/scripts/exitinghttp.py b/flocker/acceptance/scripts/exitinghttp.py deleted file mode 100644 index b48527bb49..0000000000 --- a/flocker/acceptance/scripts/exitinghttp.py +++ /dev/null @@ -1,25 +0,0 @@ -""" -HTTP server that exits after responding to a GET request. -""" - -from os import urandom - -from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer - -PROCESS_UNIQUE_VALUE = urandom(32).encode("hex") - - -class Handler(BaseHTTPRequestHandler): - def do_GET(s): - s.send_response(200) - s.send_header( - b"content-length", - u"{}".format(len(PROCESS_UNIQUE_VALUE)).encode("ascii") - ) - s.end_headers() - s.wfile.write(PROCESS_UNIQUE_VALUE) - s.wfile.flush() - s.wfile.close() - -httpd = HTTPServer((b"0.0.0.0", 8080), Handler) -httpd.handle_request() diff --git a/flocker/acceptance/scripts/hellohttp.py b/flocker/acceptance/scripts/hellohttp.py deleted file mode 100644 index a754106e1f..0000000000 --- a/flocker/acceptance/scripts/hellohttp.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -HTTP server that returns a fixed string. -""" - -from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer - - -class Handler(BaseHTTPRequestHandler): - def do_GET(s): - s.send_response(200) - s.end_headers() - s.wfile.write(b"hi") - s.wfile.close() - -httpd = HTTPServer((b"0.0.0.0", 8080), Handler) -httpd.serve_forever() diff --git a/flocker/acceptance/scripts/nonrootwritehttp.py b/flocker/acceptance/scripts/nonrootwritehttp.py deleted file mode 100644 index de35abe183..0000000000 --- a/flocker/acceptance/scripts/nonrootwritehttp.py +++ /dev/null @@ -1,28 +0,0 @@ -""" -HTTP server that ensures it can write to given directory as a non-root -user, then returns "hi". 
-""" - -from sys import argv -from os import setuid -from pwd import getpwnam -from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer - - -class Handler(BaseHTTPRequestHandler): - def do_GET(s): - s.send_response(200) - s.end_headers() - # Ensure we can write to given directory: - try: - with open(argv[1] + "/test", "w") as f: - f.write(b"testing 123") - except Exception as e: - s.wfile.write(str(e.__class__) + ": " + str(e)) - else: - s.wfile.write(b"hi") - s.wfile.close() - -setuid(getpwnam("nobody")[2]) -httpd = HTTPServer((b"0.0.0.0", 8080), Handler) -httpd.serve_forever() diff --git a/flocker/acceptance/scripts/proxyhttp.py b/flocker/acceptance/scripts/proxyhttp.py deleted file mode 100644 index b7c03c1ba0..0000000000 --- a/flocker/acceptance/scripts/proxyhttp.py +++ /dev/null @@ -1,23 +0,0 @@ -""" -HTTP server that proxies requests to a remote server based on Docker -linking environment variables. -""" - -from urllib import urlopen -from os import getenv -from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer - -DEST_URL = "http://%s:%s/" % ( - getenv("DEST_PORT_80_TCP_ADDR"), getenv("DEST_PORT_80_TCP_PORT")) - - -class Handler(BaseHTTPRequestHandler): - def do_GET(s): - s.send_response(200) - s.end_headers() - s.wfile.write(urlopen(DEST_URL).read()) - s.wfile.close() - - -httpd = HTTPServer((b"0.0.0.0", 8081), Handler) -httpd.serve_forever() diff --git a/flocker/acceptance/scripts/remember_boot_id.py b/flocker/acceptance/scripts/remember_boot_id.py deleted file mode 100644 index ee2426acd7..0000000000 --- a/flocker/acceptance/scripts/remember_boot_id.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -HTTP server that reports both current and previously recorded boot_id, -the latter stored persistently on disk. -""" - -from sys import argv -from json import dumps -import os - -from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer - - -# Try to write out file ASAP, to increase chances of hitting race -# condition with dataset setup: -with file("/proc/sys/kernel/random/boot_id") as boot_f: - boot_id = boot_f.read() - -file_path = os.path.join(argv[1], "written.json") -if not os.path.exists(file_path): - with file(file_path, "w") as f: - f.write(boot_id) -written = file(file_path).read() - - -class Handler(BaseHTTPRequestHandler): - def do_GET(self): - self.send_response(200) - self.end_headers() - self.wfile.write(dumps({"current": boot_id, - "written": written})) - self.wfile.close() - - -httpd = HTTPServer((b"0.0.0.0", 8080), Handler) -httpd.serve_forever() diff --git a/flocker/acceptance/testtools.py b/flocker/acceptance/testtools.py index 91f376102d..d179070d46 100644 --- a/flocker/acceptance/testtools.py +++ b/flocker/acceptance/testtools.py @@ -5,7 +5,6 @@ """ from datetime import timedelta from functools import wraps -from json import dumps from os import environ, close from unittest import SkipTest, skipUnless from uuid import uuid4, UUID @@ -19,12 +18,10 @@ from docker.tls import TLSConfig -from twisted.internet import defer -from twisted.web.http import OK, CREATED +from twisted.web.http import OK from twisted.python.filepath import FilePath from twisted.internet import reactor from twisted.internet.error import ProcessTerminated -from twisted.internet.task import deferLater from eliot import start_action, Message, write_failure from eliot.twisted import DeferredContext @@ -34,7 +31,7 @@ from pyrsistent import PClass, field, CheckedPVector, pmap from ..control import ( - Application, AttachedVolume, DockerImage, Manifestation, Dataset, + AttachedVolume, 
Manifestation, Dataset, ) from ..common import gather_deferreds, loop_until, timeout, retry_failure @@ -45,7 +42,6 @@ from ..control.httpapi import REST_API_PORT from ..ca import treq_with_authentication, UserCredential -from ..testtools import random_name from ..apiclient import FlockerClient, DatasetState from ..node.backends import backend_loader from ..node.script import get_api @@ -64,8 +60,7 @@ __all__ = [ 'require_cluster', - 'MONGO_APPLICATION', 'MONGO_IMAGE', 'get_mongo_application', - 'create_application', 'create_attached_volume', + 'create_attached_volume', 'get_docker_client', 'ACCEPTANCE_TEST_TIMEOUT' ] @@ -79,12 +74,6 @@ PYMONGO_INSTALLED, "PyMongo not installed") -# XXX The MONGO_APPLICATION will have to be removed because it does not match -# the tutorial yml files, and the yml should be testably the same: -# https://clusterhq.atlassian.net/browse/FLOC-947 -MONGO_APPLICATION = u"mongodb-example-application" -MONGO_IMAGE = u"clusterhq/mongodb" - DOCKER_PORT = 2376 @@ -132,32 +121,6 @@ def get_path(name): ) -def get_mongo_application(): - """ - Return a new ``Application`` with a name and image corresponding to - the MongoDB tutorial example: - - http://doc-dev.clusterhq.com/gettingstarted/tutorial/index.html - """ - return Application( - name=MONGO_APPLICATION, - image=DockerImage.from_string(MONGO_IMAGE + u':latest'), - ) - - -def create_application(name, image, ports=frozenset(), volume=None, - links=frozenset(), environment=None, memory_limit=None, - cpu_shares=None): - """ - Instantiate an ``Application`` with the supplied parameters and return it. - """ - return Application( - name=name, image=DockerImage.from_string(image + u':latest'), - ports=ports, volume=volume, links=links, environment=environment, - memory_limit=memory_limit, cpu_shares=cpu_shares - ) - - def create_attached_volume(dataset_id, mountpoint, maximum_size=None, metadata=pmap()): """ @@ -333,7 +296,10 @@ def get_mongo_client(host, port=27017): """ def create_mongo_client(): try: - client = MongoClient(host=host, port=port) + # Ensure that writes are only acknowledged once they've been + # written to disk. + # http://api.mongodb.com/python/current/api/pymongo/mongo_client.html + client = MongoClient(host=host, port=port, fsync=True) client.areyoualive.posts.insert({"ping": 1}) return client except PyMongoError: @@ -613,131 +579,6 @@ def got_results(results): waiting = loop_until(reactor, created) return waiting - @log_method - def create_container(self, properties): - """ - Create a container with the specified properties. - - :param dict properties: A ``dict`` mapping to the API request fields - to create a container. - - :returns: A ``Deferred`` which fires with an API response when the - container with the supplied properties has been persisted to the - cluster configuration. - """ - request = self.treq.post( - self.base_url + b"/configuration/containers", - data=dumps(properties), - headers={b"content-type": b"application/json"}, - ) - - request.addCallback(check_and_decode_json, CREATED) - return request - - @log_method - def move_container(self, name, node_uuid): - """ - Move a container. - - :param unicode name: The name of the container to move. - :param unicode node_uuid: The UUID to which the container should - be moved. - :returns: A ``Deferred`` which fires with an API response when the - container move has been persisted to the cluster configuration. 
- """ - request = self.treq.post( - self.base_url + b"/configuration/containers/" + - name.encode("ascii"), - data=dumps({u"node_uuid": node_uuid}), - headers={b"content-type": b"application/json"}, - ) - - request.addCallback(check_and_decode_json, OK) - return request - - @log_method - def remove_container(self, name): - """ - Remove a container. - - :param unicode name: The name of the container to remove. - - :returns: A ``Deferred`` which fires with an API response when the - container removal has been persisted to the cluster configuration. - """ - request = self.treq.delete( - self.base_url + b"/configuration/containers/" + - name.encode("ascii"), - ) - - request.addCallback(check_and_decode_json, OK) - return request - - @log_method - def configured_containers(self): - """ - Get current containers from configuration. - - :return: A ``Deferred`` firing with a tuple (cluster instance, API - response). - """ - request = self.treq.get( - self.base_url + b"/configuration/containers", - ) - - request.addCallback(check_and_decode_json, OK) - return request - - @log_method - def current_containers(self): - """ - Get current containers. - - :return: A ``Deferred`` firing with a tuple (cluster instance, API - response). - """ - request = self.treq.get( - self.base_url + b"/state/containers", - ) - - request.addCallback(check_and_decode_json, OK) - return request - - @log_method - def wait_for_container(self, container_properties): - """ - Poll the container state API until a container exists with all the - supplied ``container_properties``. - - :param dict container_properties: The attributes of the container that - we're waiting for. All the keys, values and those of nested - dictionaries must match. - :returns: A ``Deferred`` which fires with an API response when a - container with the supplied properties appears in the cluster. - """ - def created(): - """ - Check the container state list for the expected container - properties. 
- """ - request = self.current_containers() - - def got_response(containers): - expected_container = container_properties.copy() - for container in containers: - container_items = container.items() - if all([ - item in container_items - for item in expected_container.items() - ]): - # Return cluster and container state - return container - return False - request.addCallback(got_response) - return request - - return loop_until(reactor, created) - @log_method def current_nodes(self): """ @@ -876,18 +717,6 @@ def cleanup_all_containers(_): for container in client.containers(): client.remove_container(container["Id"], force=True) - def cleanup_flocker_containers(_): - cleaning_containers = api_clean_state( - u"containers", - self.configured_containers, - self.current_containers, - lambda item: self.remove_container(item[u"name"]), - ) - return timeout( - reactor, cleaning_containers, 60, - Exception("Timed out cleaning up Flocker containers"), - ) - def cleanup_datasets(_): cleaning_datasets = api_clean_state( u"datasets", @@ -954,7 +783,6 @@ def detach_and_destroy_volume(volume): ) d = DeferredContext(cleanup_leases()) - d.addCallback(cleanup_flocker_containers) if remove_foreign_containers: d.addCallback(cleanup_all_containers) d.addCallback(cleanup_datasets) @@ -1087,8 +915,7 @@ def _get_test_cluster(reactor): ) -def require_cluster(num_nodes, required_backend=None, - require_container_agent=False): +def require_cluster(num_nodes, required_backend=None): """ A decorator which will call the supplied test_method when a cluster with the required number of nodes is available. @@ -1144,19 +971,6 @@ def clean(cluster): waiting_for_cluster.addCallback(clean) - def enable_container_agent(cluster): - # This should ideally be some sort of fixture/testresources - # thing, but the APIs aren't quite right today. - def configure_container_agent(node): - return ensure_container_agent_enabled( - node, require_container_agent) - d = defer.gatherResults( - map(configure_container_agent, cluster.nodes), - consumeErrors=True) - d.addCallback(lambda _: cluster) - return d - - waiting_for_cluster.addCallback(enable_container_agent) calling_test_method = waiting_for_cluster.addCallback( call_test_method_with_cluster, test_case, args, kwargs @@ -1166,155 +980,6 @@ def configure_container_agent(node): return decorator -def is_container_agent_running(node): - """ - Check if the container agent is running on the specified node. - - :param Node node: the node to check. - :return Deferred[bool]: a Deferred that will fire when - with whether the container agent is runnning. - """ - d = node.run_script("service_running", "flocker-container-agent") - - def not_existing(failure): - failure.trap(ProcessTerminated) - return False - d.addCallbacks(lambda result: True, not_existing) - return d - - -def set_container_agent_enabled_on_node(node, enabled): - """ - Ensure the container agent is enabled/disabled as specified. - - :param Node node: the node on which to ensure the container - agent's state - :param bool enabled: True to ensure the container agent - is enabled and running, false to ensure the opposite. - :return Deferred[None]: a Deferred that will fire when - the container agent is in the desired state. - """ - if enabled: - d = node.run_script("enable_service", "flocker-container-agent") - else: - d = node.run_script("disable_service", "flocker-container-agent") - # If the agent was disabled We have to reboot to clear the control cache. - # If we want to avoid the reboot we could add an API to do this. 
- if not enabled: - d.addCallback(lambda _: node.reboot()) - # Wait for reboot to be far enough along that everything - # should be shutdown: - d.addCallback(lambda _: deferLater(reactor, 20, lambda: None)) - # Wait until server is back up: - d = d.addCallback(lambda _: - verify_socket(node.public_address, 22)) - d.addCallback(lambda _: loop_until( - reactor, lambda: is_process_running( - node, b'flocker-dataset-agent'))) - d.addCallback( - lambda _: - node.run_script("disable_service", "flocker-dataset-agent")) - d.addCallback( - lambda _: - node.run_script("enable_service", "flocker-dataset-agent")) - d.addCallback(lambda _: loop_until( - reactor, lambda: is_process_running( - node, b'flocker-dataset-agent'))) - # Hide the value in the callback as it could come from - # different places and shouldn't be used. - d.addCallback(lambda _: None) - return d - - -def is_process_running(node, name): - """ - Check if the process `name` is running on `node`. - - :param Node node: the node to check. - :param bytes name: the name of the process to look for. - :return Deferred[bool]: a deferred that will fire - with whether at least one process named `name` is running - on `node`. - """ - # pidof will return the pid if the processes is - # running else exit with status 1 which triggers the - # errback chain. - command = [b'pidof', b'-x', name] - d = node.run_as_root(command) - - def not_existing(failure): - failure.trap(ProcessTerminated) - return False - d.addCallbacks(lambda result: True, not_existing) - return d - - -def ensure_container_agent_enabled(node, to_enable): - """ - Ensure the container agent is enabled/disabled as specified. - - Doesn't make any changes if the agent is already in the - desired state. - - :param Node node: the node on which to ensure the container - agent's state - :param bool to_enable: True to ensure the container agent - is enabled and running, False to ensure the opposite. - :return Deferred[None]: a Deferred that will fire when - the container agent is in the desired state. - """ - # If the agent is enabled but stopped, and the test - # requests no container agent, then if the test rebooted - # the node it would get a running container agent after - # that point. This means that a test that fails in a - # particular way could cause incorrect results in later - # tests that rely on reboots. This function could change - # to check the enabled status as well. - d = is_container_agent_running(node) - - def change_if_needed(enabled): - if enabled != to_enable: - return set_container_agent_enabled_on_node(node, to_enable) - d.addCallback(change_if_needed) - return d - - -def create_python_container(test_case, cluster, parameters, script, - cleanup=True, additional_arguments=()): - """ - Create a Python container that runs a given script. - - :param TestCase test_case: The current test. - :param Cluster cluster: The cluster to run on. - :param dict parameters: Parameters for the ``create_container`` JSON - query, beyond those provided by this function. - :param FilePath script: Python code to run. - :param bool cleanup: If true, remove container when test is over. - :param additional_arguments: Additional arguments to pass to the - script. - - :return: ``Deferred`` that fires when the configuration has been updated. 
- """ - parameters = parameters.copy() - parameters[u"image"] = u"python:2.7-slim" - parameters[u"command_line"] = [u"python2.7", u"-c", - script.getContent().decode("ascii")] + list( - additional_arguments) - if u"restart_policy" not in parameters: - parameters[u"restart_policy"] = {u"name": u"never"} - if u"name" not in parameters: - parameters[u"name"] = random_name(test_case) - creating = cluster.create_container(parameters) - - def created(response): - if cleanup: - test_case.addCleanup(cluster.remove_container, parameters[u"name"]) - test_case.assertEqual(response, parameters) - return response - creating.addCallback(created) - return creating - - def extract_external_port( client, container_identifier, internal_port ): diff --git a/flocker/node/__init__.py b/flocker/node/__init__.py index e72545b258..3b8501e753 100644 --- a/flocker/node/__init__.py +++ b/flocker/node/__init__.py @@ -13,7 +13,6 @@ ILocalState, NodeLocalState, ) -from ._container import ApplicationNodeDeployer, NOOP_SLEEP_TIME from ._p2p import P2PManifestationDeployer from .backends import BackendDescription @@ -24,9 +23,8 @@ __all__ = [ 'IDeployer', 'ILocalState', 'NodeLocalState', 'IStateChange', - 'NoOp', 'NOOP_SLEEP_TIME', + 'NoOp', 'P2PManifestationDeployer', - 'ApplicationNodeDeployer', 'run_state_change', 'in_parallel', 'sequentially', 'BackendDescription', 'DeployerType', diff --git a/flocker/node/_container.py b/flocker/node/_container.py deleted file mode 100644 index 5ea3811284..0000000000 --- a/flocker/node/_container.py +++ /dev/null @@ -1,626 +0,0 @@ -# Copyright ClusterHQ Inc. See LICENSE file for details. -# -*- test-case-name: flocker.node.test.test_container -*- - -""" -Deploy applications on nodes. -""" - -from warnings import warn -from datetime import timedelta - -from zope.interface import implementer - -from pyrsistent import PClass, field - -from eliot import Message, Logger, start_action - -from twisted.internet.defer import succeed - -from . import IStateChange, in_parallel, sequentially -from ._docker import DockerClient, PortMap, Environment, Volume as DockerVolume - -from ..control._model import ( - Application, AttachedVolume, NodeState, DockerImage, Port, Link, - RestartNever, ip_to_uuid, - ) - -from ._deploy import IDeployer, NodeLocalState - - -_logger = Logger() - - -NOOP_SLEEP_TIME = timedelta(seconds=5) - - -def _eliot_system(part): - return u"flocker:node:container_deployer:" + part - - -@implementer(IStateChange) -class StartApplication(PClass): - """ - Launch the supplied application as a container. - - :ivar Application application: The ``Application`` to create and - start. - - :ivar NodeState node_state: The state of the node the ``Application`` - is running on. - """ - application = field(type=Application, mandatory=True) - node_state = field(type=NodeState, mandatory=True) - - # This (and other eliot_action implementations) uses `start_action` because - # it was easier than defining a new `ActionType` with a bunch of fields. - # It might be worth doing that work eventually, though. Also, this can - # turn into a regular attribute when the `_logger` argument is no longer - # required by Eliot. 
- @property - def eliot_action(self): - return start_action( - _logger, _eliot_system(u"startapplication"), - name=self.application.name, - ) - - def run(self, deployer, state_persister): - application = self.application - - volumes = [] - if application.volume is not None: - dataset_id = application.volume.manifestation.dataset_id - volumes.append(DockerVolume( - container_path=application.volume.mountpoint, - node_path=self.node_state.paths[dataset_id])) - - if application.ports is not None: - port_maps = map(lambda p: PortMap(internal_port=p.internal_port, - external_port=p.external_port), - application.ports) - else: - port_maps = [] - - environment = {} - - for link in application.links: - environment.update(_link_environment( - protocol=u"tcp", - alias=link.alias, - local_port=link.local_port, - hostname=self.node_state.hostname, - remote_port=link.remote_port, - )) - - if application.environment is not None: - environment.update(application.environment) - - if environment: - docker_environment = Environment( - variables=frozenset(environment.iteritems())) - else: - docker_environment = None - - return deployer.docker_client.add( - application.name, - application.image.full_name, - ports=port_maps, - environment=docker_environment, - volumes=volumes, - mem_limit=application.memory_limit, - cpu_shares=application.cpu_shares, - # The only supported policy is "never". See FLOC-2449. - restart_policy=RestartNever(), - command_line=application.command_line, - ) - - -def _link_environment(protocol, alias, local_port, hostname, remote_port): - """ - Generate the environment variables used for defining a docker link. - - Docker containers expect an enviroment variable - `_PORT__TCP`` which contains the URL of the remote end - of a link, as well as parsed variants ``_ADDR``, ``_PORT``, ``_PROTO``. - - :param unicode protocol: The protocol used for the link. - :param unicode alias: The name of the link. - :param int local_port: The port the local application expects to access. - :param unicode hostname: The remote hostname to connect to. - :param int remote_port: The remote port to connect to. - """ - alias = alias.upper() - base = u'%s_PORT_%d_%s' % (alias, local_port, protocol.upper()) - - return { - base: u'%s://%s:%d' % (protocol, hostname, remote_port), - base + u'_ADDR': hostname, - base + u'_PORT': u'%d' % (remote_port,), - base + u'_PROTO': protocol, - } - - -@implementer(IStateChange) -class StopApplication(PClass): - """ - Stop and disable the given application. - - :ivar Application application: The ``Application`` to stop. - """ - application = field(type=Application, mandatory=True) - - @property - def eliot_action(self): - return start_action( - _logger, _eliot_system(u"stopapplication"), - name=self.application.name, - ) - - def run(self, deployer, state_persister): - application = self.application - unit_name = application.name - return deployer.docker_client.remove(unit_name) - - -@implementer(IDeployer) -class ApplicationNodeDeployer(object): - """ - Discover and calculate changes for applications running on a node. - - :ivar unicode hostname: The hostname of the node that this is running - on. - :ivar IDockerClient docker_client: The Docker client API to use in - deployment operations. Default ``DockerClient``. - """ - def __init__(self, hostname, docker_client=None, node_uuid=None): - if node_uuid is None: - # To be removed in https://clusterhq.atlassian.net/browse/FLOC-1795 - warn("UUID is required, this is for backwards compat with existing" - " tests only. 
If you see this in production code that's " - "a bug.", DeprecationWarning, stacklevel=2) - node_uuid = ip_to_uuid(hostname) - self.node_uuid = node_uuid - self.hostname = hostname - if docker_client is None: - docker_client = DockerClient() - self.docker_client = docker_client - - def _attached_volume_for_container( - self, container, path_to_manifestations - ): - """ - Infer the Flocker manifestation which is in use by the given container. - - :param flocker.node._docker.Unit container: The container to inspect. - :param dict path_to_manifestations: A mapping from mount points (as - ``FilePath``) to identifiers (as ``unicode``) of the datasets that - are mounted there. - - :return: ``None`` if no Flocker manifestation can be associated with - the container state. Otherwise an ``AttachedVolume`` referring to - that manifestation and the location in the container where it is - mounted. - """ - if container.volumes: - # XXX https://clusterhq.atlassian.net/browse/FLOC-49 - # we only support one volume per container - # at this time - # XXX https://clusterhq.atlassian.net/browse/FLOC-773 - # we assume all volumes are datasets - docker_volume = list(container.volumes)[0] - try: - manifestation = path_to_manifestations[ - docker_volume.node_path] - except KeyError: - # Apparently not a dataset we're managing, give up. - return None - else: - return AttachedVolume( - manifestation=manifestation, - mountpoint=docker_volume.container_path, - ) - return None - - def _ports_for_container(self, container): - """ - Determine the network ports that are exposed by the given container. - - :param flocker.node._docker.Unit container: The container to inspect. - - :return: A ``list`` of ``Port`` instances. - """ - ports = [] - for portmap in container.ports: - ports.append(Port( - internal_port=portmap.internal_port, - external_port=portmap.external_port - )) - return ports - - def _environment_for_container(self, container): - """ - Get the custom environment specified for the container and infer its - links. - - It would be nice to do these two things separately but links are only - represented in the container's environment so both steps involve - inspecting the environment. - - :param flocker.node._docker.Unit container: The container to inspect. - - :return: A two-tuple of the container's links and environment. Links - are given as a ``list`` of ``Link`` instances. Environment is - given as a ``list`` of two-tuples giving the environment variable - name and value (as ``bytes``). - """ - # Improve the factoring of this later. Separate it into two methods. - links = [] - environment = [] - if container.environment: - environment_dict = container.environment.to_dict() - for label, value in environment_dict.items(): - # _PORT__TCP_PORT= - parts = label.rsplit(b"_", 4) - try: - alias, pad_a, port, pad_b, pad_c = parts - local_port = int(port) - except ValueError: - # _PORT__TCP - parts = label.rsplit(b"_", 3) - try: - alias, pad_a, port, pad_b = parts - except ValueError: - environment.append((label, value)) - continue - if not (pad_a, pad_b) == (b"PORT", b"TCP"): - environment.append((label, value)) - continue - if (pad_a, pad_b, pad_c) == (b"PORT", b"TCP", b"PORT"): - links.append(Link( - local_port=local_port, - remote_port=int(value), - alias=alias, - )) - return links, environment - - def _applications_from_containers( - self, containers, path_to_manifestations - ): - """ - Reconstruct the original application state from the container state - that resulted from it. 
- - :param list containers: The Docker containers that exist here. - :param path_to_manifestations: See ``_attached_volume_for_container``. - - :return: A ``list`` of ``Application`` instances inferred from - ``containers`` and ``path_to_manifestations``. - """ - applications = [] - for container in containers: - image = DockerImage.from_string(container.container_image) - volume = self._attached_volume_for_container( - container, path_to_manifestations, - ) - ports = self._ports_for_container(container) - links, environment = self._environment_for_container(container) - applications.append(Application( - name=unicode(container.name), - image=image, - ports=frozenset(ports), - volume=volume, - environment=environment if environment else None, - links=frozenset(links), - memory_limit=container.mem_limit, - cpu_shares=container.cpu_shares, - restart_policy=container.restart_policy, - running=(container.activation_state == u"active"), - command_line=container.command_line, - )) - return applications - - def _nodestate_from_applications(self, applications): - """ - Construct a ``NodeState`` representing the state of this node given a - particular set of applications. - - :param list applications: ``Application`` instances representing the - applications on this node. - - :return: A ``NodeLocalState`` with shared_state_changes() that - are composed of a single ``NodeState`` representing the application - state only of this node. - """ - return NodeLocalState( - node_state=NodeState( - uuid=self.node_uuid, - hostname=self.hostname, - applications=applications, - manifestations=None, - paths=None, - ) - ) - - def discover_state(self, cluster_state, persistent_state): - """ - List all the ``Application``\ s running on this node. - - The given local state is used to figure out if applications have - attached volumes that are specific manifestations. If no - manifestations are known then discovery isn't done and ignorance - is claimed about applications. This ensures that the information - returned is accurate, and therefore that convergence is done - correctly. - - This does mean you can't run an application agent without a - dataset agent. See - https://clusterhq.atlassian.net/browse/FLOC-1646. - - :return: A ``Deferred`` which fires with a list containing a - ``NodeState`` instance with information only about - ``Application`` and ports. ``NodeState.manifestations`` and - ``NodeState.paths`` will not be filled in. - """ - local_state = cluster_state.get_node(self.node_uuid, - hostname=self.hostname) - if local_state.manifestations is None: - # Without manifestations we don't know if local applications' - # volumes are manifestations or not. Rather than return - # incorrect information leading to possibly erroneous - # convergence actions, just declare ignorance. Eventually the - # convergence agent for datasets will discover the information - # and then we can proceed. 
- return succeed( - NodeLocalState( - node_state=NodeState( - uuid=self.node_uuid, - hostname=self.hostname, - applications=None, - manifestations=None, - paths=None, - ) - ) - ) - - path_to_manifestations = { - path: local_state.manifestations[dataset_id] - for (dataset_id, path) - in local_state.paths.items() - } - - applications = self.docker_client.list() - applications.addCallback( - self._applications_from_containers, path_to_manifestations - ) - applications.addCallback(self._nodestate_from_applications) - return applications - - def _restart_for_volume_change(self, node_state, state, configuration): - """ - Determine whether the current volume state of an application is - divergent from the volume configuration for that application in a way - that merits an application restart right now. - - Many actual divergences are allowed and ignored: - - - The volume metadata. This metadata only exists in the - configuration. It is always missing from the state object. - - - The volume size. The dataset agent is not reliably capable of - performing resizes (if we wait for the actual and configured - sizes to match, we might have to wait forever). - - - The volume's deleted state. The application will be allowed to - continue to use a volume that has been marked for deletion until - the application is explicitly stopped. - - :param NodeState node_state: The known local state of this node. - :param AttachedVolume state: The known state of the volume of an - application being considered. Or ``None`` if it is known not to - have a volume. - :param AttachedVolume configuration: The configured state of the volume - of the application being considered. Or ``None`` if it is - configured to not have a volume. - - :return: If the state differs from the configuration in a way which - needs to be corrected by the convergence agent (for example, the - application is configured with a volume but is running without - one), ``True``. If it does not differ or only differs in the - allowed ways mentioned above, ``False``. - """ - def log(restart, reason=None): - Message.new( - message_type=_eliot_system(u"restart_for_volume_change"), - restart=restart, - state_is_none=state is None, - configuration_is_none=configuration is None, - reason=reason, - ).write() - return restart - - def restart_if_available(dataset_id): - """ - Considering that we would like to restart the application with a - volume using the given dataset_id, determine whether we can - actually do so at this time. - - If the indicated dataset has no manifestation on this node, we will - not be able to start the application again after stopping it. So - leave it running until such a manifestation exists. - - :param unicode dataset_id: The identifier of the dataset we want. - - :return: If there is a manifestation of the given dataset on this - node, ``True``. Otherwise, ``False``. - """ - if dataset_id in node_state.manifestations: - # We want it and we have it. - return log(True, "have configured dataset") - else: - # We want it but we don't have it. 
- return log(False, "missing configured dataset") - - state_id = getattr( - getattr(state, "manifestation", None), "dataset_id", None - ) - config_id = getattr( - getattr(configuration, "manifestation", None), "dataset_id", None - ) - - if state_id == config_id: - return log(False, "dataset matches") - elif config_id is None: - return log(True, "volume removed") - else: - return restart_if_available(config_id) - - def _restart_for_application_change( - self, node_state, state, configuration - ): - """ - Determine whether the current state of an application is divergent from - the configuration for that application in a way that merits an - application restart right now. - - Certain differences are not considered divergences: - - - Certain volume differences. See ``_restart_for_volume_change``. - - :param NodeState node_state: The known local state of this node. - :param Application state: The current state of the application. - :param Application configuration: The desired configuration for the - application. - - :return: If the state differs from the configuration in a way which - needs to be corrected by the convergence agent (for example, - different network ports should be exposed), ``True``. If it does - not differ or only differs in the allowed ways mentioned above, - ``False``. - """ - volume_state = state.volume - volume_configuration = configuration.volume - - restart_state = state.restart_policy - # The volume comparison is too complicated to leave up to `!=` below. - # Check volumes separately. - # Restart policies don't implement comparison usefully. See FLOC-2500 - # XXX This is an optimization to assign both values with a single call - # to ``set``. ``set`` is slow. - comparable_state = state.set( - volume=None, - restart_policy=RestartNever(), - ) - comparable_configuration = configuration.set( - volume=None, - restart_policy=RestartNever(), - ) - - return ( - comparable_state != comparable_configuration or - - # Restart policies were briefly supported but they interact poorly - # with system restarts. They're disabled now (except for the - # default policy, "never"). Ignore the Application's configured - # policy and enforce the "never" policy. This will change any - # existing container that was configured with a different policy. - # See FLOC-2449. - # - # Also restart policies don't implement comparison usefully. See - # FLOC-2500. - not isinstance(restart_state, RestartNever) or - - self._restart_for_volume_change( - node_state, volume_state, volume_configuration - ) - ) - - def calculate_changes(self, desired_configuration, current_cluster_state, - local_state): - """ - Work out which changes need to happen to the local state to match - the given desired state. - - Currently this involves the following phases: - - 1. Change proxies to point to new addresses (should really be - last, see https://clusterhq.atlassian.net/browse/FLOC-380) - 2. Stop all relevant containers. - 3. Start and restart any containers that should be running - locally, so long as their required datasets are available. - """ - # We are a node-specific IDeployer: - current_node_state = current_cluster_state.get_node( - self.node_uuid, hostname=self.hostname) - if current_node_state.applications is None: - # We don't know current application state, so can't calculate - # anything. This will be the case if we don't know the local - # datasets' state yet; see notes in discover_state(). 
- return sequentially(changes=[]) - - phases = [] - - for node in desired_configuration.nodes.values(): - if node.uuid == self.node_uuid: - desired_node_applications = node.applications - break - else: - desired_node_applications = {} - - all_applications = current_node_state.applications.values() - - # Compare the applications being changed by name only. Other - # configuration changes aren't important at this point. - local_application_names = {app.name for app in all_applications} - desired_local_state = frozenset(desired_node_applications.keys()) - # Don't start applications that exist on this node but aren't running; - # Docker is in charge of restarts (and restarts aren't supported yet - # anyway; see FLOC-2449): - start_names = desired_local_state.difference(local_application_names) - stop_names = {app.name for app in all_applications}.difference( - desired_local_state) - - start_containers = [ - StartApplication(application=app, node_state=current_node_state) - for app in desired_node_applications.values() - if ((app.name in start_names) and - # If manifestation isn't available yet, don't start: - # XXX in FLOC-1240 non-primaries should be checked. - (app.volume is None or - app.volume.manifestation.dataset_id in - current_node_state.manifestations)) - ] - stop_containers = [ - StopApplication(application=app) for app in all_applications - if app.name in stop_names - ] - - restart_containers = [] - - applications_to_inspect = ( - {app.name for app in all_applications} & desired_local_state) - current_applications_dict = dict(zip( - [a.name for a in all_applications], all_applications - )) - desired_applications_dict = desired_node_applications - for application_name in applications_to_inspect: - inspect_desired = desired_applications_dict[application_name] - inspect_current = current_applications_dict[application_name] - - if self._restart_for_application_change( - current_node_state, inspect_current, inspect_desired - ): - restart_containers.append(sequentially(changes=[ - StopApplication(application=inspect_current), - StartApplication(application=inspect_desired, - node_state=current_node_state), - ])) - - if stop_containers: - phases.append(in_parallel(changes=stop_containers)) - start_restart = start_containers + restart_containers - if start_restart: - phases.append(in_parallel(changes=start_restart)) - - return sequentially( - changes=phases, - sleep_when_empty=NOOP_SLEEP_TIME - ) diff --git a/flocker/node/_docker.py b/flocker/node/_docker.py index 98bb14bea3..e05b7eefbd 100644 --- a/flocker/node/_docker.py +++ b/flocker/node/_docker.py @@ -7,379 +7,22 @@ from __future__ import absolute_import -from datetime import timedelta - from errno import ECONNREFUSED from socket import error as socket_error from functools import partial -from itertools import repeat -from time import sleep - -from zope.interface import Interface, implementer from docker import Client -from docker.errors import APIError, NotFound - -from eliot import Message, MessageType, Field, start_action - -from repoze.lru import LRUCache +from docker.errors import APIError -from pyrsistent import field, PClass, pset - -from requests import Response from requests.exceptions import ConnectionError from requests.packages.urllib3.exceptions import ProtocolError -from characteristic import with_cmp - -from twisted.python.components import proxyForInterface -from twisted.python.filepath import FilePath -from twisted.internet.defer import succeed, fail -from twisted.internet.threads import deferToThread -from 
twisted.web.http import NOT_FOUND, INTERNAL_SERVER_ERROR +from twisted.web.http import INTERNAL_SERVER_ERROR from ..common import ( - poll_until, retry_if, decorate_methods, with_retry, get_default_retry_steps, ) -from ..control._model import ( - RestartNever, RestartAlways, RestartOnFailure, pset_field, pvector_field) - - -LOG_CACHED_IMAGE = MessageType( - u"flocker:node:docker:image_from_cache", - [Field.for_types(u"image", [unicode], "The image ID.")], - "An image was retrieved from the cache." -) - - -class AlreadyExists(Exception): - """A unit with the given name already exists.""" - - -@with_cmp(["address", "apierror"]) -class AddressInUse(Exception): - """ - The listen address for an exposed port was in use and could not be bound. - """ - def __init__(self, address, apierror): - """ - :param tuple address: The conventional Python representation of the - address which could not be bound (eg, an (ipv4 address, port - number) pair for IPv4 addresses). - :param APIError apierror: The original Docker API error indicating this - problem. Or ``None`` if the error was not derived from the result - of a Docker API call. - """ - Exception.__init__(self, address, apierror) - self.address = address - self.apierror = apierror - - -class Environment(PClass): - """ - A collection of environment variables. - - :ivar frozenset variables: A ``frozenset`` of tuples containing - key and value pairs representing the environment variables. - """ - variables = field(mandatory=True) - - def to_dict(self): - """ - Convert to a dictionary suitable for serialising to JSON and then on to - the Docker API. - - :return: ``dict`` mapping keys to values. - """ - return dict(self.variables) - - -class Volume(PClass): - """ - A Docker volume. - - :ivar FilePath node_path: The volume's path on the node's - filesystem. - - :ivar FilePath container_path: The volume's path within the - container. - """ - node_path = field(mandatory=True, type=FilePath) - container_path = field(mandatory=True, type=FilePath) - - -class PortMap(PClass): - """ - A record representing the mapping between a port exposed internally by a - docker container and the corresponding external port on the host. - - :ivar int internal_port: The port number exposed by the container. - :ivar int external_port: The port number exposed by the host. - """ - internal_port = field(mandatory=True, type=int) - external_port = field(mandatory=True, type=int) - - -class ImageDataCache(PClass): - """ - A record representing cached image data. The cache only stores - the data we care about from an inspected image. - - :ivar list command: The image command. - :ivar list environment: A list of unicode strings representing - the image's environment variables. - """ - command = field(mandatory=True, type=(list, type(None))) - environment = field(mandatory=True, type=(list, type(None))) - - -class Unit(PClass): - """ - Information about a unit managed by Docker. - - XXX "Unit" is geard terminology, and should be renamed. See - https://clusterhq.atlassian.net/browse/FLOC-819 - - :ivar unicode name: The name of the unit, which may not be the same as - the container name. - - :ivar unicode container_name: The name of the container where the - application is running. - - :ivar unicode activation_state: The state of the - container. ``u"active"`` indicates it is running, ``u"inactive"`` - indicates it is not running. See - https://clusterhq.atlassian.net/browse/FLOC-187 about using - constants instead of strings and other improvements. 
- - :ivar unicode container_image: The docker image name associated with this - container. - - :ivar PSet ports: The ``PortMap`` instances which define how - connections to ports on the host are routed to ports exposed in - the container. - - :ivar Environment environment: An ``Environment`` whose variables - will be supplied to the Docker container or ``None`` if there are no - environment variables for this container. - - :ivar PSet volumes: ``Volume`` instances, the container's volumes. - - :ivar int mem_limit: The number of bytes to which to limit the in-core - memory allocations of this unit. Or ``None`` to apply no limits. The - behavior when the limit is encountered depends on the container - execution driver but the likely behavior is for the container process - to be killed (and therefore the container to exit). Docker most likely - maps this value onto the cgroups ``memory.limit_in_bytes`` value. - - :ivar int cpu_shares: The number of CPU shares to allocate to this unit. - Or ``None`` to let it have the default number of shares. Docker maps - this value onto the cgroups ``cpu.shares`` value (the default of which - is probably 1024). - - :ivar IRestartPolicy restart_policy: The restart policy of the container. - - :ivar command_line: Custom command to run using the image, a ``PVector`` - of ``unicode``. ``None`` means use default. - - :ivar swappiness: Tunable swappiness of the container. - Default of 0 disables swap. - """ - name = field(mandatory=True) - container_name = field(mandatory=True) - activation_state = field(mandatory=True) - container_image = field(mandatory=True, initial=None) - ports = pset_field(PortMap) - environment = field(mandatory=True, initial=None) - volumes = pset_field(Volume) - mem_limit = field(mandatory=True, initial=None) - cpu_shares = field(mandatory=True, initial=None) - restart_policy = field(mandatory=True, initial=RestartNever()) - command_line = pvector_field(unicode, optional=True, initial=None) - swappiness = field(mandatory=False, initial=0, type=int) - - -class IDockerClient(Interface): - """ - A client for the Docker HTTP API. - - Note the difference in semantics between the results of ``add()`` - (firing does not indicate application started successfully) - vs. ``remove()`` (firing indicates application has finished shutting - down). - """ - - def add(unit_name, image_name, ports=None, environment=None, volumes=(), - mem_limit=None, cpu_shares=None, restart_policy=RestartNever(), - command_line=None, swappiness=0): - """ - Install and start a new unit. - - Note that callers should not assume success indicates the unit has - finished starting up. In addition to asynchronous nature of Docker, - even if container is up and running the application within it might - still be starting up, e.g. it may not have bound the external ports - yet. As a result the final success of application startup is out of - scope for this method. - - :param unicode unit_name: The name of the unit to create. - :param unicode image_name: The Docker image to use for the unit. - :param list ports: A list of ``PortMap``\ s mapping ports exposed in - the container to ports exposed on the host. Default ``None`` means - that no port mappings will be configured for this unit. If a - ``PortMap`` instance's ``external_port`` is set to ``0`` a free - port will automatically be assigned. The assigned port will be - reported for the container in the result of ``IDockerClient.list``. - :param Environment environment: Environment variables for the - container. 
Default ``None`` means that no environment variables
-            will be supplied to the unit.
-        :param volumes: A sequence of ``Volume`` instances to mount.
-        :param int mem_limit: The number of bytes to which to limit the
-            in-core memory allocations of the new unit. Or ``None`` to apply
-            no limits.
-        :param int cpu_shares: The number of CPU shares to allocate to the
-            new unit. Or ``None`` to let it have the default number of
-            shares. Docker maps this value onto the cgroups ``cpu.shares``
-            value (the default of which is probably 1024).
-        :param IRestartPolicy restart_policy: The restart policy of the
-            container.
-        :param command_line: Custom command to run using the image, a
-            sequence of ``unicode``, or ``None`` to use default image
-            command line.
-        :param swappiness: Tune container's memory swappiness.
-            Default of 0 disables swap.
-
-        :return: ``Deferred`` that fires on success, or errbacks with
-            :class:`AlreadyExists` if a unit by that name already exists.
-        """
-
-    def exists(unit_name):
-        """
-        Check whether the unit exists.
-
-        :param unicode unit_name: The name of the unit whose existence
-            we're checking.
-
-        :return: ``Deferred`` that fires with ``True`` if unit exists,
-            otherwise ``False``.
-        """
-
-    def remove(unit_name):
-        """
-        Stop and delete the given unit.
-
-        This can be done multiple times in a row for the same unit.
-
-        :param unicode unit_name: The name of the unit to stop.
-
-        :return: ``Deferred`` that fires once the unit has been stopped
-            and removed.
-        """
-
-    def list():
-        """
-        List all known units.
-
-        :return: ``Deferred`` firing with ``set`` of :class:`Unit`.
-        """
-
-
-def make_response(code, message):
-    """
-    Create a ``requests.Response`` with the given response code and message.
-
-    :param int code: The HTTP response code to include in the fake response.
-    :param unicode message: The HTTP response message to include in the fake
-        response. The message will be encoded using ASCII.
-    """
-    response = Response()
-    response.status_code = code
-    response.reason = message
-    return response
-
-
-@implementer(IDockerClient)
-class FakeDockerClient(object):
-    """
-    In-memory fake that simulates talking to a docker daemon.
-
-    The state of the simulated units is stored in memory.
-
-    :ivar dict _units: See ``units`` of ``__init__``\ .
-    :ivar pset _used_ports: A set of integers giving the port numbers which
-        will be considered in use. Attempts to add containers which use these
-        ports will fail.
-    """
-
-    def __init__(self, units=None):
-        """
-        :param dict units: A dictionary of canned ``Unit``\ s which will be
-            manipulated and returned by the methods of this
-            ``FakeDockerClient``.
-        :type units: ``dict`` mapping `unit_name` to ``Unit``\ .
-        """
-        if units is None:
-            units = {}
-        self._units = units
-        self._used_ports = pset()
-
-    def add(self, unit_name, image_name, ports=frozenset(), environment=None,
-            volumes=frozenset(), mem_limit=None, cpu_shares=None,
-            restart_policy=RestartNever(), command_line=None, swappiness=0):
-        if unit_name in self._units:
-            return fail(AlreadyExists(unit_name))
-        for port in ports:
-            if port.external_port in self._used_ports:
-                raise AddressInUse(
-                    address=(b"0.0.0.0", port.external_port),
-                    apierror=APIError(
-                        'fake api response from server',
-                        response=make_response(500, 'fake response')),
-                )
-
-        all_ports = set(range(2 ** 15, 2 ** 16))
-        assigned_ports = []
-        for port in ports:
-            if port.external_port == 0:
-                available_ports = pset(all_ports) - self._used_ports
-                assigned = next(iter(available_ports))
-                port = port.set(external_port=assigned)
-            assigned_ports.append(port)
-            self._used_ports = self._used_ports.add(port.external_port)
-
-        self._units[unit_name] = Unit(
-            name=unit_name,
-            container_name=unit_name,
-            container_image=image_name,
-            ports=frozenset(assigned_ports),
-            environment=environment,
-            volumes=frozenset(volumes),
-            activation_state=u'active',
-            mem_limit=mem_limit,
-            cpu_shares=cpu_shares,
-            restart_policy=restart_policy,
-            command_line=command_line,
-            swappiness=swappiness
-        )
-        return succeed(None)
-
-    def exists(self, unit_name):
-        return succeed(unit_name in self._units)
-
-    def remove(self, unit_name):
-        if unit_name in self._units:
-            del self._units[unit_name]
-        return succeed(None)
-
-    def list(self):
-        units = set(self._units.values())
-        return succeed(units)
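For context, a minimal sketch of how the fake removed here is typically driven in unit tests (the unit name and image are illustrative; ``FakeDockerClient`` fires its Deferreds synchronously, so the result is available immediately):

    # Sketch: exercising the in-memory fake.
    client = FakeDockerClient()
    d = client.add(u"my-app", u"busybox")
    d.addCallback(lambda _: client.list())
    results = []
    d.addCallback(lambda units: results.append({u.name for u in units}))
    assert results == [{u"my-app"}]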
- """ - if units is None: - units = {} - self._units = units - self._used_ports = pset() - - def add(self, unit_name, image_name, ports=frozenset(), environment=None, - volumes=frozenset(), mem_limit=None, cpu_shares=None, - restart_policy=RestartNever(), command_line=None, swappiness=0): - if unit_name in self._units: - return fail(AlreadyExists(unit_name)) - for port in ports: - if port.external_port in self._used_ports: - raise AddressInUse( - address=(b"0.0.0.0", port.external_port), - apierror=APIError( - 'fake api response from server', - response=make_response(500, 'fake response')), - ) - - all_ports = set(range(2 ** 15, 2 ** 16)) - assigned_ports = [] - for port in ports: - if port.external_port == 0: - available_ports = pset(all_ports) - self._used_ports - assigned = next(iter(available_ports)) - port = port.set(external_port=assigned) - assigned_ports.append(port) - self._used_ports = self._used_ports.add(port.external_port) - - self._units[unit_name] = Unit( - name=unit_name, - container_name=unit_name, - container_image=image_name, - ports=frozenset(assigned_ports), - environment=environment, - volumes=frozenset(volumes), - activation_state=u'active', - mem_limit=mem_limit, - cpu_shares=cpu_shares, - restart_policy=restart_policy, - command_line=command_line, - swappiness=swappiness - ) - return succeed(None) - - def exists(self, unit_name): - return succeed(unit_name in self._units) - - def remove(self, unit_name): - if unit_name in self._units: - del self._units[unit_name] - return succeed(None) - - def list(self): - units = set(self._units.values()) - return succeed(units) - - -# Basic namespace for Flocker containers: -BASE_NAMESPACE = u"flocker--" - class TimeoutClient(Client): """ @@ -472,548 +115,3 @@ def dockerpy_client(**kwargs): steps=get_default_retry_steps(), ), ) - - -@implementer(IDockerClient) -class DockerClient(object): - """ - Talk to the real Docker server directly. - - Some operations can take a while (e.g. stopping a container), so we - use a thread pool. See https://clusterhq.atlassian.net/browse/FLOC-718 - for using a custom thread pool. - - :ivar unicode namespace: A namespace prefix to add to container names - so we don't clobber other applications interacting with Docker. - :ivar str base_url: URL for connection to the Docker server. - :ivar int long_timeout: Maximum time in seconds to wait for - long-running operations, particularly pulling an image. - :ivar LRUCache _image_cache: Mapped cache of image IDs to their data. - """ - def __init__( - self, namespace=BASE_NAMESPACE, base_url=None, - long_timeout=600): - self.namespace = namespace - self._client = dockerpy_client( - version="1.15", base_url=base_url, - long_timeout=timedelta(seconds=long_timeout), - ) - self._image_cache = LRUCache(100) - - def _to_container_name(self, unit_name): - """ - Add the namespace to the container name. - - :param unicode unit_name: The unit's name. - - :return unicode: The container's name. - """ - return self.namespace + unit_name - - def _parse_container_ports(self, data): - """ - Parse the ports from a data structure representing the Ports - configuration of a Docker container in the format returned by - ``self._client.inspect_container`` and return a list containing - ``PortMap`` instances mapped to the container and host exposed ports. - - :param dict data: The data structure for the representation of - container and host port mappings in a single container. 
This takes the form of the ``NetworkSettings.Ports`` portion
-            of a container's state and configuration as returned by
-            inspecting the container. This is a dictionary mapping
-            container ports to a list of host bindings, e.g.
-            "3306/tcp": [{"HostIp": "0.0.0.0","HostPort": "53306"},
-            {"HostIp": "0.0.0.0","HostPort": "53307"}]
-
-        :return list: A list that is either empty or contains ``PortMap``
-            instances.
-        """
-        ports = []
-        for internal, hostmap in data.items():
-            internal_map = internal.split(u'/')
-            internal_port = internal_map[0]
-            internal_port = int(internal_port)
-            if hostmap:
-                for host in hostmap:
-                    external_port = host[u"HostPort"]
-                    external_port = int(external_port)
-                    portmap = PortMap(internal_port=internal_port,
-                                      external_port=external_port)
-                    ports.append(portmap)
-        return ports
-
-    def _parse_restart_policy(self, data):
-        """
-        Parse the restart policy from the configuration of a Docker container
-        in the format returned by ``self._client.inspect_container`` and
-        return an ``IRestartPolicy``.
-
-        :param dict data: The data structure representing the restart policy
-            of a container, e.g.
-
-            {"Name": "policy-name", "MaximumRetryCount": 0}
-
-        :return IRestartPolicy: The model of the restart policy.
-
-        :raises ValueError: if an unknown policy is passed.
-        """
-        POLICIES = {
-            u"": lambda data:
-                RestartNever(),
-            u"always": lambda data:
-                RestartAlways(),
-            u"on-failure": lambda data:
-                RestartOnFailure(
-                    maximum_retry_count=data[u"MaximumRetryCount"] or None)
-        }
-        try:
-            # docker will treat an unknown policy as "never".
-            # We error out here, in case new policies are added.
-            return POLICIES[data[u"Name"]](data)
-        except KeyError:
-            raise ValueError("Unknown restart policy: %r" % (data[u"Name"],))
-
-    def _serialize_restart_policy(self, restart_policy):
-        """
-        Serialize the restart policy from an ``IRestartPolicy`` to the format
-        expected by the docker API.
-
-        :param IRestartPolicy restart_policy: The model of the restart policy.
-
-        :returns: A dictionary suitable to pass to docker
-
-        :raises ValueError: if an unknown policy is passed.
-        """
-        SERIALIZERS = {
-            RestartNever: lambda policy:
-                {u"Name": u""},
-            RestartAlways: lambda policy:
-                {u"Name": u"always"},
-            RestartOnFailure: lambda policy:
-                {u"Name": u"on-failure",
-                 u"MaximumRetryCount": policy.maximum_retry_count or 0},
-        }
-        try:
-            return SERIALIZERS[restart_policy.__class__](restart_policy)
-        except KeyError:
-            raise ValueError("Unknown restart policy: %r" % (restart_policy,))
-
-    def _image_not_found(self, apierror):
-        """
-        Inspect a ``docker.errors.APIError`` to determine if it represents a
-        failure to start a container because the container's image wasn't
-        found.
-
-        :return: ``True`` if this is the case, ``False`` if the error has
-            another cause.
-        :rtype: ``bool``
-        """
-        return apierror.response.status_code == NOT_FOUND
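As a reference for the policy tables above, a minimal sketch of the serialize/parse round trip (``client`` stands for an instance of the ``DockerClient`` removed in this diff; the retry count is illustrative):

    # Sketch: restart-policy round trip through the Docker wire format.
    policy = RestartOnFailure(maximum_retry_count=3)
    wire = client._serialize_restart_policy(policy)
    assert wire == {u"Name": u"on-failure", u"MaximumRetryCount": 3}
    parsed = client._parse_restart_policy(wire)
    assert isinstance(parsed, RestartOnFailure)
    assert parsed.maximum_retry_count == 3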
- """ - # Recognize an error (without newline) like: - # - # Cannot start container : Error starting userland proxy: - # listen tcp :: bind: address already in use - # - # Or (without newline) like: - # - # Cannot start container : Bind for : failed: - # port is already allocated - # - # because Docker can't make up its mind about which format to use. - parts = apierror.explanation.split(b": ") - if parts[-1] == b"address already in use": - ip, port = parts[-3].split()[-1].split(b":") - elif parts[-1] == b"port is already allocated": - ip, port = parts[-2].split()[2].split(b":") - else: - return None - return AddressInUse(address=(ip, int(port)), apierror=apierror) - - def _image_data(self, image): - """ - Supply data about an image, by either inspecting it or returning - cached data if available. - - :param unicode image: The ID of the image. - - :return: ``dict`` representing data about the image properties. - """ - cached_image = self._image_cache.get(image) - if cached_image is not None: - LOG_CACHED_IMAGE(image=image).write() - return cached_image - try: - image_data = self._client.inspect_image(image) - Message.new( - message_type="flocker:node:docker:image_inspected", - image=image - ).write() - except APIError as e: - if e.response.status_code == NOT_FOUND: - # Image has been deleted, so just fill in some - # stub data so we can return *something*. This - # should happen only for stopped containers so - # some inaccuracy is acceptable. - # We won't cache stub data though. - Message.new( - message_type="flocker:node:docker:image_not_found", - image=image - ).write() - image_data = {u"Config": {u"Env": [], u"Cmd": []}} - else: - raise - cached_data = ImageDataCache( - command=image_data[u"Config"][u"Cmd"], - environment=image_data[u"Config"][u"Env"] - ) - self._image_cache.put(image, cached_data) - Message.new( - message_type="flocker:node:docker:image_data_cached", - image=image - ).write() - return cached_data - - def add(self, unit_name, image_name, ports=None, environment=None, - volumes=(), mem_limit=None, cpu_shares=None, - restart_policy=RestartNever(), command_line=None, - swappiness=0): - container_name = self._to_container_name(unit_name) - - if environment is not None: - environment = environment.to_dict() - if ports is None: - ports = [] - - restart_policy_dict = self._serialize_restart_policy(restart_policy) - - def _create(): - binds = list( - # The "Z" mode tells Docker to "relabel file objects" on the - # volume. This makes things work when SELinux is enabled, at - # least in the default configuration on CentOS 7. See - # , in the - # `--volumes-from` section (or just search for SELinux). - u"{}:{}:Z".format( - volume.node_path.path, volume.container_path.path - ) - for volume in volumes - ) - port_bindings = { - p.internal_port: p.external_port - for p in ports - } - host_config = self._client.create_host_config( - binds=binds, - port_bindings=port_bindings, - restart_policy=restart_policy_dict, - ) - # We're likely to get e.g. 
pvector, so make sure we're passing - # in something JSON serializable: - command_line_values = command_line - if command_line_values is not None: - command_line_values = list(command_line_values) - - memswap_limit = -1 - if swappiness != 0: - memswap_limit = mem_limit + mem_limit * swappiness - - self._client.create_container( - name=container_name, - image=image_name, - command=command_line_values, - environment=environment, - ports=[p.internal_port for p in ports], - mem_limit=mem_limit, - cpu_shares=cpu_shares, - host_config=host_config, - memswap_limit=memswap_limit, - ) - - def _add(): - try: - _create() - except APIError as e: - if self._image_not_found(e): - # Pull it and try again - self._client.pull(image_name) - _create() - else: - # Unrecognized, just raise it. - raise - - # Just because we got a response doesn't mean Docker has - # actually updated any internal state yet! So if e.g. we did a - # start on this container Docker might well complain it knows - # not the container of which we speak. To prevent this we poll - # until it does exist. - while True: - try: - self._client.start(container_name) - except NotFound: - sleep(0.01) - else: - break - - d = deferToThread(_add) - - def _extract_error(failure): - failure.trap(APIError) - code = failure.value.response.status_code - if code == 409: - raise AlreadyExists(unit_name) - - in_use = self._address_in_use(failure.value) - if in_use is not None: - # We likely can't start the container because its - # configuration conflicts with something else happening on - # the system. Reflect this failure condition in a more - # easily recognized way. - raise in_use - - return failure - d.addErrback(_extract_error) - return d - - def _blocking_exists(self, container_name): - """ - Blocking API to check if container exists. - - :param unicode container_name: The name of the container whose - existence we're checking. - - :return: ``True`` if unit exists, otherwise ``False``. - """ - try: - self._client.inspect_container(container_name) - return True - except APIError: - return False - - def exists(self, unit_name): - container_name = self._to_container_name(unit_name) - return deferToThread(self._blocking_exists, container_name) - - def _stop_container(self, container_name): - """Attempt to stop the given container. - - There is a race condition between a process dying and - Docker noticing that fact: - - https://github.com/docker/docker/issues/5165#issuecomment-65753753 - - If we get an error indicating that this race condition happened, - return False. This means the caller should try again. If we *do* - successfully stop the container, return True. - - :raise APIError: If the container failed to stop for some unknown - reason. - :return: True if we stopped the container, False otherwise. - - """ - try: - with start_action( - action_type='flocker:docker:container_stop', - container=container_name - ): - self._client.stop(container_name) - except APIError as e: - if e.response.status_code == NOT_FOUND: - # If the container doesn't exist, we swallow the error, - # since this method is supposed to be idempotent. - return True - elif e.response.status_code == INTERNAL_SERVER_ERROR: - # Docker returns this if the process had died, but - # hasn't noticed it yet. - return False - else: - raise - return True - - def _remove_container(self, container_name): - """ - Attempt to remove a container. - - Assumes the given container has already been stopped. - - :param unicode container_name: The fully-namespaced name of the - container. 
- :return: True if we removed the container, False otherwise. - """ - try: - # The ``docker.Client.stop`` method sometimes returns a - # 404 error, even though the container exists. - # See https://github.com/docker/docker/issues/13088 - # Wait until the container has actually stopped running - # before attempting to remove it. Otherwise we are - # likely to see: 'docker.errors.APIError: 409 Client - # Error: Conflict ("Conflict, You cannot remove a - # running container. Stop the container before - # attempting removal or use -f")' - # This code should probably be removed once the above - # issue has been resolved. See [FLOC-1850] - self._client.wait(container_name) - - with start_action( - action_type='flocker:docker:container_remove', - container=container_name - ): - self._client.remove_container(container_name) - except APIError as e: - if e.response.status_code == NOT_FOUND: - # If the container doesn't exist, we swallow the error, - # since this method is supposed to be idempotent. - return True - elif e.response.status_code == INTERNAL_SERVER_ERROR: - # Failure to remove container - see FLOC-3262 for an example. - return False - else: - raise - return True - - def remove(self, unit_name): - container_name = self._to_container_name(unit_name) - - def _remove(): - # Previously, this looped forever and didn't pause between loops. - # We've arbitrarily chosen a wait interval of 0.001 seconds and - # 1000 retries (i.e. a second of polling). These values may need - # tuning. - poll_until( - partial(self._stop_container, container_name), - repeat(0.001, 1000)) - - # Previously, the container remove was only tried once. Again, - # these parameters may need tuning. - poll_until( - partial(self._remove_container, container_name), - repeat(0.001, 1000)) - - d = deferToThread(_remove) - return d - - def list(self): - def _list(): - result = set() - ids = [d[u"Id"] for d in - self._client.containers(quiet=True, all=True)] - for i in ids: - - try: - data = self._client.inspect_container(i) - except APIError as e: - # The container ID returned by the list API call above, may - # have been removed in another thread. - if e.response.status_code == NOT_FOUND: - continue - else: - raise - - state = (u"active" if data[u"State"][u"Running"] - else u"inactive") - name = data[u"Name"] - # Since tags (e.g. "busybox") aren't stable, ensure we're - # looking at the actual image by using the hash: - image = data[u"Image"] - image_tag = data[u"Config"][u"Image"] - command = data[u"Config"][u"Cmd"] - with start_action( - action_type=u"flocker:node:docker:inspect_image", - container=i, - running=data[u"State"][u"Running"] - ): - image_data = self._image_data(image) - if image_data.command == command: - command = None - port_bindings = data[u"NetworkSettings"][u"Ports"] - if port_bindings is not None: - ports = self._parse_container_ports(port_bindings) - else: - ports = list() - volumes = [] - binds = data[u"HostConfig"]['Binds'] - if binds is not None: - for bind_config in binds: - parts = bind_config.split(':', 2) - node_path, container_path = parts[:2] - volumes.append( - Volume(container_path=FilePath(container_path), - node_path=FilePath(node_path)) - ) - if name.startswith(u"/" + self.namespace): - name = name[1 + len(self.namespace):] - else: - continue - # Retrieve environment variables for this container, - # disregarding any environment variables that are part - # of the image, rather than supplied in the configuration. 
-                unit_environment = []
-                container_environment = data[u"Config"][u"Env"]
-                if image_data.environment is None:
-                    image_environment = []
-                else:
-                    image_environment = image_data.environment
-                if container_environment is not None:
-                    for environment in container_environment:
-                        if environment not in image_environment:
-                            env_key, env_value = environment.split('=', 1)
-                            unit_environment.append((env_key, env_value))
-                unit_environment = (
-                    Environment(variables=frozenset(unit_environment))
-                    if unit_environment else None
-                )
-                # Our Unit model counts None as the value for cpu_shares and
-                # mem_limit in containers without specified limits, however
-                # Docker returns the values in these cases as zero, so we
-                # manually convert.
-                cpu_shares = data[u"Config"][u"CpuShares"]
-                cpu_shares = None if cpu_shares == 0 else cpu_shares
-                mem_limit = data[u"Config"][u"Memory"]
-                mem_limit = None if mem_limit == 0 else mem_limit
-                restart_policy = self._parse_restart_policy(
-                    data[u"HostConfig"][u"RestartPolicy"])
-                result.add(Unit(
-                    name=name,
-                    container_name=self._to_container_name(name),
-                    activation_state=state,
-                    container_image=image_tag,
-                    ports=frozenset(ports),
-                    volumes=frozenset(volumes),
-                    environment=unit_environment,
-                    mem_limit=mem_limit,
-                    cpu_shares=cpu_shares,
-                    restart_policy=restart_policy,
-                    command_line=command)
-                )
-            return result
-        return deferToThread(_list)
-
-
-class NamespacedDockerClient(proxyForInterface(IDockerClient, "_client")):
-    """
-    A Docker client that only shows and creates containers in a given
-    namespace.
-
-    Unlike ``DockerClient``, whose namespace is there to prevent conflicts
-    with other Docker users, this class deals with Flocker's internal
-    concept of namespaces. I.e. if hypothetically Docker container names
-    supported path-based namespaces then ``DockerClient`` would look at
-    containers in ``/flocker/`` and this class would look at containers
-    in ``/flocker/<namespace>/``.
-    """
-    def __init__(self, namespace, base_url=None):
-        """
-        :param unicode namespace: Namespace to restrict containers to.
-        """
-        self._client = DockerClient(
-            namespace=BASE_NAMESPACE + namespace + u"--")
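A minimal sketch of how the two namespace layers compose into a Docker container name, following ``_to_container_name`` and the constructor above (the namespace and unit name are illustrative):

    # Sketch: container naming under NamespacedDockerClient.
    BASE_NAMESPACE = u"flocker--"
    flocker_namespace = u"testing"   # Flocker-internal namespace
    unit_name = u"db"
    # NamespacedDockerClient builds its inner DockerClient with
    # namespace=BASE_NAMESPACE + flocker_namespace + u"--", and
    # _to_container_name() prepends that namespace to the unit name:
    container_name = BASE_NAMESPACE + flocker_namespace + u"--" + unit_name
    assert container_name == u"flocker--testing--db"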
-RUN ["chmod", "+x", "/run.sh"] -CMD ["/bin/sh", "-e", "run.sh", "{host}", "{port}", "{bytes}", "{timeout}"] diff --git a/flocker/node/functional/sendbytes-docker/run.sh b/flocker/node/functional/sendbytes-docker/run.sh deleted file mode 100755 index edb427521e..0000000000 --- a/flocker/node/functional/sendbytes-docker/run.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/sh -set -e -help() { - cat <&2 - echo "ERROR: Unknown option: $1" >&2 - exit 1 - ;; - *) - break - ;; - esac -done - -HOST=${1:?"Error: Missing parameter 1:HOST"} -PORT=${2:?"Error: Missing parameter 2:PORT"} -BYTES=${3:?"Error: Missing parameter 3:BYTES"} -TIMEOUT=${4:?"Error: Missing parameter 3:TIMEOUT"} - -start_time=$(date +"%s") -# Attempt to connect -# NB nc -w 10 means connection timeout after 10s -while ! echo -n "${BYTES}" | nc -w 10 "${HOST}" "${PORT}"; do - usleep 100000 - if test "$(date +'%s')" -gt "$((start_time+${TIMEOUT}))"; then - echo "ERROR: unable to connect to after ${TIMEOUT} seconds." >&2 - break - fi -done diff --git a/flocker/node/functional/test_deploy.py b/flocker/node/functional/test_deploy.py deleted file mode 100644 index 631d5dd27e..0000000000 --- a/flocker/node/functional/test_deploy.py +++ /dev/null @@ -1,463 +0,0 @@ -# Copyright ClusterHQ Inc. See LICENSE file for details. - -""" -Functional tests for ``flocker.node._deploy``. -""" - -from uuid import uuid4 - -from pyrsistent import pmap, pvector, pset - -from eliot import Message - -from twisted.internet import reactor -from twisted.python.filepath import FilePath - -from .. import ( - NodeLocalState, P2PManifestationDeployer, ApplicationNodeDeployer, - sequentially -) -from ...common import loop_until -from ...control._model import ( - Deployment, Application, DockerImage, Node, AttachedVolume, Link, - Manifestation, Dataset, DeploymentState, NodeState, - PersistentState, -) -from .._docker import DockerClient -from ..testtools import wait_for_unit_state, if_docker_configured -from ...testtools import ( - random_name, DockerImageBuilder, assertContainsAll, flaky, - AsyncTestCase, -) -from ...volume.testtools import create_volume_service -from .. import run_state_change -from ...control.testtools import InMemoryStatePersister - - -class P2PNodeDeployer(object): - """ - Combination of ZFS and container deployer. 
- - Should really be gotten rid of: - https://clusterhq.atlassian.net/browse/FLOC-1732 - """ - def __init__(self, hostname, volume_service, docker_client=None, - node_uuid=None): - self.manifestations_deployer = P2PManifestationDeployer( - hostname, volume_service, node_uuid=node_uuid) - self.applications_deployer = ApplicationNodeDeployer( - hostname, docker_client, node_uuid=node_uuid) - self.hostname = hostname - self.node_uuid = node_uuid - self.volume_service = self.manifestations_deployer.volume_service - self.docker_client = self.applications_deployer.docker_client - - def discover_state(self, cluster_state, persistent_state): - d = self.manifestations_deployer.discover_state( - cluster_state, persistent_state=persistent_state) - - def got_manifestations_state(manifestations_local_state): - manifestations_state = manifestations_local_state.node_state - app_discovery = self.applications_deployer.discover_state( - DeploymentState(nodes={manifestations_state}), - persistent_state=PersistentState(), - ) - - def got_app_local_state(app_local_state): - app_state = app_local_state.node_state - new_app_local_state = NodeLocalState( - node_state=( - app_state.evolver() - .set("manifestations", - manifestations_state.manifestations) - .set("paths", manifestations_state.paths) - .set("devices", - manifestations_state.devices).persistent())) - return new_app_local_state - app_discovery.addCallback(got_app_local_state) - return app_discovery - d.addCallback(got_manifestations_state) - return d - - def calculate_changes(self, configuration, cluster_state, local_state): - """ - Combine changes from the application and ZFS agents. - """ - return sequentially(changes=[ - self.applications_deployer.calculate_changes( - configuration, cluster_state, local_state), - self.manifestations_deployer.calculate_changes( - configuration, cluster_state, local_state), - ]) - - -def change_node_state(deployer, desired_configuration): - """ - Change the local state to match the given desired state. - - :param IDeployer deployer: Deployer to discover local state and - calculate changes. - :param Deployment desired_configuration: The intended configuration of all - nodes. - :return: ``Deferred`` that fires when the necessary changes are done. - """ - state_persister = InMemoryStatePersister() - - def converge(): - d = deployer.discover_state( - DeploymentState(nodes={ - NodeState(hostname=deployer.hostname, uuid=deployer.node_uuid, - applications=[], - manifestations={}, paths={}, devices={}), - }), - persistent_state=state_persister.get_state(), - ) - - def got_changes(local_state): - changes = local_state.shared_state_changes() - cluster_state = DeploymentState() - for change in changes: - cluster_state = change.update_cluster_state(cluster_state) - return deployer.calculate_changes( - desired_configuration, cluster_state, local_state) - d.addCallback(got_changes) - d.addCallback(lambda change: run_state_change( - change, deployer=deployer, - state_persister=state_persister)) - return d - # Repeat a few times until things settle down: - result = converge() - result.addCallback(lambda _: converge()) - result.addCallback(lambda _: converge()) - return result - - -def find_unit(units, unit_name): - Message.new( - message_type="flocker:node:functional:deploy:find_unit", - units=list(unit.name for unit in units), desired_unit=unit_name - ).write() - for unit in units: - if unit.name == unit_name: - return unit - - -class DeployerTests(AsyncTestCase): - """ - Functional tests for ``Deployer``. 
- """ - - @if_docker_configured - def test_environment(self): - """ - The environment specified in an ``Application`` is passed to the - container. - """ - expected_variables = frozenset({ - 'key1': 'value1', - 'key2': 'value2', - }.items()) - - docker_dir = FilePath(__file__).sibling('env-docker') - volume_service = create_volume_service(self) - - image = DockerImageBuilder(test=self, source_dir=docker_dir) - d = image.build() - - def image_built(image_name): - application_name = random_name(self) - - docker_client = DockerClient() - self.addCleanup(docker_client.remove, application_name) - - deployer = P2PNodeDeployer( - u"localhost", volume_service, docker_client, - node_uuid=uuid4()) - - dataset = Dataset( - dataset_id=unicode(uuid4()), - metadata=pmap({"name": application_name})) - manifestation = Manifestation(dataset=dataset, primary=True) - desired_state = Deployment(nodes=frozenset([ - Node(uuid=deployer.node_uuid, - applications=frozenset([Application( - name=application_name, - image=DockerImage.from_string( - image_name), - environment=expected_variables, - volume=AttachedVolume( - manifestation=manifestation, - mountpoint=FilePath('/data'), - ), - links=frozenset(), - )]), - manifestations={ - manifestation.dataset_id: manifestation})])) - return change_node_state(deployer, desired_state) - - d.addCallback(image_built) - d.addCallback(lambda _: volume_service.enumerate()) - d.addCallback( - lambda volumes: - list(volumes)[0].get_filesystem().get_path().child(b'env')) - - def got_result_path(result_path): - d = loop_until(reactor, result_path.exists) - d.addCallback(lambda _: result_path) - return d - d.addCallback(got_result_path) - - def started(result_path): - contents = result_path.getContent() - - assertContainsAll( - haystack=contents, - test_case=self, - needles=['{}={}\n'.format(k, v) - for k, v in expected_variables]) - d.addCallback(started) - return d - - @if_docker_configured - def test_links(self): - """ - The links specified in an ``Application`` are passed to the - container as environment variables. 
- """ - expected_variables = frozenset({ - 'ALIAS_PORT_80_TCP': 'tcp://localhost:8080', - 'ALIAS_PORT_80_TCP_PROTO': 'tcp', - 'ALIAS_PORT_80_TCP_ADDR': 'localhost', - 'ALIAS_PORT_80_TCP_PORT': '8080', - }.items()) - - volume_service = create_volume_service(self) - - docker_dir = FilePath(__file__).sibling('env-docker') - image = DockerImageBuilder(test=self, source_dir=docker_dir) - d = image.build() - - def image_built(image_name): - application_name = random_name(self) - - docker_client = DockerClient() - self.addCleanup(docker_client.remove, application_name) - - deployer = P2PNodeDeployer( - u"localhost", volume_service, docker_client, - node_uuid=uuid4()) - - link = Link(alias=u"alias", - local_port=80, - remote_port=8080) - - dataset = Dataset( - dataset_id=unicode(uuid4()), - metadata=pmap({"name": application_name})) - manifestation = Manifestation(dataset=dataset, primary=True) - desired_state = Deployment(nodes=frozenset([ - Node(uuid=deployer.node_uuid, - applications=frozenset([Application( - name=application_name, - image=DockerImage.from_string( - image_name), - links=frozenset([link]), - volume=AttachedVolume( - manifestation=manifestation, - mountpoint=FilePath('/data'), - ), - )]), - manifestations={ - manifestation.dataset_id: manifestation})])) - - return change_node_state(deployer, desired_state) - - d.addCallback(image_built) - d.addCallback(lambda _: volume_service.enumerate()) - d.addCallback(lambda volumes: - list(volumes)[0].get_filesystem().get_path().child( - b'env')) - - def got_result_path(result_path): - d = loop_until(reactor, result_path.exists) - d.addCallback(lambda _: result_path) - return d - d.addCallback(got_result_path) - - def started(result_path): - contents = result_path.getContent() - - assertContainsAll( - haystack=contents, - test_case=self, - needles=['{}={}\n'.format(k, v) - for k, v in expected_variables]) - d.addCallback(started) - return d - - def _start_container_for_introspection(self, **kwargs): - """ - Configure and deploy a busybox container with the given options. - - :param **kwargs: Additional arguments to pass to - ``Application.__init__``. - - :return: ``Deferred`` that fires after convergence loop has been - run with the state_changes results of state discovery. - """ - application_name = random_name(self) - docker_client = DockerClient() - self.addCleanup(docker_client.remove, application_name) - - deployer = ApplicationNodeDeployer( - u"localhost", docker_client, - node_uuid=uuid4()) - - application = Application( - name=application_name, - image=DockerImage.from_string(u"busybox"), - **kwargs) - desired_configuration = Deployment(nodes=[ - Node(uuid=deployer.node_uuid, - applications=[application])]) - d = change_node_state(deployer, desired_configuration) - d.addCallback(lambda _: deployer.discover_state( - DeploymentState(nodes={ - NodeState(hostname=deployer.hostname, uuid=deployer.node_uuid, - applications=[], - manifestations={}, paths={}, devices={}), - }), - persistent_state=PersistentState(), - )) - return d - - @if_docker_configured - def test_links_lowercase(self): - """ - Lower-cased link aliases do not result in lack of covergence. - - Environment variables introspected by the Docker client for links - are all upper-case, a source of potential problems in detecting - the state. 
- """ - link = Link(alias=u"alias", - local_port=80, - remote_port=8080) - d = self._start_container_for_introspection( - links=[link], - command_line=[u"nc", u"-l", u"-p", u"8080"]) - d.addCallback( - lambda results: self.assertIn( - pset([link]), - [app.links for app in results.node_state.applications.values()] - )) - return d - - @if_docker_configured - def test_command_line_introspection(self): - """ - Checking the command-line status results in same command-line we - passed in. - """ - command_line = pvector([u"nc", u"-l", u"-p", u"8080"]) - d = self._start_container_for_introspection(command_line=command_line) - d.addCallback( - lambda results: self.assertIn( - command_line, - [app.command_line for app in - results.node_state.applications.values()])) - return d - - @if_docker_configured - def test_memory_limit(self): - """ - The memory limit number specified in an ``Application`` is passed to - the container. - """ - EXPECTED_MEMORY_LIMIT = 100000000 - image = DockerImage.from_string(u"openshift/busybox-http-app") - - application_name = random_name(self) - - docker_client = DockerClient() - self.addCleanup(docker_client.remove, application_name) - - deployer = ApplicationNodeDeployer( - u"localhost", docker_client, - node_uuid=uuid4()) - - desired_state = Deployment(nodes=frozenset([ - Node(uuid=deployer.node_uuid, - applications=frozenset([Application( - name=application_name, - image=image, - memory_limit=EXPECTED_MEMORY_LIMIT - )]))])) - - d = change_node_state(deployer, desired_state) - d.addCallback(lambda _: wait_for_unit_state( - reactor, - docker_client, - application_name, - [u'active']) - ) - - def inspect_application(_): - deferred_list = docker_client.list() - - def app_memory(unit): - self.assertEqual(unit.mem_limit, EXPECTED_MEMORY_LIMIT) - - deferred_list.addCallback(find_unit, application_name) - deferred_list.addCallback(app_memory) - return deferred_list - d.addCallback(inspect_application) - return d - - @flaky(u'FLOC-3330') - @if_docker_configured - def test_cpu_shares(self): - """ - The CPU shares number specified in an ``Application`` is passed to the - container. - """ - EXPECTED_CPU_SHARES = 512 - - image = DockerImage.from_string(u"openshift/busybox-http-app") - - application_name = random_name(self) - - docker_client = DockerClient() - self.addCleanup(docker_client.remove, application_name) - - deployer = ApplicationNodeDeployer( - u"localhost", docker_client, - node_uuid=uuid4()) - - desired_state = Deployment(nodes=frozenset([ - Node(uuid=deployer.node_uuid, - applications=frozenset([Application( - name=application_name, - image=image, - cpu_shares=EXPECTED_CPU_SHARES - )]))])) - - d = change_node_state(deployer, desired_state) - d.addCallback(lambda _: wait_for_unit_state( - reactor, - docker_client, - application_name, - [u'active']) - ) - - def inspect_application(_): - deferred_list = docker_client.list() - - def app_cpu_shares(unit): - self.assertEqual(unit.cpu_shares, EXPECTED_CPU_SHARES) - - deferred_list.addCallback(find_unit, application_name) - deferred_list.addCallback(app_cpu_shares) - return deferred_list - d.addCallback(inspect_application) - return d diff --git a/flocker/node/functional/test_docker.py b/flocker/node/functional/test_docker.py deleted file mode 100644 index 96398a90ed..0000000000 --- a/flocker/node/functional/test_docker.py +++ /dev/null @@ -1,1328 +0,0 @@ -# Copyright ClusterHQ Inc. See LICENSE file for details. - -""" -Functional tests for :module:`flocker.node._docker`. 
-""" - -from __future__ import absolute_import - -from datetime import timedelta -from functools import partial -import time -import socket - -from eliot.testing import capture_logging, assertHasMessage - -from requests.exceptions import ReadTimeout -from docker.errors import APIError - -from twisted.python.monkey import MonkeyPatcher -from twisted.python.filepath import FilePath -from twisted.internet import reactor -from twisted.internet.defer import succeed, gatherResults -from twisted.internet.error import ConnectionRefusedError -from twisted.web.client import ResponseNeverReceived - -from treq import request, content - -from pyrsistent import PClass, pvector, field - -from ...common import loop_until -from ...testtools import ( - find_free_port, flaky, DockerImageBuilder, assertContainsAll, - random_name, - async_runner, TestCase, AsyncTestCase, -) - -from ..test.test_docker import ANY_IMAGE, make_idockerclient_tests -from .._docker import ( - DockerClient, PortMap, Environment, NamespacedDockerClient, - BASE_NAMESPACE, Volume, AddressInUse, make_response, - LOG_CACHED_IMAGE, dockerpy_client, -) -from ...control import ( - RestartNever, RestartAlways, RestartOnFailure, DockerImage -) -from ..testtools import ( - if_docker_configured, wait_for_unit_state, require_docker_version, - add_with_port_collision_retry, -) - - -def namespace_for_test(test_case): - return u"ns-" + random_name(test_case) - - -class IDockerClientTests(make_idockerclient_tests( - lambda test_case: DockerClient( - namespace=namespace_for_test(test_case) - ), -)): - """ - ``IDockerClient`` tests for ``DockerClient``. - """ - @if_docker_configured - def setUp(self): - super(IDockerClientTests, self).setUp() - - -class IDockerClientNamespacedTests(make_idockerclient_tests( - lambda test_case: NamespacedDockerClient( - namespace=namespace_for_test(test_case) - ) -)): - """ - ``IDockerClient`` tests for ``NamespacedDockerClient``. - """ - @if_docker_configured - def setUp(self): - super(IDockerClientNamespacedTests, self).setUp() - - @flaky([u'FLOC-2628', u'FLOC-2874']) - def test_added_is_listed(self): - return super(IDockerClientNamespacedTests, self).test_added_is_listed() - - -class Registry(PClass): - """ - Describe a Docker image registry. - - :ivar host: The IP address on which the registry is listening. - :ivar port: The port number on which the registry is listening. - :ivar name: The name of the container in which the registry is running. - """ - host = field(mandatory=True, type=bytes, initial=b"127.0.0.1") - port = field(mandatory=True, type=int) - name = field(mandatory=True, type=unicode) - - @property - def repository(self): - """ - The string to use as an image name prefix to direct Docker to find that - image in this registry instead of the default. - """ - return "{host}:{port}".format(host=self.host, port=self.port) - - -class GenericDockerClientTests(AsyncTestCase): - """ - Functional tests for ``DockerClient`` and other clients that talk to - real Docker. - """ - clientException = APIError - - # FLOC-3935: These tests (and the ones in NamespacedDockerClientTests) are - # often timing out, sometimes in weird ways that cause interference with - # other tests. Until we can identify the cause, effectively disable - # timeouts on these tests and rely on the Jenkins timeout (or the limited - # patience of developers) to ensure they halt. 
-    run_tests_with = async_runner(timeout=timedelta(hours=1))
-
-    @if_docker_configured
-    def setUp(self):
-        super(GenericDockerClientTests, self).setUp()
-        self.namespacing_prefix = namespace_for_test(self)
-
-    def make_client(self):
-        return DockerClient(namespace=self.namespacing_prefix)
-
-    def create_container(self, client, name, image):
-        """
-        Create (but don't start) a container via the supplied client.
-
-        :param DockerClient client: The Docker API client.
-        :param unicode name: The container name.
-        :param unicode image: The image name.
-        """
-        container_name = client._to_container_name(name)
-        client._client.create_container(
-            name=container_name, image=image)
-
-    def start_container(self, unit_name,
-                        image_name=u"openshift/busybox-http-app:latest",
-                        ports=None, expected_states=(u'active',),
-                        environment=None, volumes=(),
-                        mem_limit=None, cpu_shares=None,
-                        restart_policy=RestartNever(),
-                        command_line=None,
-                        retry_on_port_collision=False):
-        """
-        Start a unit and wait until it reaches the `active` state or the
-        supplied `expected_states`.
-
-        :param unicode unit_name: See ``IDockerClient.add``.
-        :param unicode image_name: See ``IDockerClient.add``.
-        :param list ports: See ``IDockerClient.add``.
-        :param expected_states: A sequence of activation states to wait for.
-        :param environment: See ``IDockerClient.add``.
-        :param volumes: See ``IDockerClient.add``.
-        :param mem_limit: See ``IDockerClient.add``.
-        :param cpu_shares: See ``IDockerClient.add``.
-        :param restart_policy: See ``IDockerClient.add``.
-        :param command_line: See ``IDockerClient.add``.
-
-        :return: ``Deferred`` that fires with the ``DockerClient`` when
-            the unit reaches the expected state.
-        """
-        client = self.make_client()
-
-        if retry_on_port_collision:
-            add = partial(add_with_port_collision_retry, client)
-        else:
-            add = client.add
-
-        d = add(
-            unit_name=unit_name,
-            image_name=image_name,
-            ports=ports,
-            environment=environment,
-            volumes=volumes,
-            mem_limit=mem_limit,
-            cpu_shares=cpu_shares,
-            restart_policy=restart_policy,
-            command_line=command_line,
-        )
-        self.addCleanup(client.remove, unit_name)
-
-        d.addCallback(lambda _: wait_for_unit_state(reactor, client, unit_name,
-                                                    expected_states))
-        d.addCallback(lambda _: client)
-
-        return d
-
-    def test_custom_base_url_tcp_http(self):
-        """
-        ``DockerClient`` instantiated with a custom base URL for a TCP
-        connection has a client HTTP url after the connection is made.
-        """
-        client = DockerClient(base_url=b"tcp://127.0.0.1:2375")
-        self.assertEqual(client._client.base_url, b"http://127.0.0.1:2375")
-
-    def test_add_starts_container(self):
-        """
-        ``DockerClient.add`` starts the container.
-        """
-        name = random_name(self)
-        return self.start_container(name)
-
-    def test_correct_image_used(self):
-        """
-        ``DockerClient.add`` creates a container with the specified image.
-        """
-        image_name = u"openshift/busybox-http-app:latest"
-        name = random_name(self)
-        d = self.start_container(name, image_name=image_name)
-
-        def started(_):
-            docker = dockerpy_client()
-            data = docker.inspect_container(self.namespacing_prefix + name)
-            self.assertEqual(
-                image_name,
-                data[u"Config"][u"Image"],
-            )
-        d.addCallback(started)
-        return d
-
-    @capture_logging(assertHasMessage, LOG_CACHED_IMAGE)
-    def test_list_image_data_cached(self, logger):
-        """
-        ``DockerClient.list`` will only inspect an image ID once, caching
-        the resulting data.
- """ - name = random_name(self) - d = self.start_container(name, image_name=ANY_IMAGE) - - def started(client): - listing = client.list() - - def listed(_): - class FakeAPIError(Exception): - pass - - def fake_inspect_image(image): - raise FakeAPIError( - "Tried to inspect image {} twice.".format(image)) - # This is kind of nasty, but NamespacedDockerClient represents - # its client via a proxying attribute. - if isinstance(client, NamespacedDockerClient): - docker_client = client._client._client - else: - docker_client = client._client - self.patch(docker_client, "inspect_image", fake_inspect_image) - # If image is not retrieved from the cache, list() here will - # attempt to call inspect_image again, resulting in a call to - # the fake_inspect_image function that will raise an exception. - cached_listing = client.list() - cached_listing.addCallback(lambda _: None) - return cached_listing - - listing.addCallback(listed) - return listing - - d.addCallback(started) - return d - - @require_docker_version( - '1.6.0', - 'This test uses the registry:2 image ' - 'which requires Docker-1.6.0 or newer. ' - 'See https://docs.docker.com/registry/deploying/ for details.' - ) - def test_private_registry_image(self): - """ - ``DockerClient.add`` can start containers based on an image from a - private registry. - - A private registry is started in a container according to the - instructions at: - * https://docs.docker.com/registry/deploying/ - - An image is pushed to that private registry and then a Flocker - application is started that uses that private repository image name. - - Docker can pull from a private registry without any TLS configuration - as long as it's running on the local host. - """ - registry_listening = self.run_registry() - - def tag_and_push_image(registry): - client = dockerpy_client() - image_name = ANY_IMAGE - # The image will normally have been pre-pulled on build slaves, but - # may not already be available when running tests locally. - client.pull(image_name) - - registry_image = self.push_to_registry(image_name, registry) - - # And the image will (hopefully) have been downloaded again from - # the private registry in the next step, so cleanup that local - # image once the test finishes. - self.addCleanup( - client.remove_image, - image=registry_image.full_name - ) - - return registry_image - - pushing_image = registry_listening.addCallback(tag_and_push_image) - - def start_registry_image(registry_image): - return self.start_container( - unit_name=random_name(self), - image_name=registry_image.full_name, - ) - starting_registry_image = pushing_image.addCallback( - start_registry_image - ) - return starting_registry_image - - def test_add_error(self): - """ - ``DockerClient.add`` returns a ``Deferred`` that errbacks with - ``APIError`` if response code is not a success response code. - """ - client = self.make_client() - # add() calls exists(), and we don't want exists() to be the one - # failing since that's not the code path we're testing, so bypass - # it: - client.exists = lambda _: succeed(False) - # Illegal container name should make Docker complain when we try to - # install the container: - d = client.add(u"!!!###!!!", u"busybox:latest") - return self.assertFailure(d, self.clientException) - - def test_dead_is_listed(self): - """ - ``DockerClient.list()`` includes dead units. - - We use a `busybox` image here, because it will exit immediately and - reach an `inactive` substate of `dead`. 
-
-        There are no assertions in this test, because it will fail with a
-        timeout if the unit with that expected state is never listed or if that
-        unit never reaches that state.
-        """
-        name = random_name(self)
-        d = self.start_container(unit_name=name, image_name="busybox:latest",
-                                 expected_states=(u'inactive',))
-        return d
-
-    def test_list_with_missing_image(self):
-        """
-        ``DockerClient.list()`` can list containers whose image is missing.
-
-        The resulting output may be inaccurate, but that's OK: this only
-        happens for non-running containers, which at worst we're going to
-        restart anyway.
-        """
-        path = FilePath(self.mktemp())
-        path.makedirs()
-        path.child(b"Dockerfile.in").setContent(
-            b"FROM busybox\nCMD /bin/true\n")
-        builder = DockerImageBuilder(test=self, source_dir=path, cleanup=False)
-        d = builder.build()
-
-        def image_built(image_name):
-            name = random_name(self)
-            d = self.start_container(
-                unit_name=name, image_name=image_name,
-                expected_states=(u'inactive',))
-            return d.addCallback(lambda ignored: (name, image_name))
-        d.addCallback(image_built)
-
-        def stopped_container_exists((name, image_name)):
-            # Remove the image:
-            docker_client = dockerpy_client()
-            docker_client.remove_image(image_name, force=True)
-
-            # Should be able to still list the container:
-            client = self.make_client()
-            listed = client.list()
-            listed.addCallback(lambda results: self.assertIn(
-                (name, "inactive"),
-                [(unit.name, unit.activation_state) for unit in results]))
-            return listed
-        d.addCallback(stopped_container_exists)
-
-        return d
-
-    def test_dead_is_removed(self):
-        """
-        ``DockerClient.remove()`` removes dead units without error.
-
-        We use a `busybox` image here, because it will exit immediately and
-        reach an `inactive` substate of `dead`.
-        """
-        name = random_name(self)
-        d = self.start_container(unit_name=name, image_name="busybox:latest",
-                                 expected_states=(u'inactive',))
-
-        def remove_container(client):
-            client.remove(name)
-        d.addCallback(remove_container)
-        return d
-
-    def request_until_response(self, port):
-        """
-        Resend a test HTTP request until a response is received.
-
-        The container may have started, but the webserver inside may take a
-        little while to start serving requests.
-
-        :param int port: The localhost port to which an HTTP request will be
-            sent.
-
-        :return: A ``Deferred`` which fires with the result of the first
-            successful HTTP request.
-        """
-        def send_request():
-            """
-            Send an HTTP request in a loop until the request is answered.
-            """
-            response = request(
-                b"GET", b"http://127.0.0.1:%d" % (port,),
-                persistent=False)
-
-            def check_error(failure):
-                """
-                Catch ConnectionRefused errors and response timeouts and return
-                False so that loop_until repeats the request.
-
-                Other error conditions will be passed down the errback chain.
-                """
-                failure.trap(ConnectionRefusedError, ResponseNeverReceived)
-                return False
-            response.addErrback(check_error)
-            return response
-
-        return loop_until(reactor, send_request)
-
-    def test_non_docker_port_collision(self):
-        """
-        ``DockerClient.add`` returns a ``Deferred`` that fails with
-        ``AddressInUse`` if the external port of one of the ``PortMap``
-        instances passed for ``ports`` is already in use on the system by
-        something other than a Docker container.
- """ - address_user = socket.socket() - self.addCleanup(address_user.close) - - address_user.bind(('', 0)) - used_address = address_user.getsockname() - - name = random_name(self) - d = self.start_container( - name, ports=[ - PortMap(internal_port=10000, external_port=used_address[1]), - ], - ) - return self.assertFailure(d, AddressInUse) - - def test_add_with_port(self): - """ - ``DockerClient.add`` accepts a ports argument which is passed to - Docker to expose those ports on the unit. - - Assert that the busybox-http-app returns the expected "Hello world!" - response. - - XXX: We should use a stable internal container instead. See - https://clusterhq.atlassian.net/browse/FLOC-120 - - XXX: The busybox-http-app returns headers in the body of its response, - hence this over complicated custom assertion. See - https://github.com/openshift/geard/issues/213 - """ - expected_response = b'Hello world!\n' - external_port = find_free_port()[1] - name = random_name(self) - d = self.start_container( - name, ports=[PortMap(internal_port=8080, - external_port=external_port)], - retry_on_port_collision=True, - ) - - d.addCallback( - lambda ignored: self.request_until_response(external_port)) - - def started(response): - d = content(response) - d.addCallback(lambda body: self.assertIn(expected_response, body)) - return d - d.addCallback(started) - return d - - def test_add_with_environment(self): - """ - ``DockerClient.add`` accepts an environment object whose ID and - variables are used when starting a docker image. - """ - docker_dir = FilePath(self.mktemp()) - docker_dir.makedirs() - docker_dir.child(b"Dockerfile").setContent( - b'FROM busybox\n' - b'CMD ["/bin/sh", "-c", ' - b'"while true; do env && echo WOOT && sleep 1; done"]' - ) - expected_variables = frozenset({ - 'key1': 'value1', - 'key2': 'value2', - }.items()) - unit_name = random_name(self) - - image = DockerImageBuilder(test=self, source_dir=docker_dir) - d = image.build() - - def image_built(image_name): - return self.start_container( - unit_name=unit_name, - image_name=image_name, - environment=Environment(variables=expected_variables), - ) - d.addCallback(image_built) - - def started(_): - output = "" - client = dockerpy_client() - while True: - output += client.logs(self.namespacing_prefix + unit_name) - if "WOOT" in output: - break - assertContainsAll( - output, test_case=self, - needles=['{}={}\n'.format(k, v) - for k, v in expected_variables], - ) - d.addCallback(started) - return d - - @flaky(u"FLOC-3875") - def test_pull_image_if_necessary(self): - """ - The Docker image is pulled if it is unavailable locally. - """ - client = dockerpy_client() - - path = FilePath(self.mktemp()) - path.makedirs() - path.child(b"Dockerfile.in").setContent( - b"FROM busybox\n" - b"CMD /bin/true\n" - ) - builder = DockerImageBuilder( - test=self, source_dir=path, - # We're going to manipulate the various tags on the image ourselves - # in this test. We'll do (the slightly more complicated) cleanup - # so the builder shouldn't (and will encounter errors if we let - # it). - cleanup=False, - ) - building = builder.build() - registry_listening = self.run_registry() - - def create_container((image_name, registry)): - registry_image = self.push_to_registry(image_name, registry) - - # And the image will (hopefully) have been downloaded again from - # the private registry in the next step, so cleanup that local - # image once the test finishes. 
- self.addCleanup( - client.remove_image, - image=registry_image.full_name - ) - - name = random_name(self) - docker_client = self.make_client() - self.addCleanup(docker_client.remove, name) - d = docker_client.add(name, registry_image.full_name) - d.addCallback( - lambda _: self.assertTrue( - client.inspect_image(registry_image.full_name) - ) - ) - return d - - d = gatherResults((building, registry_listening)) - d.addCallback(create_container) - return d - - def push_to_registry(self, image_name, registry): - """ - Push an image identified by a local tag to the given registry. - - :param unicode image_name: The local tag which identifies the image to - push. - :param Registry registry: The registry to which to push the image. - - :return: A ``DockerImage`` describing the image in the registry. Note - in particular the tag of the image in the registry will differ from - the local tag of the image. - """ - registry_name = random_name(self).lower() - registry_image = DockerImage( - # XXX: See FLOC-246 for followup improvements to - # ``flocker.control.DockerImage`` to allow parsing of alternative - # registry hostnames and ports. - repository=registry.repository + '/' + registry_name, - tag='latest', - ) - client = dockerpy_client() - - # Tag an image with a repository name matching the given registry. - client.tag( - image=image_name, repository=registry_image.repository, - tag=registry_image.tag, - ) - try: - client.push( - repository=registry_image.repository, - tag=registry_image.tag, - ) - finally: - # Remove the tag created above to make it possible to do the push. - client.remove_image(image=registry_image.full_name) - - return registry_image - - def run_registry(self): - """ - Start a registry in a container. - - The registry will be stopped and destroyed when the currently running - test finishes. - - :return: A ``Registry`` describing the registry which was started. - """ - registry_name = random_name(self) - registry_starting = self.start_container( - unit_name=registry_name, - image_name='registry:2', - ports=[ - PortMap( - internal_port=5000, - # Doesn't matter what port we expose this on. We'll - # discover what was assigned later. - external_port=0, - ), - ], - retry_on_port_collision=True, - ) - - def extract_listening_port(client): - listing = client.list() - - def listed(apps): - [app] = [app for app in apps if app.name == registry_name] - return next(iter(app.ports)).external_port - listing.addCallback(listed) - return listing - - registry_starting.addCallback(extract_listening_port) - - def wait_for_listening(external_port): - registry = Registry( - name=registry_name, port=external_port, - ) - registry_listening = self.request_until_response(registry.port) - registry_listening.addCallback(lambda ignored: registry) - return registry_listening - - registry_starting.addCallback(wait_for_listening) - - return registry_starting - - def _pull_timeout(self): - """ - Attempt to start an application using an image which must be pulled - from a registry but don't give the pull operation enough time to - complete. Assert that the result is a timeout error of some kind. - - :return: A ``Deferred`` firing with a two-tuple of a ``DockerImage`` - and a ``Registry``. The former represents the image we attempted - to use, the latter represents the registry we should have tried to - pull it from. 
- """ - client = dockerpy_client() - - # Run a local registry - running = self.run_registry() - - # Build a stub image - def build_dummy_image(registry): - path = FilePath(self.mktemp()) - path.makedirs() - path.child(b"Dockerfile.in").setContent( - b"FROM busybox\n" - b"CMD /bin/true\n" - ) - builder = DockerImageBuilder( - test=self, source_dir=path, - # We're going to manipulate the various tags on the image - # ourselves in this test. We'll do (the slightly more - # complicated) cleanup so the builder shouldn't (and will - # encounter errors if we let it). - cleanup=False, - ) - building = builder.build() - building.addCallback(lambda image_name: (image_name, registry)) - return building - running.addCallback(build_dummy_image) - - def cleanup_image(image_name): - for image in client.images(): - if image_name in image["RepoTags"]: - client.remove_image(image_name, force=True) - return - - def cleanup_registry(registry): - try: - client.unpause(self.namespacing_prefix + registry.name) - except APIError: - # Already unpaused - pass - - def setup_image((image_name, registry)): - registry_image = self.push_to_registry(image_name, registry) - - # The image shouldn't be downloaded during the run of this test. - # In case something goes wrong and it is downloaded, though, clean - # it up. - self.addCleanup(cleanup_image, image_name) - - # Pause the registry - client.pause(self.namespacing_prefix + registry.name) - - # Cannot stop paused containers to make sure it gets unpaused. - self.addCleanup(cleanup_registry, registry) - - # Create a DockerClient with a very short timeout - docker_client = DockerClient( - namespace=self.namespacing_prefix, long_timeout=1, - ) - # Add an application using the DockerClient, using the tag from the - # local registry - app_name = random_name(self) - d = docker_client.add(app_name, registry_image.full_name) - - # Assert that the timeout triggers. - # - # requests has a TimeoutError but timeout raises a ConnectionError. - # https://github.com/kennethreitz/requests/issues/2620 - # - # XXX DockerClient.add is our API. We could make it fail with a - # more coherent exception type if we wanted. - self.assertFailure(d, ReadTimeout) - d.addCallback(lambda ignored: (registry_image, registry)) - return d - running.addCallback(setup_image) - return running - - def test_pull_timeout(self): - """ - Pulling an image times-out if it takes longer than a provided timeout. - """ - return self._pull_timeout() - - def test_pull_timeout_pull(self): - """ - Image pull timeout does not affect subsequent pulls. - """ - # Note, this is the same image as test_pull_image_if_necessary, but - # they run at different times. Probably room for some refactoring to - # remove the duplication between them. - - # Run all of the code from test_pull_timeout - timing_out = self._pull_timeout() - - def pull_successfully((registry_image, registry)): - client = dockerpy_client() - # Resume the registry - client.unpause(self.namespacing_prefix + registry.name) - - # Create a DockerClient with the default timeout - docker_client = DockerClient(namespace=self.namespacing_prefix) - - # Add an application using the Client, using the tag from the local - # registry - app_name = random_name(self) - adding = docker_client.add(app_name, registry_image.full_name) - - # Assert that the application runs - return adding - timing_out.addCallback(pull_successfully) - return timing_out - - def test_namespacing(self): - """ - Containers are created with a namespace prefixed to their container - name. 
- """ - docker = dockerpy_client() - name = random_name(self) - client = self.make_client() - self.addCleanup(client.remove, name) - d = client.add(name, u"busybox:latest") - - def added(_): - self.assertTrue( - docker.inspect_container(self.namespacing_prefix + name)) - d.addCallback(added) - return d - - def test_null_environment(self): - """ - A container that does not include any environment variables contains - an empty ``environment`` in the return ``Unit``. - """ - docker_dir = FilePath(self.mktemp()) - docker_dir.makedirs() - docker_dir.child(b"Dockerfile").setContent( - b'FROM scratch\n' - b'MAINTAINER info@clusterhq.com\n' - b'CMD ["/bin/doesnotexist"]' - ) - name = random_name(self) - image = DockerImageBuilder(test=self, source_dir=docker_dir) - d = image.build() - - def image_built(image_name): - client = self.make_client() - self.create_container(client, name, image_name) - self.addCleanup(client.remove, name) - return client.list() - d.addCallback(image_built) - - def got_list(units): - unit = [unit for unit in units if unit.name == name][0] - self.assertIsNone(unit.environment) - d.addCallback(got_list) - return d - - def test_container_name(self): - """ - The container name stored on returned ``Unit`` instances matches the - expected container name. - """ - client = self.make_client() - name = random_name(self) - self.addCleanup(client.remove, name) - d = client.add(name, u"busybox:latest") - d.addCallback(lambda _: client.list()) - - def got_list(units): - unit = [unit for unit in units if unit.name == name][0] - self.assertEqual(unit.container_name, - self.namespacing_prefix + name) - d.addCallback(got_list) - return d - - def test_empty_environment(self): - """ - When a container with no custom environment variables is launched via - ``DockerClient.add`` the environment in the resulting ``Unit`` returned - from ``DockerClient.list`` will ignore the default HOME and PATH - environment variables, leaving the ``Unit`` with an Environment of - None. - """ - name = random_name(self) - d = self.start_container(name) - - def started(client): - deferred_units = client.list() - - def check_units(units): - unit = [unit for unit in units if unit.name == name][0] - self.assertIsNone(unit.environment) - - deferred_units.addCallback(check_units) - d.addCallback(started) - return d - - def test_list_only_custom_environment(self): - """ - When a container containing custom environment variables is launched - and the image used also injects environment variables, only the custom - variables we injected are returned by ``DockerClient.list``, whereas - variables set by the image are discarded. - - All Docker containers have a PATH environment variable. In addition, - the openshift/busybox-http-app image contains an STI_SCRIPTS_URL - environment variable. These are therefore disregarded the variables - disregarded in this test, whereas our custom environment is listed in - the returned Units. 
- - https://registry.hub.docker.com/u/openshift/busybox-http/dockerfile/ - """ - name = random_name(self) - environment = { - 'my_variable': 'some value', - 'another_variable': '12345' - } - environment = frozenset(environment.items()) - d = self.start_container( - name, - environment=Environment(variables=environment) - ) - - def started(client): - deferred_units = client.list() - - def check_units(units): - unit = [unit for unit in units if unit.name == name][0] - expected = Environment(variables=environment) - self.assertEqual(unit.environment, expected) - - deferred_units.addCallback(check_units) - - d.addCallback(started) - return d - - def test_add_with_volumes(self): - """ - ``DockerClient.add`` accepts a list of ``Volume`` instances which are - mounted within the container. - """ - docker_dir = FilePath(self.mktemp()) - docker_dir.makedirs() - docker_dir.child(b"Dockerfile").setContent( - b'FROM busybox\n' - b'CMD ["/bin/sh", "-c", ' - b'"touch /mnt1/a; touch /mnt2/b"]' - ) - image = DockerImageBuilder(test=self, source_dir=docker_dir) - d = image.build() - - def image_built(image_name): - unit_name = random_name(self) - - path1 = FilePath(self.mktemp()) - path1.makedirs() - path2 = FilePath(self.mktemp()) - path2.makedirs() - - d = self.start_container( - unit_name=unit_name, - image_name=image_name, - volumes=[ - Volume(node_path=path1, container_path=FilePath(b"/mnt1")), - Volume( - node_path=path2, container_path=FilePath(b"/mnt2"))], - expected_states=(u'inactive',), - ) - return d.addCallback(lambda _: (path1, path2)) - d.addCallback(image_built) - - def started((path1, path2)): - expected1 = path1.child(b"a") - expected2 = path2.child(b"b") - for _ in range(100): - if expected1.exists() and expected2.exists(): - return - else: - time.sleep(0.1) - self.fail("Files never created.") - return d.addCallback(started) - - def test_add_with_memory_limit(self): - """ - ``DockerClient.add`` accepts an integer mem_limit parameter which is - passed to Docker when creating a container as the maximum amount of RAM - available to that container. - """ - MEMORY_100MB = 100000000 - name = random_name(self) - d = self.start_container(name, mem_limit=MEMORY_100MB) - - def started(_): - docker = dockerpy_client() - data = docker.inspect_container(self.namespacing_prefix + name) - self.assertEqual(data[u"Config"][u"Memory"], - MEMORY_100MB) - d.addCallback(started) - return d - - def test_add_with_cpu_shares(self): - """ - ``DockerClient.add`` accepts an integer cpu_shares parameter which is - passed to Docker when creating a container as the CPU shares weight - for that container. This is a relative weight for CPU time versus other - containers and does not directly constrain CPU usage, i.e. a CPU share - constrained container can still use 100% CPU if other containers are - idle. Default shares when unspecified is 1024. - """ - name = random_name(self) - d = self.start_container(name, cpu_shares=512) - - def started(_): - docker = dockerpy_client() - data = docker.inspect_container(self.namespacing_prefix + name) - self.assertEqual(data[u"Config"][u"CpuShares"], 512) - d.addCallback(started) - return d - - def test_add_without_cpu_or_mem_limits(self): - """ - ``DockerClient.add`` when creating a container with no mem_limit or - cpu_shares specified will create a container without these resource - limits, returning integer 0 as the values for Memory and CpuShares from - its API when inspecting such a container. 
- """ - name = random_name(self) - d = self.start_container(name) - - def started(_): - docker = dockerpy_client() - data = docker.inspect_container(self.namespacing_prefix + name) - self.assertEqual(data[u"Config"][u"Memory"], 0) - self.assertEqual(data[u"Config"][u"CpuShares"], 0) - d.addCallback(started) - return d - - def start_restart_policy_container(self, mode, restart_policy): - """ - Start a container for testing restart policies. - - :param unicode mode: Mode of container. One of - - ``"failure"``: The container will always exit with a failure. - - ``"success-then-sleep"``: The container will exit with success - once, then sleep forever. - - ``"failure-then-sucess"``: The container will exit with failure - once, then with failure. - :param IRestartPolicy restart_policy: The restart policy to use for - the container. - - :returns Deferred: A deferred that fires with the number of times the - container was started. - """ - docker_dir = FilePath(__file__).sibling('retry-docker') - name = random_name(self) - data = FilePath(self.mktemp()) - data.makedirs() - count = data.child('count') - count.setContent("0") - marker = data.child('marker') - - image = DockerImageBuilder(test=self, source_dir=docker_dir) - d = image.build() - - def image_built(image_name): - if mode == u"success-then-sleep": - expected_states = (u'active',) - else: - expected_states = (u'inactive',) - - return self.start_container( - name, image_name=image_name, - restart_policy=restart_policy, - environment=Environment(variables={u'mode': mode}), - volumes=[ - Volume(node_path=data, container_path=FilePath(b"/data"))], - expected_states=expected_states) - d.addCallback(image_built) - - if mode == u"success-then-sleep": - # TODO: if the `run` script fails for any reason, - # then this will loop forever. - - d.addCallback(lambda ignored: loop_until(reactor, marker.exists)) - - d.addCallback(lambda ignored: count.getContent()) - return d - - def test_restart_policy_never(self): - """ - An container with a restart policy of never isn't restarted - after it exits. - """ - d = self.start_restart_policy_container( - mode=u"failure", restart_policy=RestartNever()) - - d.addCallback(self.assertEqual, "1") - return d - - @flaky(u'FLOC-2840') - def test_restart_policy_always(self): - """ - An container with a restart policy of always is restarted - after it exits. - """ - d = self.start_restart_policy_container( - mode=u"success-then-sleep", restart_policy=RestartAlways()) - - d.addCallback(self.assertEqual, "2") - return d - - @flaky([u'FLOC-3742', u'FLOC-3746']) - def test_restart_policy_on_failure(self): - """ - An container with a restart policy of on-failure is restarted - after it exits with a non-zero result. - """ - d = self.start_restart_policy_container( - mode=u"failure-then-success", restart_policy=RestartOnFailure()) - - d.addCallback(self.assertEqual, "2") - return d - - @flaky([u'FLOC-3742', u'FLOC-3746']) - def test_restart_policy_on_failure_maximum_count(self): - """ - A container with a restart policy of on-failure and a maximum - retry count is not restarted if it fails as many times than the - specified maximum. - """ - d = self.start_restart_policy_container( - mode=u"failure", - restart_policy=RestartOnFailure(maximum_retry_count=5)) - - # A Docker change e721ed9b5319e8e7c1daf87c34690f8a4e62c9e3 means that - # this value depends on the version of Docker. 
-        d.addCallback(self.assertIn, ("5", "6"))
-        return d
-
-    def test_command_line(self):
-        """
-        A container with custom command line is run with those arguments.
-        """
-        external_port = find_free_port()[1]
-        name = random_name(self)
-        d = self.start_container(
-            name, image_name=u"busybox",
-            # Pass in pvector since this likely to be what caller actually
-            # passes in:
-            command_line=pvector([u"sh", u"-c", u"""\
-echo -n '#!/bin/sh
-echo -n "HTTP/1.1 200 OK\r\n\r\nhi"
-' > /tmp/script.sh;
-chmod +x /tmp/script.sh;
-nc -ll -p 8080 -e /tmp/script.sh
-"""]),
-            ports=[PortMap(internal_port=8080,
-                           external_port=external_port)])
-
-        d.addCallback(
-            lambda ignored: self.request_until_response(external_port))
-
-        def started(response):
-            d = content(response)
-            d.addCallback(lambda body: self.assertEqual(b"hi", body))
-            return d
-        d.addCallback(started)
-        return d
-
-
-class MakeResponseTests(TestCase):
-    """
-    Tests for ``make_response``.
-    """
-    def test_str(self):
-        """
-        ``str(make_response(...))`` returns a string giving the response code.
-        """
-        self.assertEqual(
-            str(make_response(123, "Something")),
-            "<Response [123]>",
-        )
-
-    def test_apierror_str(self):
-        """
-        A string representation can be constructed of an ``APIError``
-        constructed with the response returned by ``make_response``.
-        """
-        self.assertEqual(
-            str(APIError("", make_response(500, "Simulated server error"))),
-            "500 Server Error: Simulated server error",
-        )
-
-
-class DockerClientTests(AsyncTestCase):
-    """
-    Tests for ``DockerClient`` specifically.
-    """
-    @if_docker_configured
-    def setUp(self):
-        super(DockerClientTests, self).setUp()
-
-    def test_default_namespace(self):
-        """
-        The default namespace is `u"flocker--"`.
-        """
-        docker = dockerpy_client()
-        name = random_name(self)
-        client = DockerClient()
-        self.addCleanup(client.remove, name)
-        d = client.add(name, u"busybox:latest")
-        d.addCallback(lambda _: self.assertTrue(
-            docker.inspect_container(u"flocker--" + name)))
-        return d
-
-    def test_list_removed_containers(self):
-        """
-        ``DockerClient.list`` does not list containers which are removed,
-        during its operation, from another thread.
-        """
-        patcher = MonkeyPatcher()
-
-        namespace = namespace_for_test(self)
-        flocker_docker_client = DockerClient(namespace=namespace)
-
-        name1 = random_name(self)
-        adding_unit1 = flocker_docker_client.add(name1, ANY_IMAGE)
-        self.addCleanup(flocker_docker_client.remove, name1)
-
-        name2 = random_name(self)
-        adding_unit2 = flocker_docker_client.add(name2, ANY_IMAGE)
-        self.addCleanup(flocker_docker_client.remove, name2)
-
-        docker_client = flocker_docker_client._client
-        docker_client_containers = docker_client.containers
-
-        def simulate_missing_containers(*args, **kwargs):
-            """
-            Remove a container before returning the original list.
- """ - containers = docker_client_containers(*args, **kwargs) - container_name1 = flocker_docker_client._to_container_name(name1) - docker_client.remove_container( - container=container_name1, force=True) - return containers - - adding_units = gatherResults([adding_unit1, adding_unit2]) - - def get_list(ignored): - patcher.addPatch( - docker_client, - 'containers', - simulate_missing_containers - ) - patcher.patch() - return flocker_docker_client.list() - - listing_units = adding_units.addCallback(get_list) - - def check_list(units): - patcher.restore() - self.assertEqual( - [name2], sorted([unit.name for unit in units]) - ) - running_assertions = listing_units.addCallback(check_list) - - return running_assertions - - def error_passthrough_test(self, method_name): - """ - If the given method name on the underyling ``Docker`` client has a - non-404 error, that gets passed through to ``Docker.list()``. - - :param str method_name: Method of a docker ``Client``. - :return: ``Deferred`` firing on test success. - """ - name = random_name(self) - client = DockerClient() - self.addCleanup(client.remove, name) - d = client.add(name, u"busybox:latest") - - response = make_response(500, "Simulated error") - - def error(name): - raise APIError("", response) - - def added(_): - # Monekypatch cause triggering non-404 errors from - # inspect_container is hard. - self.patch(client._client, method_name, error) - return client.list() - d.addCallback(added) - return self.assertFailure(d, APIError) - - def test_list_error_inspecting_container(self): - """ - If an error occurs inspecting a container it is passed through. - """ - return self.error_passthrough_test("inspect_container") - - def test_list_error_inspecting_image(self): - """ - If an error occurs inspecting an image it is passed through. - """ - return self.error_passthrough_test("inspect_image") - - -class NamespacedDockerClientTests(GenericDockerClientTests): - """ - Functional tests for ``NamespacedDockerClient``. - """ - @if_docker_configured - def setUp(self): - super(NamespacedDockerClientTests, self).setUp() - self.namespace = namespace_for_test(self) - self.namespacing_prefix = BASE_NAMESPACE + self.namespace + u"--" - - def make_client(self): - return NamespacedDockerClient(self.namespace) - - def create_container(self, client, name, image): - """ - Create (but don't start) a container via the supplied client. - - :param DockerClient client: The Docker API client. - :param unicode name: The container name. - :param unicode image: The image name. - """ - container_name = client._client._to_container_name(name) - client._client._client.create_container( - name=container_name, image=image) - - def test_isolated_namespaces(self): - """ - Containers in one namespace are not visible in another namespace. 
- """ - client = NamespacedDockerClient(namespace=namespace_for_test(self)) - client2 = NamespacedDockerClient(namespace=namespace_for_test(self)) - name = random_name(self) - - self.addCleanup(client.remove, name) - d = client.add(name, u"busybox:latest") - d.addCallback(lambda _: client2.list()) - d.addCallback(self.assertEqual, set()) - return d diff --git a/flocker/node/functional/test_script.py b/flocker/node/functional/test_script.py index 23a446e3d7..3ce83cab03 100644 --- a/flocker/node/functional/test_script.py +++ b/flocker/node/functional/test_script.py @@ -13,13 +13,6 @@ class FlockerDatasetAgentTests(make_script_tests(b"flocker-dataset-agent")): """ -class FlockerContainerAgentTests( - make_script_tests(b"flocker-container-agent")): - """ - Tests for ``flocker-container-agent``. - """ - - class FlockerDiagnosticsTests( make_script_tests(b"flocker-diagnostics")): """ diff --git a/flocker/node/script.py b/flocker/node/script.py index 4fef469bff..3f60309f8e 100644 --- a/flocker/node/script.py +++ b/flocker/node/script.py @@ -34,7 +34,7 @@ ICommandLineScript, flocker_standard_options, FlockerScriptRunner, main_for_service, enable_profiling, disable_profiling) -from . import P2PManifestationDeployer, ApplicationNodeDeployer +from . import P2PManifestationDeployer from ._loop import AgentLoopService from .exceptions import StorageInitializationError from .diagnostics import ( @@ -55,7 +55,6 @@ __all__ = [ "flocker_dataset_agent_main", - "flocker_container_agent_main", "flocker_diagnostics_main", ] @@ -85,31 +84,6 @@ def flocker_dataset_agent_main(): ).main() -def flocker_container_agent_main(): - """ - Implementation of the ``flocker-container-agent`` command line script. - - This starts a Docker-based container convergence agent. - """ - def deployer_factory(cluster_uuid, **kwargs): - return ApplicationNodeDeployer(**kwargs) - service_factory = AgentServiceFactory( - deployer_factory=deployer_factory - ).get_service - agent_script = AgentScript(service_factory=service_factory) - - # Use CPU time instead of wallclock time. - pr = cProfile.Profile(clock) - - signal.signal(signal.SIGUSR1, partial(enable_profiling, pr)) - signal.signal(signal.SIGUSR2, partial(disable_profiling, pr, 'container')) - - return FlockerScriptRunner( - script=agent_script, - options=ContainerAgentOptions() - ).main() - - LOG_GET_EXTERNAL_IP = ActionType(u"flocker:node:script:get_external_ip", fields(host=unicode, port=int), fields(local_ip=unicode), @@ -265,17 +239,6 @@ class DatasetAgentOptions(_AgentOptions): synopsis = _AgentOptions.synopsis.format("flocker-dataset-agent") -class ContainerAgentOptions(_AgentOptions): - """ - Command line options for ``flocker-container-agent``. - """ - longdesc = """\ - flocker-container-agent runs a container convergence agent on a node. - """ - - synopsis = _AgentOptions.synopsis.format("flocker-container-agent") - - @implementer(ICommandLineScript) class AgentScript(PClass): """ diff --git a/flocker/node/test/test_container.py b/flocker/node/test/test_container.py deleted file mode 100644 index c3257df454..0000000000 --- a/flocker/node/test/test_container.py +++ /dev/null @@ -1,1694 +0,0 @@ -# Copyright ClusterHQ Inc. See LICENSE file for details. - -""" -Tests for ``flocker.node._container``. -""" - -from uuid import UUID, uuid4 - -from pyrsistent import pset, pvector - -from bitmath import GiB - -from twisted.python.filepath import FilePath - -from .. 
import ( - ApplicationNodeDeployer, NoOp, NOOP_SLEEP_TIME -) -from ..testtools import ( - EMPTY, - EMPTY_STATE, empty_node_local_state, - assert_calculated_changes_for_deployer, to_node, -) -from ...testtools import TestCase -from ...control import ( - Application, DockerImage, Deployment, Node, Port, Link, - NodeState, DeploymentState, RestartNever, RestartAlways, RestartOnFailure -) - -from .. import sequentially, in_parallel - -from .._deploy import ( - NodeLocalState, -) -from .._container import ( - StartApplication, StopApplication, _link_environment -) -from ...control.testtools import InMemoryStatePersister -from ...control._model import ( - AttachedVolume, Dataset, Manifestation, PersistentState, -) -from .._docker import ( - FakeDockerClient, AlreadyExists, Unit, PortMap, Environment, - DockerClient, Volume as DockerVolume) - -from .istatechange import make_istatechange_tests - - -# This models an application without a volume. -APPLICATION_WITHOUT_VOLUME = Application( - name=u"stateless", - image=DockerImage.from_string(u"clusterhq/testing-stateless"), - volume=None, -) - -# This models an application that has a volume. -APPLICATION_WITH_VOLUME_NAME = u"psql-clusterhq" -DATASET_ID = unicode(uuid4()) -DATASET = Dataset(dataset_id=DATASET_ID) -APPLICATION_WITH_VOLUME_MOUNTPOINT = FilePath(b"/var/lib/postgresql") -APPLICATION_WITH_VOLUME_IMAGE = u"clusterhq/postgresql:9.1" -APPLICATION_WITH_VOLUME = Application( - name=APPLICATION_WITH_VOLUME_NAME, - image=DockerImage.from_string(APPLICATION_WITH_VOLUME_IMAGE), - volume=AttachedVolume( - manifestation=Manifestation(dataset=DATASET, primary=True), - mountpoint=APPLICATION_WITH_VOLUME_MOUNTPOINT, - ), - links=frozenset(), -) -MANIFESTATION = APPLICATION_WITH_VOLUME.volume.manifestation - -DATASET_WITH_SIZE = Dataset(dataset_id=DATASET_ID, - metadata=DATASET.metadata, - maximum_size=1024 * 1024 * 100) - -APPLICATION_WITH_VOLUME_SIZE = Application( - name=APPLICATION_WITH_VOLUME_NAME, - image=DockerImage.from_string(APPLICATION_WITH_VOLUME_IMAGE), - volume=AttachedVolume( - manifestation=Manifestation(dataset=DATASET_WITH_SIZE, - primary=True), - mountpoint=APPLICATION_WITH_VOLUME_MOUNTPOINT, - ), - links=frozenset(), -) - - -def assert_application_calculated_changes( - case, node_state, node_config, nonmanifest_datasets, expected_changes, - additional_node_states=frozenset(), additional_node_config=frozenset(), -): - """ - Assert that ``ApplicationNodeDeployer`` calculates certain changes in a - certain circumstance. - - :see: ``assert_calculated_changes_for_deployer``. - """ - deployer = ApplicationNodeDeployer( - hostname=node_state.hostname, - node_uuid=node_state.uuid, - docker_client=FakeDockerClient(), - ) - return assert_calculated_changes_for_deployer( - case, deployer, node_state, node_config, nonmanifest_datasets, - additional_node_states, additional_node_config, expected_changes, - NodeLocalState(node_state=node_state) - ) - - -class ApplicationNodeDeployerAttributesTests(TestCase): - """ - Tests for attributes and initialiser arguments of - `ApplicationNodeDeployer`. - """ - def test_docker_client_default(self): - """ - ``ApplicationNodeDeployer.docker_client`` is a ``DockerClient`` by - default. - """ - self.assertIsInstance( - ApplicationNodeDeployer(u"example.com", None).docker_client, - DockerClient - ) - - def test_docker_override(self): - """ - ``ApplicationNodeDeployer.docker_client`` can be overridden in the - constructor. 
- """ - dummy_docker_client = object() - self.assertIs( - dummy_docker_client, - ApplicationNodeDeployer( - u'example.com', - docker_client=dummy_docker_client).docker_client - ) - - -StartApplicationIStateChangeTests = make_istatechange_tests( - StartApplication, - dict( - application=APPLICATION_WITH_VOLUME, - node_state=NodeState(hostname="node1.example.com") - ), - dict( - application=APPLICATION_WITH_VOLUME.set(name=u"throwaway-app"), - node_state=NodeState(hostname="node2.example.com") - ) -) -StopApplicationIStageChangeTests = make_istatechange_tests( - StopApplication, - dict(application=APPLICATION_WITH_VOLUME), - dict(application=APPLICATION_WITH_VOLUME.set(name=u"throwaway-app")), -) - - -class StartApplicationTests(TestCase): - """ - Tests for ``StartApplication``. - """ - def test_start(self): - """ - ``StartApplication`` accepts an application object and when ``run()`` - is called returns a ``Deferred`` which fires when the docker container - has been added and started. - """ - fake_docker = FakeDockerClient() - api = ApplicationNodeDeployer(u'example.com', - docker_client=fake_docker) - docker_image = DockerImage(repository=u'clusterhq/flocker', - tag=u'release-14.0') - ports = frozenset([Port(internal_port=80, external_port=8080)]) - application = Application( - name=u'site-example.com', - image=docker_image, - ports=ports, - links=frozenset(), - ) - start_result = StartApplication( - application=application, - node_state=EMPTY_NODESTATE - ).run(api, state_persister=InMemoryStatePersister()) - exists_result = fake_docker.exists(unit_name=application.name) - - port_maps = pset( - [PortMap(internal_port=80, external_port=8080)] - ) - self.assertEqual( - (None, True, docker_image.full_name, port_maps), - (self.successResultOf(start_result), - self.successResultOf(exists_result), - fake_docker._units[application.name].container_image, - fake_docker._units[application.name].ports) - ) - - def test_already_exists(self): - """ - ``StartApplication.run`` returns a `Deferred` which errbacks with - an ``AlreadyExists`` error if there is already a unit with the supplied - application name. - """ - api = ApplicationNodeDeployer(u'example.com', - docker_client=FakeDockerClient()) - application = Application( - name=u'site-example.com', - image=DockerImage(repository=u'clusterhq/flocker', - tag=u'release-14.0'), - links=frozenset(), - ) - - result1 = StartApplication( - application=application, - node_state=EMPTY_NODESTATE - ).run(api, state_persister=InMemoryStatePersister()) - self.successResultOf(result1) - - result2 = StartApplication( - application=application, - node_state=EMPTY_NODESTATE - ).run(api, state_persister=InMemoryStatePersister()) - self.failureResultOf(result2, AlreadyExists) - - def test_environment_supplied_to_docker(self): - """ - ``StartApplication.run()`` passes the environment dictionary of the - application to ``DockerClient.add`` as an ``Environment`` instance. 
- """ - fake_docker = FakeDockerClient() - deployer = ApplicationNodeDeployer(u'example.com', fake_docker) - - application_name = u'site-example.com' - variables = frozenset({u'foo': u"bar", u"baz": u"qux"}.iteritems()) - application = Application( - name=application_name, - image=DockerImage(repository=u'clusterhq/postgresql', - tag=u'9.3.5'), - environment=variables.copy(), - links=frozenset(), - ports=(), - ) - - StartApplication( - application=application, - node_state=EMPTY_NODESTATE, - ).run(deployer, state_persister=InMemoryStatePersister()) - - expected_environment = Environment(variables=variables.copy()) - - self.assertEqual( - expected_environment, - fake_docker._units[application_name].environment - ) - - def test_environment_not_supplied(self): - """ - ``StartApplication.run()`` only passes an ``Environment`` instance - if the application defines an environment. - """ - fake_docker = FakeDockerClient() - deployer = ApplicationNodeDeployer(u'example.com', fake_docker) - - application_name = u'site-example.com' - application = Application( - name=application_name, - image=DockerImage(repository=u'clusterhq/postgresql', - tag=u'9.3.5'), - environment=None, - links=frozenset(), - ) - - StartApplication( - application=application, - node_state=EMPTY_NODESTATE, - ).run(deployer, state_persister=InMemoryStatePersister()) - - self.assertEqual( - None, - fake_docker._units[application_name].environment - ) - - def test_links(self): - """ - ``StartApplication.run()`` passes environment variables to connect to - the remote application to ``DockerClient.add``. - """ - fake_docker = FakeDockerClient() - deployer = ApplicationNodeDeployer(u'example.com', fake_docker) - - application_name = u'site-example.com' - application = Application( - name=application_name, - image=DockerImage(repository=u'clusterhq/postgresql', - tag=u'9.3.5'), - links=frozenset([Link(alias="alias", local_port=80, - remote_port=8080)])) - - StartApplication( - application=application, - node_state=EMPTY_NODESTATE, - ).run(deployer, state_persister=InMemoryStatePersister()) - - variables = frozenset({ - 'ALIAS_PORT_80_TCP': 'tcp://example.com:8080', - 'ALIAS_PORT_80_TCP_ADDR': 'example.com', - 'ALIAS_PORT_80_TCP_PORT': '8080', - 'ALIAS_PORT_80_TCP_PROTO': 'tcp', - }.iteritems()) - expected_environment = Environment(variables=variables.copy()) - - self.assertEqual( - expected_environment, - fake_docker._units[application_name].environment - ) - - def test_volumes(self): - """ - ``StartApplication.run()`` passes the appropriate volume arguments to - ``DockerClient.add`` based on the application's volume. 
- """ - DATASET_ID = unicode(uuid4()) - fake_docker = FakeDockerClient() - deployer = ApplicationNodeDeployer(u'example.com', fake_docker) - node_path = FilePath(b"/flocker/" + DATASET_ID.encode("ascii")) - - mountpoint = FilePath(b"/mymount") - application_name = u'site-example.com' - application = Application( - name=application_name, - image=DockerImage(repository=u'clusterhq/postgresql', - tag=u'9.3.5'), - links=frozenset(), - volume=AttachedVolume( - manifestation=Manifestation( - dataset=Dataset(dataset_id=DATASET_ID), - primary=True), - mountpoint=mountpoint)) - - StartApplication( - application=application, - node_state=EMPTY_NODESTATE.set( - "paths", {DATASET_ID: node_path}), - ).run(deployer, state_persister=InMemoryStatePersister()) - - self.assertEqual( - pset([DockerVolume(node_path=node_path, - container_path=mountpoint)]), - fake_docker._units[application_name].volumes - ) - - def test_memory_limit(self): - """ - ``StartApplication.run()`` passes an ``Application``'s mem_limit to - ``DockerClient.add`` which is used when creating a Unit. - """ - EXPECTED_MEMORY_LIMIT = 100000000 - fake_docker = FakeDockerClient() - deployer = ApplicationNodeDeployer(u'example.com', fake_docker) - - application_name = u'site-example.com' - application = Application( - name=application_name, - image=DockerImage(repository=u'clusterhq/postgresql', - tag=u'9.3.5'), - environment=None, - links=frozenset(), - memory_limit=EXPECTED_MEMORY_LIMIT - ) - - StartApplication( - application=application, - node_state=EMPTY_NODESTATE, - ).run(deployer, state_persister=InMemoryStatePersister()) - - self.assertEqual( - EXPECTED_MEMORY_LIMIT, - fake_docker._units[application_name].mem_limit - ) - - def test_cpu_shares(self): - """ - ``StartApplication.run()`` passes an ``Application``'s cpu_shares to - ``DockerClient.add`` which is used when creating a Unit. - """ - EXPECTED_CPU_SHARES = 512 - fake_docker = FakeDockerClient() - deployer = ApplicationNodeDeployer(u'example.com', fake_docker) - - application_name = u'site-example.com' - application = Application( - name=application_name, - image=DockerImage(repository=u'clusterhq/postgresql', - tag=u'9.3.5'), - environment=None, - links=frozenset(), - cpu_shares=EXPECTED_CPU_SHARES - ) - - StartApplication( - application=application, - node_state=EMPTY_NODESTATE, - ).run(deployer, state_persister=InMemoryStatePersister()) - - self.assertEqual( - EXPECTED_CPU_SHARES, - fake_docker._units[application_name].cpu_shares - ) - - def test_restart_policy(self): - """ - ``StartApplication.run()`` passes ``RestartNever`` to - ``DockerClient.add`` which is used when creating a Unit. - - It doesn't pass the ``Application``\ 's ``restart_policy`` because - ``RestartNever`` is the only implemented policy. See FLOC-2449. - """ - policy = RestartAlways() - fake_docker = FakeDockerClient() - deployer = ApplicationNodeDeployer(u'example.com', fake_docker) - - application_name = u'site-example.com' - application = Application( - name=application_name, - image=DockerImage(repository=u'clusterhq/postgresql', - tag=u'9.3.5'), - restart_policy=policy, - ) - - StartApplication( - application=application, - node_state=EMPTY_NODESTATE, - ).run(deployer, state_persister=InMemoryStatePersister()) - - [unit] = self.successResultOf(fake_docker.list()) - self.assertEqual( - unit.restart_policy, - RestartNever()) - - def test_command_line(self): - """ - ``StartApplication.run()`` passes an ``Application``'s - ``command_line`` to ``DockerClient.add``. 
- """ - command_line = [u"hello", u"there"] - fake_docker = FakeDockerClient() - deployer = ApplicationNodeDeployer(u'example.com', fake_docker) - - application_name = u'site-example.com' - application = Application( - name=application_name, - image=DockerImage.from_string(u"postgresql"), - command_line=command_line) - - StartApplication( - application=application, - node_state=EMPTY_NODESTATE, - ).run(deployer, state_persister=InMemoryStatePersister()) - - self.assertEqual( - fake_docker._units[application_name].command_line, - pvector(command_line), - ) - - -class LinkEnviromentTests(TestCase): - """ - Tests for ``_link_environment``. - """ - - def test_link_environment(self): - """ - ``_link_environment(link)`` returns a dictonary - with keys used by docker to represent links. Specifically - ``_PORT__`` and the broken out variants - ``_ADDR``, ``_PORT`` and ``_PROTO``. - """ - environment = _link_environment( - protocol="tcp", - alias="somealias", - local_port=80, - hostname=u"the-host", - remote_port=8080) - self.assertEqual( - environment, - { - u'SOMEALIAS_PORT_80_TCP': u'tcp://the-host:8080', - u'SOMEALIAS_PORT_80_TCP_PROTO': u'tcp', - u'SOMEALIAS_PORT_80_TCP_ADDR': u'the-host', - u'SOMEALIAS_PORT_80_TCP_PORT': u'8080', - }) - - -class StopApplicationTests(TestCase): - """ - Tests for ``StopApplication``. - """ - def test_stop(self): - """ - ``StopApplication`` accepts an application object and when ``run()`` - is called returns a ``Deferred`` which fires when the container - has been removed. - """ - fake_docker = FakeDockerClient() - api = ApplicationNodeDeployer(u'example.com', - docker_client=fake_docker) - application = Application( - name=u'site-example.com', - image=DockerImage(repository=u'clusterhq/flocker', - tag=u'release-14.0'), - links=frozenset(), - ) - - StartApplication( - application=application, - node_state=EMPTY_NODESTATE - ).run(api, state_persister=InMemoryStatePersister()) - existed = fake_docker.exists(application.name) - stop_result = StopApplication( - application=application, - ).run(api, state_persister=InMemoryStatePersister()) - exists_result = fake_docker.exists(unit_name=application.name) - - self.assertEqual( - (None, True, False), - (self.successResultOf(stop_result), - self.successResultOf(existed), - self.successResultOf(exists_result)) - ) - - def test_does_not_exist(self): - """ - ``StopApplication.run()`` does not errback if the application does - not exist. 
- """ - api = ApplicationNodeDeployer(u'example.com', - docker_client=FakeDockerClient()) - application = Application( - name=u'site-example.com', - image=DockerImage(repository=u'clusterhq/flocker', - tag=u'release-14.0'), - links=frozenset(), - ) - result = StopApplication( - application=application, - ).run(api, state_persister=InMemoryStatePersister()) - result = self.successResultOf(result) - - self.assertIs(None, result) -APP_NAME = u"site-example.com" -UNIT_FOR_APP = Unit(name=APP_NAME, - container_name=APP_NAME, - container_image=u"flocker/wordpress:latest", - activation_state=u'active') -APP = Application( - name=APP_NAME, - image=DockerImage.from_string(UNIT_FOR_APP.container_image) -) -APP_NAME2 = u"site-example.net" -UNIT_FOR_APP2 = Unit(name=APP_NAME2, - container_name=APP_NAME2, - container_image=u"flocker/wordpress:latest", - activation_state=u'active') -APP2 = Application( - name=APP_NAME2, - image=DockerImage.from_string(UNIT_FOR_APP2.container_image) -) -# https://clusterhq.atlassian.net/browse/FLOC-1926 -EMPTY_NODESTATE = NodeState(hostname=u"example.com", uuid=uuid4(), - manifestations={}, devices={}, paths={}, - applications=[]) - - -class ApplicationNodeDeployerDiscoverNodeConfigurationTests( - TestCase): - """ - Tests for ``ApplicationNodeDeployer.discover_local_state``. - """ - def setUp(self): - super( - ApplicationNodeDeployerDiscoverNodeConfigurationTests, self - ).setUp() - self.hostname = u"example.com" - self.node_uuid = uuid4() - # https://clusterhq.atlassian.net/browse/FLOC-1926 - self.EMPTY_NODESTATE = NodeState( - hostname=self.hostname, - uuid=self.node_uuid, - manifestations={}, devices={}, paths={}, - applications=[]) - - def _verify_discover_state_applications( - self, units, expected_applications, - current_state=None, start_applications=False): - """ - Given Docker units, verifies that the correct applications are - discovered on the node by ``discover_state``. - - :param units: ``units`` to pass into the constructor of the - ``FakeDockerClient``. - - :param expected_applications: A ``PSet`` of ``Application`` instances - expected to be in the ``NodeState`` returned by ``discover_state`` - given the ``units`` in the ``FakeDockerClient``. - - :param NodeState current_state: The local_state to pass into the call - of ``discover_state``. - - :param bool start_applications: Whether to run ``StartApplication`` on - each of the applications before running ``discover_state``. - """ - if current_state is None: - current_state = self.EMPTY_NODESTATE - fake_docker = FakeDockerClient(units=units) - api = ApplicationNodeDeployer( - self.hostname, - node_uuid=self.node_uuid, - docker_client=fake_docker, - ) - if start_applications: - for app in expected_applications: - StartApplication( - node_state=NodeState(uuid=api.node_uuid, - hostname=api.hostname), - application=app - ).run(api, state_persister=InMemoryStatePersister()) - cluster_state = DeploymentState(nodes={current_state}) - d = api.discover_state(cluster_state, - persistent_state=PersistentState()) - - self.assertEqual(NodeState(uuid=api.node_uuid, hostname=api.hostname, - applications=expected_applications), - self.successResultOf(d).node_state) - - def test_discover_none(self): - """ - ``ApplicationNodeDeployer.discover_state`` returns an empty - ``NodeState`` if there are no Docker containers on the host. 
- """ - self._verify_discover_state_applications({}, []) - - def test_discover_one(self): - """ - ``ApplicationNodeDeployer.discover_state`` returns ``NodeState`` - with a a list of running ``Application``\ s; one for each active - container. - """ - self._verify_discover_state_applications({APP_NAME: UNIT_FOR_APP}, - [APP]) - - def test_discover_multiple(self): - """ - ``ApplicationNodeDeployer.discover_state`` returns a - ``NodeState`` with a running ``Application`` for every active - container on the host. - """ - units = {APP_NAME: UNIT_FOR_APP, APP_NAME2: UNIT_FOR_APP2} - applications = [APP, APP2] - self._verify_discover_state_applications(units, applications) - - def test_discover_application_with_cpushares(self): - """ - An ``Application`` with a cpu_shares value is discovered from a - ``Unit`` with a cpu_shares value. - """ - unit1 = UNIT_FOR_APP.set("cpu_shares", 512) - units = {unit1.name: unit1} - applications = [APP.set("cpu_shares", 512)] - self._verify_discover_state_applications(units, applications) - - def test_discover_application_with_memory_limit(self): - """ - An ``Application`` with a memory_limit value is discovered from a - ``Unit`` with a mem_limit value. - """ - memory_limit = 104857600 - unit1 = UNIT_FOR_APP.set("mem_limit", memory_limit) - units = {unit1.name: unit1} - applications = [APP.set("memory_limit", memory_limit)] - self._verify_discover_state_applications(units, applications) - - def test_discover_application_with_environment(self): - """ - An ``Application`` with ``Environment`` objects is discovered from a - ``Unit`` with ``Environment`` objects. - """ - environment_variables = ( - (b'CUSTOM_ENV_A', b'a value'), - (b'CUSTOM_ENV_B', b'something else'), - ) - environment = Environment(variables=environment_variables) - unit1 = UNIT_FOR_APP.set("environment", environment) - units = {unit1.name: unit1} - applications = [APP.set("environment", dict(environment_variables))] - self._verify_discover_state_applications(units, applications) - - def test_discover_application_with_environment_and_links(self): - """ - An ``Application`` with ``Environment`` and ``Link`` objects is - discovered from a ``Unit`` with both custom environment variables and - environment variables representing container links. The environment - variables taking the format _PORT__TCP are separated in - to ``Link`` representations in the ``Application``. - """ - environment_variables = ( - (b'CUSTOM_ENV_A', b'a value'), - (b'CUSTOM_ENV_B', b'something else'), - ) - link_environment_variables = ( - (b'APACHE_PORT_80_TCP', b'tcp://example.com:8080'), - (b'APACHE_PORT_80_TCP_PROTO', b'tcp'), - (b'APACHE_PORT_80_TCP_ADDR', b'example.com'), - (b'APACHE_PORT_80_TCP_PORT', b'8080'), - ) - unit_environment = environment_variables + link_environment_variables - environment = Environment(variables=frozenset(unit_environment)) - unit1 = UNIT_FOR_APP.set("environment", environment) - units = {unit1.name: unit1} - links = [ - Link(local_port=80, remote_port=8080, alias=u"APACHE") - ] - applications = [APP.set("links", links).set( - "environment", dict(environment_variables))] - self._verify_discover_state_applications(units, applications) - - def test_discover_application_with_links(self): - """ - An ``Application`` with ``Link`` objects is discovered from a ``Unit`` - with environment variables that correspond to an exposed link. 
- """ - applications = [APP.set("links", [ - Link(local_port=80, remote_port=8080, alias=u'APACHE') - ])] - self._verify_discover_state_applications( - {}, applications, start_applications=True) - - def test_discover_application_with_ports(self): - """ - An ``Application`` with ``Port`` objects is discovered from a ``Unit`` - with exposed ``Portmap`` objects. - """ - ports = [PortMap(internal_port=80, external_port=8080)] - unit1 = UNIT_FOR_APP.set("ports", ports) - units = {unit1.name: unit1} - applications = [APP.set("ports", - [Port(internal_port=80, external_port=8080)])] - - self._verify_discover_state_applications(units, applications) - - def test_discover_attached_volume(self): - """ - Datasets that are mounted at a path that matches the container's - volume are added to ``Application`` with same name as an - ``AttachedVolume``. - """ - DATASET_ID = unicode(uuid4()) - DATASET_ID2 = unicode(uuid4()) - - path1 = FilePath(b"/flocker").child(DATASET_ID.encode("ascii")) - path2 = FilePath(b"/flocker").child(DATASET_ID2.encode("ascii")) - manifestations = {dataset_id: - Manifestation( - dataset=Dataset(dataset_id=dataset_id), - primary=True, - ) - for dataset_id in (DATASET_ID, DATASET_ID2)} - current_known_state = NodeState(uuid=self.node_uuid, - hostname=u'example.com', - manifestations=manifestations, - devices={}, - paths={DATASET_ID: path1, - DATASET_ID2: path2}) - - unit1 = UNIT_FOR_APP.set("volumes", [ - DockerVolume( - node_path=path1, - container_path=FilePath(b'/var/lib/data') - )] - ) - - unit2 = UNIT_FOR_APP2.set("volumes", [ - DockerVolume( - node_path=path2, - container_path=FilePath(b'/var/lib/data') - )] - ) - units = {unit1.name: unit1, unit2.name: unit2} - applications = [app.set("volume", AttachedVolume( - manifestation=manifestations[respective_id], - mountpoint=FilePath(b'/var/lib/data') - )) for (app, respective_id) in [(APP, DATASET_ID), - (APP2, DATASET_ID2)]] - self._verify_discover_state_applications( - units, applications, current_state=current_known_state) - - def test_ignore_unknown_volumes(self): - """ - Docker volumes that cannot be matched to a dataset are ignored. - """ - unit = UNIT_FOR_APP.set("volumes", [ - DockerVolume( - node_path=FilePath(b"/some/random/path"), - container_path=FilePath(b'/var/lib/data') - )], - ) - units = {unit.name: unit} - applications = [APP] - self._verify_discover_state_applications(units, applications) - - def test_not_running_units(self): - """ - Units that are not active are considered to be not running by - ``discover_state()``. - """ - unit1 = UNIT_FOR_APP.set("activation_state", u"inactive") - unit2 = UNIT_FOR_APP2.set("activation_state", u'madeup') - units = {unit1.name: unit1, unit2.name: unit2} - applications = [APP.set("running", False), APP2.set("running", False)] - self._verify_discover_state_applications(units, applications) - - def test_discover_application_restart_policy(self): - """ - An ``Application`` with the appropriate ``IRestartPolicy`` is - discovered from the corresponding restart policy of the ``Unit``. - """ - policy = RestartAlways() - unit1 = UNIT_FOR_APP.set("restart_policy", policy) - units = {unit1.name: unit1} - applications = [APP.set("restart_policy", policy)] - self._verify_discover_state_applications(units, applications) - - def test_unknown_manifestations(self): - """ - If the given ``NodeState`` indicates ignorance of manifestations, the - ``ApplicationNodeDeployer`` doesn't bother doing any discovery and - just indicates ignorance of applications. 
- """ - units = {APP_NAME: UNIT_FOR_APP} - applications = None - # Apparently we know nothing about manifestations one way or the - # other: - current_state = NodeState( - uuid=self.node_uuid, - hostname=self.hostname, - manifestations=None, paths=None) - self._verify_discover_state_applications( - units, applications, current_state=current_state) - - -def restart(old, new, node_state): - """ - Construct the exact ``IStateChange`` that ``ApplicationNodeDeployer`` - returns when it wants to restart a particular application on a particular - node. - """ - return sequentially(changes=[ - in_parallel(changes=[ - sequentially(changes=[ - StopApplication(application=old), - StartApplication( - application=new, node_state=node_state, - ), - ]), - ]), - ]) - - -def no_change(): - """ - Construct the exact ``IStateChange`` that ``ApplicationNodeDeployer`` - returns when it doesn't want to make any changes. - """ - return NoOp(sleep=NOOP_SLEEP_TIME) - - -class ApplicationNodeDeployerCalculateVolumeChangesTests(TestCase): - """ - Tests for ``ApplicationNodeDeployer.calculate_changes`` specifically as it - relates to volume state and configuration. - """ - def test_no_volume_no_changes(self): - """ - If an ``Application`` with no volume is configured and exists, no - changes are calculated. - """ - local_state = EMPTY_NODESTATE.set( - applications=[APPLICATION_WITHOUT_VOLUME], - ) - local_config = to_node(local_state) - assert_application_calculated_changes( - self, local_state, local_config, set(), no_change(), - ) - - def test_has_volume_no_changes(self): - """ - If an ``Application`` with a volume (with a maximum size) is configured - and exists with that configuration, no changes are calculated. - """ - application = APPLICATION_WITH_VOLUME_SIZE - manifestation = application.volume.manifestation - local_state = EMPTY_NODESTATE.set( - devices={UUID(manifestation.dataset_id): FilePath(b"/dev/foo")}, - paths={manifestation.dataset_id: FilePath(b"/foo/bar")}, - manifestations={manifestation.dataset_id: manifestation}, - applications=[application], - ) - local_config = to_node(local_state) - assert_application_calculated_changes( - self, local_state, local_config, set(), no_change(), - ) - - def test_has_volume_cant_change_yet(self): - """ - If an ``Application`` is configured with a volume but exists without it - and the dataset for the volume isn't present on the node, no changes - are calculated. - """ - application = APPLICATION_WITH_VOLUME_SIZE - manifestation = application.volume.manifestation - local_state = EMPTY_NODESTATE.set( - applications=[application.set("volume", None)], - ) - local_config = to_node(local_state).set( - manifestations={manifestation.dataset_id: manifestation}, - applications=[application], - ) - assert_application_calculated_changes( - self, local_state, local_config, set(), no_change(), - ) - - def test_has_volume_needs_changes(self): - """ - If an ``Application`` is configured with a volume but exists without - the volume and the dataset for the volume is present on the node, a - change to restart that application is calculated. 
- """ - application = APPLICATION_WITH_VOLUME_SIZE - application_without_volume = application.set(volume=None) - manifestation = application.volume.manifestation - local_state = EMPTY_NODESTATE.set( - devices={UUID(manifestation.dataset_id): FilePath(b"/dev/foo")}, - paths={manifestation.dataset_id: FilePath(b"/foo/bar")}, - manifestations={manifestation.dataset_id: manifestation}, - applications=[application_without_volume], - ) - local_config = to_node(local_state).set( - applications=[application], - ) - assert_application_calculated_changes( - self, local_state, local_config, set(), - restart(application_without_volume, application, local_state), - ) - - def test_no_volume_needs_changes(self): - """ - If an ``Application`` is configured with no volume but exists with one, - a change to restart that application is calculated. - """ - application = APPLICATION_WITH_VOLUME_SIZE - application_without_volume = application.set(volume=None) - manifestation = application.volume.manifestation - local_state = EMPTY_NODESTATE.set( - devices={UUID(manifestation.dataset_id): FilePath(b"/dev/foo")}, - paths={manifestation.dataset_id: FilePath(b"/foo/bar")}, - manifestations={manifestation.dataset_id: manifestation}, - applications=[application], - ) - local_config = to_node(local_state).set( - applications=[application_without_volume], - ) - assert_application_calculated_changes( - self, local_state, local_config, set(), - restart(application, application_without_volume, local_state), - ) - - def _resize_no_changes(self, state_size, config_size): - application_state = APPLICATION_WITH_VOLUME.transform( - ["volume", "manifestation", "dataset", "maximum_size"], - state_size, - ) - application_config = application_state.transform( - ["volume", "manifestation", "dataset", "maximum_size"], - config_size, - ) - manifestation_state = application_state.volume.manifestation - manifestation_config = application_config.volume.manifestation - - # Both objects represent the same dataset so the id is the same on - # each. - dataset_id = manifestation_state.dataset_id - - local_state = EMPTY_NODESTATE.set( - devices={UUID(dataset_id): FilePath(b"/dev/foo")}, - paths={dataset_id: FilePath(b"/foo/bar")}, - manifestations={dataset_id: manifestation_state}, - applications=[application_state], - ) - local_config = to_node(local_state).set( - applications=[application_config], - manifestations={dataset_id: manifestation_config}, - ) - assert_application_calculated_changes( - self, local_state, local_config, set(), no_change(), - ) - - def test_resized_volume_no_changes(self): - """ - If an ``Application`` is configured with a volume and exists with that - volume but the volume is a different size than configured, no changes - are calculated because ``ApplicationNodeDeployer`` doesn't trust the - dataset agent to be able to resize volumes. - """ - self._resize_no_changes(GiB(1).to_Byte().value, GiB(2).to_Byte().value) - - def test_maximum_volume_size_applied_no_changes(self): - """ - If an ``Application``\ 's volume exists without a maximum size and the - configuration for that volume indicates a size, no changes are - calculated because ``ApplicationNodeDeployer`` doesn't trust the - dataset agent to be able to resize volumes. 
- """ - self._resize_no_changes(None, GiB(1).to_Byte().value) - - def test_maximum_volume_size_removed_no_changes(self): - """ - If an ``Application``\ 's volume exists with a maximum size and the - configuration for that volume indicates no maximum size, no changes are - calculated because ``ApplicationNodeDeployer`` doesn't trust the - dataset agent to be able to resize volumes. - """ - self._resize_no_changes(GiB(1).to_Byte().value, None) - - def test_moved_volume_needs_changes(self): - """ - If an ``Application`` is configured with a volume on a node but is no - longer configured to on that node, a change to stop that application is - calculated. - """ - application = APPLICATION_WITH_VOLUME_SIZE - manifestation = application.volume.manifestation - local_state = EMPTY_NODESTATE.set( - devices={UUID(manifestation.dataset_id): FilePath(b"/dev/foo")}, - paths={manifestation.dataset_id: FilePath(b"/foo/bar")}, - manifestations={manifestation.dataset_id: manifestation}, - applications=[application], - ) - local_config = to_node(EMPTY_NODESTATE) - assert_application_calculated_changes( - self, local_state, local_config, set(), - sequentially(changes=[ - in_parallel(changes=[ - StopApplication(application=application), - ]), - ]), - ) - - def test_different_volume_needs_change(self): - """ - If an ``Application`` is configured with a volume but exists with a - different volume, a change to restart that application is calculated. - """ - application = APPLICATION_WITH_VOLUME_SIZE - manifestation = application.volume.manifestation - another_manifestation = manifestation.transform( - ["dataset", "dataset_id"], uuid4(), - ) - changed_application = application.transform( - ["volume", "manifestation"], another_manifestation, - ) - local_state = EMPTY_NODESTATE.set( - devices={ - UUID(manifestation.dataset_id): FilePath(b"/dev/foo"), - UUID(another_manifestation.dataset_id): FilePath(b"/dev/bar"), - }, - paths={ - manifestation.dataset_id: FilePath(b"/foo/bar"), - another_manifestation.dataset_id: FilePath(b"/bar/baz"), - }, - manifestations={ - manifestation.dataset_id: manifestation, - another_manifestation.dataset_id: another_manifestation, - }, - applications=[application], - ) - local_config = to_node(local_state).set( - applications=[ - changed_application, - ], - ) - assert_application_calculated_changes( - self, local_state, local_config, set(), - restart(application, changed_application, local_state), - ) - - -class ApplicationNodeDeployerCalculateChangesTests(TestCase): - """ - Tests for ``ApplicationNodeDeployer.calculate_changes``. - """ - def test_no_state_changes(self): - """ - ``ApplicationNodeDeployer.calculate_changes`` returns a - ``Deferred`` which fires with a :class:`IStateChange` instance - indicating that no changes are necessary when there are no - applications running or desired, and no proxies exist or are - desired. - """ - assert_application_calculated_changes( - self, EMPTY_NODESTATE, to_node(EMPTY_NODESTATE), set(), - no_change(), - ) - - def test_application_needs_stopping(self): - """ - ``ApplicationNodeDeployer.calculate_changes`` specifies that an - application must be stopped when it is running but not desired. 
- """ - api = ApplicationNodeDeployer(u'node.example.com', - docker_client=FakeDockerClient()) - - to_stop = StopApplication(application=Application( - name=u"site-example.com", image=DockerImage.from_string( - u"flocker/wordpress"))) - - node_state = NodeState( - hostname=api.hostname, applications={to_stop.application}, - ) - result = api.calculate_changes( - desired_configuration=EMPTY, - current_cluster_state=DeploymentState(nodes=[node_state]), - local_state=NodeLocalState(node_state=node_state)) - expected = sequentially(changes=[in_parallel(changes=[to_stop])]) - self.assertEqual(expected, result) - - def test_application_needs_starting(self): - """ - ``ApplicationNodeDeployer.calculate_changes`` specifies that an - application must be started when it is desired on the given node but - not running. - """ - api = ApplicationNodeDeployer(u'example.com', - docker_client=FakeDockerClient(), - node_uuid=uuid4()) - application = Application( - name=u'mysql-hybridcluster', - image=DockerImage(repository=u'clusterhq/flocker', - tag=u'release-14.0') - ) - - nodes = frozenset([ - Node( - uuid=api.node_uuid, - applications=frozenset([application]) - ) - ]) - - node_state = NodeState( - hostname=api.hostname, uuid=api.node_uuid, - applications=[]) - - desired = Deployment(nodes=nodes) - result = api.calculate_changes( - desired_configuration=desired, - current_cluster_state=DeploymentState(nodes=[node_state]), - local_state=NodeLocalState(node_state=node_state)) - expected = sequentially(changes=[in_parallel( - changes=[StartApplication(application=application, - node_state=node_state)])]) - self.assertEqual(expected, result) - - def test_only_this_node(self): - """ - ``ApplicationNodeDeployer.calculate_changes`` does not specify - that an application must be started if the desired changes apply - to a different node. - """ - api = ApplicationNodeDeployer(u'node.example.com', - docker_client=FakeDockerClient()) - application = Application( - name=u'mysql-hybridcluster', - image=DockerImage(repository=u'clusterhq/flocker', - tag=u'release-14.0') - ) - - nodes = frozenset([ - Node( - hostname=u'node1.example.net', - applications=frozenset([application]) - ) - ]) - - desired = Deployment(nodes=nodes) - result = api.calculate_changes( - desired_configuration=desired, - current_cluster_state=EMPTY_STATE, - local_state=empty_node_local_state(api)) - expected = sequentially(changes=[]) - self.assertEqual(expected, result) - - def test_no_change_needed(self): - """ - ``ApplicationNodeDeployer.calculate_changes`` does not specify - that an application must be started or stopped if the desired - configuration is the same as the current configuration. 
- """ - api = ApplicationNodeDeployer(u'node.example.com', - docker_client=FakeDockerClient()) - - application = Application( - name=u'mysql-hybridcluster', - image=DockerImage(repository=u'clusterhq/mysql', - tag=u'latest'), - ports=frozenset(), - ) - - nodes = frozenset([ - Node( - hostname=u'node.example.com', - applications=frozenset([application]) - ) - ]) - - desired = Deployment(nodes=nodes) - node_state = NodeState(hostname=api.hostname, - applications=[application]) - result = api.calculate_changes( - desired_configuration=desired, - current_cluster_state=DeploymentState(nodes=[node_state]), - local_state=NodeLocalState(node_state=node_state)) - expected = no_change() - self.assertEqual(expected, result) - - def test_node_not_described(self): - """ - ``ApplicationNodeDeployer.calculate_changes`` specifies that - all applications on a node must be stopped if the desired - configuration does not include that node. - """ - api = ApplicationNodeDeployer(u'node.example.com', - docker_client=FakeDockerClient()) - application = Application( - name=u"my-db", - image=DockerImage.from_string("postgres") - ) - desired = Deployment(nodes=frozenset()) - node_state = NodeState(hostname=api.hostname, - applications=[application]) - result = api.calculate_changes( - desired_configuration=desired, - current_cluster_state=DeploymentState(nodes=[node_state]), - local_state=NodeLocalState(node_state=node_state)) - to_stop = StopApplication( - application=application, - ) - expected = sequentially(changes=[in_parallel(changes=[to_stop])]) - self.assertEqual(expected, result) - - def test_local_not_running_applications_restarted(self): - """ - Applications that are not running but are supposed to be on the local - node are restarted by Flocker (we cannot rely on Docker restart - policies to do so because FLOC-3148). - """ - api = ApplicationNodeDeployer(u'n.example.com', - docker_client=FakeDockerClient(), - node_uuid=uuid4()) - application_desired = Application( - name=u'mysql-hybridcluster', - image=DockerImage(repository=u'clusterhq/flocker', - tag=u'release-14.0'), - ) - application_stopped = application_desired.set("running", False) - nodes_desired = frozenset([ - Node( - uuid=api.node_uuid, - applications=frozenset([application_desired]) - ) - ]) - node_state = NodeState( - hostname=api.hostname, - uuid=api.node_uuid, - applications=[application_stopped]) - desired = Deployment(nodes=nodes_desired) - result = api.calculate_changes( - desired_configuration=desired, - current_cluster_state=DeploymentState(nodes=[node_state]), - local_state=NodeLocalState(node_state=node_state)) - - expected = sequentially(changes=[in_parallel(changes=[ - sequentially(changes=[ - StopApplication(application=application_stopped), - StartApplication( - application=application_desired, node_state=node_state), - ])])]) - self.assertEqual(expected, result) - - def test_not_local_not_running_applications_stopped(self): - """ - Applications that are not running and are not supposed to be on the - local node are added to the list of applications to stop. 
- """ - api = ApplicationNodeDeployer( - u'example.com', - docker_client=FakeDockerClient()) - to_stop = Application( - name=u"myapp", - image=DockerImage.from_string(u"postgres"), - running=False, - ) - node_state = NodeState(hostname=api.hostname, applications={to_stop}) - result = api.calculate_changes( - desired_configuration=EMPTY, - current_cluster_state=DeploymentState(nodes=[node_state]), - local_state=NodeLocalState(node_state=node_state)) - expected = sequentially(changes=[in_parallel(changes=[ - StopApplication(application=to_stop)])]) - self.assertEqual(expected, result) - - def test_app_with_changed_image_restarted(self): - """ - An ``Application`` running on a given node that has a different image - specified in the desired state to the image used by the application now - is added to the list of applications to restart. - """ - api = ApplicationNodeDeployer( - u'node1.example.com', - docker_client=FakeDockerClient(), - node_uuid=uuid4(), - ) - - old_postgres_app = Application( - name=u'postgres-example', - image=DockerImage.from_string(u'clusterhq/postgres:latest'), - volume=None - ) - - new_postgres_app = Application( - name=u'postgres-example', - image=DockerImage.from_string(u'docker/postgres:latest'), - volume=None - ) - - desired = Deployment(nodes=frozenset({ - Node(uuid=api.node_uuid, - applications=frozenset({new_postgres_app})), - })) - node_state = NodeState( - uuid=api.node_uuid, - hostname=api.hostname, - applications={old_postgres_app}) - result = api.calculate_changes( - desired_configuration=desired, - current_cluster_state=DeploymentState(nodes={node_state}), - local_state=NodeLocalState(node_state=node_state) - ) - - expected = sequentially(changes=[in_parallel(changes=[ - sequentially(changes=[ - StopApplication(application=old_postgres_app), - StartApplication(application=new_postgres_app, - node_state=node_state) - ]), - ])]) - - self.assertEqual(expected, result) - - def test_app_with_changed_ports_restarted(self): - """ - An ``Application`` running on a given node that has different port - exposures specified in the desired state to the ports exposed by the - application's current state is added to the list of applications to - restart. 
- """ - api = ApplicationNodeDeployer( - u'node1.example.com', - docker_client=FakeDockerClient(), - ) - - old_postgres_app = Application( - name=u'postgres-example', - image=DockerImage.from_string(u'clusterhq/postgres:latest'), - volume=None, - ports=frozenset([Port( - internal_port=5432, - external_port=50432 - )]) - ) - - new_postgres_app = Application( - name=u'postgres-example', - image=DockerImage.from_string(u'clusterhq/postgres:latest'), - volume=None, - ports=frozenset([Port( - internal_port=5433, - external_port=50433 - )]) - ) - - node_state = NodeState( - hostname=api.hostname, - applications={old_postgres_app}, - ) - - desired = Deployment(nodes=frozenset({ - Node(hostname=api.hostname, - applications=frozenset({new_postgres_app})), - })) - result = api.calculate_changes( - desired_configuration=desired, - current_cluster_state=DeploymentState(nodes={node_state}), - local_state=NodeLocalState(node_state=node_state), - ) - - expected = sequentially(changes=[ - in_parallel(changes=[ - sequentially(changes=[ - StopApplication(application=old_postgres_app), - StartApplication(application=new_postgres_app, - node_state=node_state) - ]), - ]), - ]) - - self.assertEqual(expected, result) - - def test_app_with_changed_links_restarted(self): - """ - An ``Application`` running on a given node that has different links - specified in the desired state to the links specified by the - application's current state is added to the list of applications to - restart. - """ - api = ApplicationNodeDeployer( - u'node1.example.com', - docker_client=FakeDockerClient(), - ) - - old_wordpress_app = Application( - name=u'wordpress-example', - image=DockerImage.from_string(u'clusterhq/wordpress:latest'), - volume=None, - links=frozenset([ - Link( - local_port=5432, remote_port=50432, alias='POSTGRES' - ) - ]) - ) - - postgres_app = Application( - name=u'postgres-example', - image=DockerImage.from_string(u'clusterhq/postgres:latest') - ) - new_wordpress_app = Application( - name=u'wordpress-example', - image=DockerImage.from_string(u'clusterhq/wordpress:latest'), - volume=None, - links=frozenset([ - Link( - local_port=5432, remote_port=51432, alias='POSTGRES' - ) - ]) - ) - - desired = Deployment(nodes=frozenset({ - Node(hostname=u'node1.example.com', - applications=frozenset({new_wordpress_app, postgres_app})), - })) - node_state = NodeState(hostname=api.hostname, - applications={postgres_app, old_wordpress_app}) - result = api.calculate_changes( - desired_configuration=desired, - current_cluster_state=DeploymentState(nodes={node_state}), - local_state=NodeLocalState(node_state=node_state), - ) - - expected = sequentially(changes=[in_parallel(changes=[ - sequentially(changes=[ - StopApplication(application=old_wordpress_app), - StartApplication(application=new_wordpress_app, - node_state=node_state) - ]), - ])]) - - self.assertEqual(expected, result) - - def test_stopped_app_with_change_restarted(self): - """ - An ``Application`` that is stopped, and then reconfigured such that it - would be restarted if it was running, will be restarted with the - new configuration. 
- """ - api = ApplicationNodeDeployer( - u'node1.example.com', - docker_client=FakeDockerClient(), - node_uuid=uuid4(), - ) - - old_postgres_app = Application( - name=u'postgres-example', - image=DockerImage.from_string(u'clusterhq/postgres:latest'), - running=False, - ) - - new_postgres_app = old_postgres_app.transform( - ["image"], DockerImage.from_string(u'docker/postgres:latest'), - ["running"], True) - - desired = Deployment(nodes=[ - Node(uuid=api.node_uuid, applications={new_postgres_app})]) - node_state = NodeState( - uuid=api.node_uuid, - hostname=api.hostname, - applications={old_postgres_app}) - result = api.calculate_changes( - desired_configuration=desired, - current_cluster_state=DeploymentState(nodes={node_state}), - local_state=NodeLocalState(node_state=node_state), - ) - - expected = sequentially(changes=[in_parallel(changes=[ - sequentially(changes=[ - StopApplication(application=old_postgres_app), - StartApplication(application=new_postgres_app, - node_state=node_state) - ]), - ])]) - - self.assertEqual(expected, result) - - def test_unknown_applications(self): - """ - If application state for local state is unknown, don't do anything. - """ - api = ApplicationNodeDeployer( - u'node1.example.com', - docker_client=FakeDockerClient(), - ) - - postgres_app = Application( - name=u'postgres-example', - image=DockerImage.from_string(u'docker/postgres:latest'), - ) - node = Node( - hostname=api.hostname, applications={postgres_app}) - desired = Deployment(nodes=[node]) - - node_state = NodeState(hostname=api.hostname, applications=None) - result = api.calculate_changes(desired, DeploymentState( - nodes=[node_state]), - local_state=NodeLocalState(node_state=node_state), - ) - self.assertEqual(result, sequentially(changes=[])) - - def test_missing_volume(self): - """ - If a desired but non-running application has a volume but its - manifestation does not exist on the node, the application is not - started. - - Eventually the manifestation will appear, at which point the - application can be started. - """ - api = ApplicationNodeDeployer(u'example.com', - docker_client=FakeDockerClient()) - manifestation = Manifestation( - dataset=Dataset(dataset_id=unicode(uuid4())), - primary=True, - ) - application = Application( - name=u'mysql-hybridcluster', - image=DockerImage(repository=u'clusterhq/flocker', - tag=u'release-14.0'), - volume=AttachedVolume( - manifestation=manifestation, - mountpoint=FilePath(b"/data"), - ) - ) - - desired = Deployment( - nodes=[Node(hostname=api.hostname, applications=[application], - manifestations={manifestation.dataset_id: - manifestation})]) - - result = api.calculate_changes( - desired_configuration=desired, - # No manifestations available! - current_cluster_state=EMPTY_STATE, - local_state=empty_node_local_state(api), - ) - expected = sequentially(changes=[]) - self.assertEqual(expected, result) - - def _app_restart_policy_test(self, restart_state, restart_config, - expect_restart): - """ - Verify that an application with a particular restart policy in its - state and in another (or the same) policy in its configuration is - either restarted or not. - - :param IRestartPolicy restart_state: The policy to put into the - application state. - :param IRestartPolicy restart_config: The policy to put into the - application configuration. - :param bool expect_restart: ``True`` if the given combination must - provoke an application restart. ``False`` if it must not. - - :raise: A test-failing exception if the restart expection is not met. 
- """ - app_state = APPLICATION_WITHOUT_VOLUME.set( - restart_policy=restart_state, - ) - node_state = NodeState( - uuid=uuid4(), hostname=u"192.0.2.10", - applications={app_state}, - ) - app_config = app_state.set( - restart_policy=restart_config, - ) - node_config = to_node(node_state.set(applications={app_config})) - if expect_restart: - expected_changes = restart(app_state, app_config, node_state) - else: - expected_changes = no_change() - assert_application_calculated_changes( - self, node_state, node_config, set(), - expected_changes, - ) - - def test_app_state_always_and_config_always_restarted(self): - """ - Restart policies interact poorly with containers with volumes. If an - application state is found with a restart policy other than "never", - even if the application configuration matches that restart policy, it - is restarted with the "never" policy. See FLOC-2449. - """ - self._app_restart_policy_test(RestartAlways(), RestartAlways(), True) - - def test_app_state_always_and_config_failure_restarted(self): - """ - See ``test_app_state_always_and_config_always_restarted`` - """ - self._app_restart_policy_test( - RestartAlways(), RestartOnFailure(maximum_retry_count=2), True, - ) - - def test_app_state_always_and_config_never_restarted(self): - """ - See ``test_app_state_always_and_config_always_restarted`` - """ - self._app_restart_policy_test(RestartAlways(), RestartNever(), True) - - def test_app_state_never_and_config_never_not_restarted(self): - """ - See ``test_app_state_always_and_config_always_restarted`` - """ - self._app_restart_policy_test(RestartNever(), RestartNever(), False) - - def test_app_state_never_and_config_always_not_restarted(self): - """ - See ``test_app_state_always_and_config_always_restarted`` - """ - self._app_restart_policy_test(RestartNever(), RestartAlways(), False) - - def test_app_state_never_and_config_failure_not_restarted(self): - """ - See ``test_app_state_always_and_config_always_restarted`` - """ - self._app_restart_policy_test( - RestartNever(), RestartOnFailure(maximum_retry_count=2), False, - ) - - def test_app_state_failure_and_config_never_restarted(self): - """ - See ``test_app_state_always_and_config_always_restarted`` - """ - self._app_restart_policy_test( - RestartOnFailure(maximum_retry_count=2), RestartNever(), True, - ) - - def test_app_state_failure_and_config_always_restarted(self): - """ - See ``test_app_state_always_and_config_always_restarted`` - """ - self._app_restart_policy_test( - RestartOnFailure(maximum_retry_count=2), RestartAlways(), True, - ) - - def test_app_state_failure_and_config_failure_restarted(self): - """ - See ``test_app_state_always_and_config_always_restarted`` - """ - self._app_restart_policy_test( - RestartOnFailure(maximum_retry_count=2), - RestartOnFailure(maximum_retry_count=2), - True, - ) diff --git a/flocker/node/test/test_docker.py b/flocker/node/test/test_docker.py deleted file mode 100644 index ee7aebe1e4..0000000000 --- a/flocker/node/test/test_docker.py +++ /dev/null @@ -1,500 +0,0 @@ -# Copyright ClusterHQ Inc. See LICENSE file for details. - -""" -Tests for :module:`flocker.node._docker`. 
-""" - -from zope.interface.verify import verifyObject - -from pyrsistent import pset, pvector - -from docker.errors import APIError - -from twisted.python.filepath import FilePath - -from ...testtools import ( - AsyncTestCase, TestCase, random_name, make_with_init_tests, -) -from ..testtools import add_with_port_collision_retry - -from .._docker import ( - IDockerClient, FakeDockerClient, AddressInUse, AlreadyExists, PortMap, - Unit, Environment, Volume, -) - -from ...control._model import RestartAlways, RestartNever, RestartOnFailure - -# Just some image we can use to start a container. No particularly behavior -# should be expected from this image except that it exists. -# -# Note we explicitly select the "latest" tag to avoid tripping over a Docker -# 1.8.1 / Docker hub interaction that results in pulls failing. See -# https://github.com/docker/docker/issues/15699 -ANY_IMAGE = u"openshift/busybox-http-app:latest" - - -def make_idockerclient_tests(fixture): - """ - Create a TestCase for IDockerClient. - - :param fixture: A fixture that returns a :class:`IDockerClient` - provider. - """ - class IDockerClientTests(AsyncTestCase): - """ - Tests for :class:`IDockerClientTests`. - - These are functional tests if run against a real Docker daemon. - """ - def test_interface(self): - """The tested object provides :class:`IDockerClient`.""" - client = fixture(self) - self.assertTrue(verifyObject(IDockerClient, client)) - - def test_add_and_remove(self): - """ - An added container can be removed without an error. - """ - client = fixture(self) - name = random_name(self) - d = client.add(name, u"busybox") - d.addCallback(lambda _: client.remove(name)) - return d - - def test_no_double_add(self): - """ - Adding a container with name that already exists results in error. - """ - client = fixture(self) - name = random_name(self) - self.addCleanup(client.remove, name) - d = client.add(name, u"busybox") - - def added(_): - return client.add(name, u"busybox") - d.addCallback(added) - d = self.assertFailure(d, AlreadyExists) - d.addCallback(lambda exc: self.assertEqual(exc.args[0], name)) - return d - - def test_remove_nonexistent_is_ok(self): - """ - Removing a non-existent container does not result in a error. - """ - client = fixture(self) - name = random_name(self) - return client.remove(name) - - def test_double_remove_is_ok(self): - """ - Removing a container twice in a row does not result in error. - """ - client = fixture(self) - name = random_name(self) - d = client.add(name, u"busybox") - d.addCallback(lambda _: client.remove(name)) - d.addCallback(lambda _: client.remove(name)) - return d - - def test_unknown_does_not_exist(self): - """ - A container that was never added does not exist. - """ - client = fixture(self) - name = random_name(self) - d = client.exists(name) - d.addCallback(self.assertFalse) - return d - - def test_added_exists(self): - """ - An added container exists. - """ - client = fixture(self) - name = random_name(self) - self.addCleanup(client.remove, name) - d = client.add(name, u"busybox") - - def added(_): - return client.exists(name) - d.addCallback(added) - d.addCallback(self.assertTrue) - return d - - def test_removed_does_not_exist(self): - """ - A removed container does not exist. 
- """ - client = fixture(self) - name = random_name(self) - d = client.add(name, ANY_IMAGE) - d.addCallback(lambda _: client.remove(name)) - d.addCallback(lambda _: client.exists(name)) - d.addCallback(self.assertFalse) - return d - - def test_zero_port_randomly_assigned(self): - """ - If an external port number is given as 0, a random available port - number is used. - """ - client = fixture(self) - name = random_name(self) - portmap = PortMap( - internal_port=1234, external_port=0, - ) - self.addCleanup(client.remove, name) - d = client.add(name, ANY_IMAGE, ports=(portmap,)) - d.addCallback(lambda ignored: client.list()) - - def check_port(units): - portmap = list(list(units)[0].ports)[0] - self.assertTrue( - 0 < portmap.external_port < 2 ** 16, - "Unexpected automatic port assignment: {}".format( - portmap.external_port - ), - ) - d.addCallback(check_port) - return d - - def test_port_collision_raises_addressinuse(self): - """ - If the container is configured with an external port number which - is already in use, ``AddressInUse`` is raised. - """ - client = fixture(self) - name = random_name(self) - portmap = PortMap( - internal_port=12345, external_port=0, - ) - self.addCleanup(client.remove, name) - d = client.add(name, ANY_IMAGE, ports=(portmap,)) - d.addCallback(lambda ignored: client.list()) - - def extract_port(units): - return list(list(units)[0].ports)[0].external_port - d.addCallback(extract_port) - - def collide(external_port): - self.external_port = external_port - portmap = PortMap( - internal_port=54321, external_port=external_port, - ) - name = random_name(self) - self.addCleanup(client.remove, name) - return client.add(name, ANY_IMAGE, ports=(portmap,)) - d.addCallback(collide) - d = self.assertFailure(d, AddressInUse) - - def failed(exception): - self.assertEqual( - exception.address, (b"0.0.0.0", self.external_port) - ) - self.assertIsInstance(exception.apierror, APIError) - d.addCallback(failed) - return d - - def test_added_is_listed(self): - """ - An added container is included in the output of ``list()``. - """ - client = fixture(self) - name = random_name(self) - image = ANY_IMAGE - - portmaps = [ - PortMap(internal_port=80, external_port=0), - PortMap(internal_port=5432, external_port=0), - ] - volumes = ( - Volume(node_path=FilePath(self.mktemp()), - container_path=FilePath(b'/var/lib/data')), - ) - environment = ( - (u'CUSTOM_ENV_A', u'a value'), - (u'CUSTOM_ENV_B', u'another value'), - ) - environment = Environment(variables=frozenset(environment)) - self.addCleanup(client.remove, name) - - d = add_with_port_collision_retry( - client, - name, - image_name=image, - ports=portmaps, - volumes=volumes, - environment=environment, - mem_limit=100000000, - cpu_shares=512, - restart_policy=RestartAlways(), - ) - - def added((app, portmaps)): - d = client.list() - d.addCallback(lambda units: (units, portmaps)) - return d - d.addCallback(added) - - def got_list((units, portmaps)): - result = units.pop() - - expected = Unit( - name=name, container_name=name, activation_state=u"active", - container_image=image, ports=frozenset(portmaps), - environment=environment, volumes=frozenset(volumes), - mem_limit=100000000, cpu_shares=512, - restart_policy=RestartAlways(), - ) - - # This test is not concerned with a returned ``Unit``'s - # ``container_name`` and unlike other properties of the - # result, does not expect ``container_name`` to be any - # particular value. 
-                # known value simply allows us to compare an entire Unit
-                # object instead of individual properties and is therefore
-                # a convenience measure.
-                result = result.set("container_name", name)
-                self.assertEqual(result, expected)
-            d.addCallback(got_list)
-            return d
-
-        def test_removed_is_not_listed(self):
-            """
-            A removed container is not included in the output of ``list()``.
-            """
-            client = fixture(self)
-            name = random_name(self)
-
-            d = client.add(name, ANY_IMAGE)
-            d.addCallback(lambda _: client.remove(name))
-            d.addCallback(lambda _: client.list())
-
-            def got_list(units):
-                self.assertNotIn(name, [unit.name for unit in units])
-            d.addCallback(got_list)
-            return d
-
-        def test_container_name(self):
-            """
-            Each container also records the container name twice.
-            """
-            # This is silly behavior. Get rid of it when fixing
-            # .
-            client = fixture(self)
-            name = random_name(self)
-            self.addCleanup(client.remove, name)
-            d = client.add(name, u"busybox")
-            d.addCallback(lambda _: client.list())
-
-            def got_list(units):
-                unit = [unit for unit in units if unit.name == name][0]
-                self.assertIsInstance(unit.container_name, unicode)
-            d.addCallback(got_list)
-            return d
-
-        def test_command_line(self):
-            """
-            Containers created with a command-line have a command-line included
-            when listed.
-            """
-            client = fixture(self)
-            name = random_name(self)
-            self.addCleanup(client.remove, name)
-            command_line = [u"nc", u"-l", u"-p", u"1234"]
-            d = client.add(name, u"busybox", command_line=command_line)
-            d.addCallback(lambda _: client.list())
-
-            def got_list(units):
-                unit = [unit for unit in units if unit.name == name][0]
-                self.assertEqual(unit.command_line, pvector(command_line))
-            d.addCallback(got_list)
-            return d
-
-        def assert_restart_policy_round_trips(self, restart_policy):
-            """
-            Creating a container with the given restart policy creates a
-            container that reports that same policy.
-
-            :param IRestartPolicy restart_policy: The restart policy to test.
-            """
-            client = fixture(self)
-            name = random_name(self)
-            self.addCleanup(client.remove, name)
-            d = client.add(name, u"busybox", restart_policy=restart_policy)
-            d.addCallback(lambda _: client.list())
-
-            def got_list(units):
-                unit = [unit for unit in units if unit.name == name][0]
-                self.assertEqual(unit.restart_policy, restart_policy)
-            d.addCallback(got_list)
-            return d
-
-        def test_add_with_restart_never(self):
-            """
-            ``DockerClient.add``, when creating a container with a restart
-            policy of never, will create a container with this policy.
-            """
-            return self.assert_restart_policy_round_trips(RestartNever())
-
-        def test_add_with_restart_always(self):
-            """
-            ``DockerClient.add``, when creating a container with a restart
-            policy of always, will create a container with this policy.
-            """
-            return self.assert_restart_policy_round_trips(RestartAlways())
-
-        def test_add_with_restart_on_failure(self):
-            """
-            ``DockerClient.add``, when creating a container with a restart
-            policy of on failure, will create a container with this policy.
-            """
-            return self.assert_restart_policy_round_trips(RestartOnFailure())
-
-        def test_add_with_restart_on_failure_with_maximum_retry(self):
-            """
-            ``DockerClient.add``, when creating a container with a restart
-            policy of on failure with a retry count, will create a container
-            with this policy.
- """ - return self.assert_restart_policy_round_trips( - RestartOnFailure(maximum_retry_count=5)) - - return IDockerClientTests - - -class FakeIDockerClientTests( - make_idockerclient_tests( - fixture=lambda test_case: FakeDockerClient(), - ) -): - """ - ``IDockerClient`` tests for ``FakeDockerClient``. - """ - - -class FakeDockerClientImplementationTests(TestCase): - """ - Tests for implementation details of ``FakeDockerClient``. - """ - def test_units_default(self): - """ - ``FakeDockerClient._units`` is an empty dict by default. - """ - self.assertEqual({}, FakeDockerClient()._units) - - def test_units_override(self): - """ - ``FakeDockerClient._units`` can be supplied in the constructor. - """ - units = {u'foo': Unit(name=u'foo', container_name=u'foo', - activation_state=u'active', - container_image=u'flocker/flocker:v1.0.0')} - self.assertEqual(units, FakeDockerClient(units=units)._units) - - -class PortMapInitTests( - make_with_init_tests( - record_type=PortMap, - kwargs=dict( - internal_port=5678, - external_port=910, - ) - ) -): - """ - Tests for ``PortMap.__init__``. - """ - - -class PortMapTests(TestCase): - """ - Tests for ``PortMap``. - - XXX: The equality tests in this case are incomplete. See - https://github.com/hynek/characteristic/issues/4 for a proposed solution to - this. - """ - def test_equal(self): - """ - ``PortMap`` instances with the same internal and external ports compare - equal. - """ - self.assertEqual( - PortMap(internal_port=5678, external_port=910), - PortMap(internal_port=5678, external_port=910), - ) - - def test_not_equal(self): - """ - ``PortMap`` instances with the different internal and external ports do - not compare equal. - """ - self.assertNotEqual( - PortMap(internal_port=5678, external_port=910), - PortMap(internal_port=1516, external_port=1718) - ) - - -class UnitInitTests( - make_with_init_tests( - record_type=Unit, - kwargs=dict( - name=u'site-example.com', - container_name=u'flocker--site-example.com', - activation_state=u'active', - container_image=u'flocker/flocker:v1.0.0', - ports=pset((PortMap(internal_port=80, external_port=8080),)), - environment=Environment(variables={u'foo': u'bar'}), - restart_policy=RestartAlways(), - ), - expected_defaults=dict( - ports=pset(), container_image=None, environment=None, - restart_policy=RestartNever()) - ) -): - """ - Tests for ``Unit.__init__``. - """ - - -class EnvironmentInitTests( - make_with_init_tests( - record_type=Environment, - kwargs=dict( - variables=dict(foo="bar"), - ), - ) -): - """ - Tests for ``Environment.__init__``. - """ - - -class EnvironmentTests(TestCase): - """ - Tests for ``Environment``. - """ - def test_to_dict(self): - """ - ``Environment.to_dict`` returns a dictionary containing the - the environment variables as key/value entries. - """ - variables = {'baz': 'qux', 'foo': 'bar'} - environment = Environment(variables=frozenset(variables.items())) - - self.assertEqual(environment.to_dict(), variables) - - -class VolumeInitTests( - make_with_init_tests( - record_type=Volume, - kwargs=dict( - node_path=FilePath(b"/tmp"), - container_path=FilePath(b"/blah"), - ), - ) -): - """ - Tests for ``Volume.__init__``. 
- """ diff --git a/flocker/node/test/test_script.py b/flocker/node/test/test_script.py index bb27edeb28..9113792d8a 100644 --- a/flocker/node/test/test_script.py +++ b/flocker/node/test/test_script.py @@ -33,7 +33,7 @@ from ...common._era import get_era from ..script import ( - AgentScript, ContainerAgentOptions, + AgentScript, AgentServiceFactory, DatasetAgentOptions, validate_configuration, _context_factory_and_credential, DatasetServiceFactory, AgentService, get_configuration, @@ -1031,14 +1031,6 @@ class DatasetAgentOptionsTests( """ -class ContainerAgentOptionsTests( - make_amp_agent_options_tests(ContainerAgentOptions) -): - """ - Tests for ``ContainerAgentOptions``. - """ - - class GetExternalIPTests(TestCase): """ Tests for ``_get_external_ip``. diff --git a/flocker/node/testtools.py b/flocker/node/testtools.py index ed9534425a..11fbbd3938 100644 --- a/flocker/node/testtools.py +++ b/flocker/node/testtools.py @@ -4,14 +4,7 @@ Testing utilities for ``flocker.node``. """ -from functools import wraps -import os -import pwd -from unittest import skipIf, SkipTest from uuid import uuid4 -from distutils.version import LooseVersion # pylint: disable=import-error - -import psutil from zope.interface import implementer @@ -21,80 +14,18 @@ from zope.interface.verify import verifyObject -from eliot import Logger, ActionType, MessageType, fields +from eliot import Logger, ActionType from . import ( ILocalState, IDeployer, NodeLocalState, IStateChange, sequentially ) from ..common import loop_until -from ..testtools import AsyncTestCase, find_free_port +from ..testtools import AsyncTestCase from ..control import ( IClusterStateChange, Node, NodeState, Deployment, DeploymentState, PersistentState, ) from ..control._model import ip_to_uuid, Leases -from ._docker import AddressInUse, DockerClient - - -def docker_accessible(): - """ - Attempt to connect to the Docker control socket. - - :return: A ``bytes`` string describing the reason Docker is not - accessible or ``None`` if it appears to be accessible. - """ - try: - client = DockerClient() - client._client.ping() - except Exception as e: - return str(e) - return None - -_docker_reason = docker_accessible() - -if_docker_configured = skipIf( - _docker_reason, - "User {!r} cannot access Docker: {}".format( - pwd.getpwuid(os.geteuid()).pw_name, - _docker_reason, - )) - - -def require_docker_version(minimum_docker_version, message): - """ - Skip the wrapped test if the actual Docker version is less than - ``minimum_docker_version``. - - :param str minimum_docker_version: The minimum version required by the - test. - :param str message: An explanatory message which will be printed when - skipping the test. - """ - minimum_docker_version = LooseVersion( - minimum_docker_version - ) - - # XXX: Can we change this to use skipIf? - def decorator(wrapped): - @wraps(wrapped) - def wrapper(*args, **kwargs): - client = DockerClient() - docker_version = LooseVersion( - client._client.version()['Version'] - ) - if docker_version < minimum_docker_version: - raise SkipTest( - 'Minimum required Docker version: {}. ' - 'Actual Docker version: {}. 
-                    'Details: {}'.format(
-                        minimum_docker_version,
-                        docker_version,
-                        message,
-                    )
-                )
-            return wrapped(*args, **kwargs)
-        return wrapper
-    return decorator
 
 
 def wait_for_unit_state(reactor, docker_client, unit_name,
@@ -402,94 +333,3 @@ def assert_calculated_changes_for_deployer(
         cluster_configuration, cluster_state, local_state
     )
     case.assertEqual(expected_changes, changes)
-
-
-ADDRESS_IN_USE = MessageType(
-    u"flocker:test:address_in_use",
-    fields(ip=unicode, port=int, name=bytes),
-)
-
-
-def _find_process_name(port_number):
-    """
-    Get the name of the process using the given port number.
-    """
-    for connection in psutil.net_connections():
-        if connection.laddr[1] == port_number:
-            return psutil.Process(connection.pid).name()
-    return None
-
-
-def _retry_on_port_collision(reason, add, cleanup):
-    """
-    Cleanup and re-add a container if it failed to start because of a port
-    collision.
-
-    :param reason: The exception describing the container startup failure.
-    :param add: A no-argument callable that can be used to try adding and
-        starting the container again.
-    :param cleanup: A no-argument callable that can be used to remove the
-        container.
-    """
-    # We select a random, available port number on each attempt. If it was in
-    # use it's because the "available" part of that port number selection logic
-    # is fairly shaky. It should be good enough that trying again works fairly
-    # well, though. So do that.
-    reason.trap(AddressInUse)
-    ip, port = reason.value.address
-    used_by = _find_process_name(port)
-    ADDRESS_IN_USE(ip=ip, port=port, name=used_by).write()
-    d = cleanup()
-    d.addCallback(lambda ignored: add())
-    return d
-
-
-def add_with_port_collision_retry(client, unit_name, **kw):
-    """
-    Add a container. Try adding it repeatedly if it has ports defined and
-    container startup fails with ``AddressInUse``.
-
-    If ports in the container are defined with an external port number of
-    ``0``, a locally free port number will be assigned. On each re-try
-    attempt, these will be re-assigned to try to avoid the port collision.
-
-    :param DockerClient client: The ``IDockerClient`` to use to try to add the
-        container.
-    :param unicode unit_name: The name of the container to add. See the
-        ``unit_name`` parameter of ``IDockerClient.add``.
-    :param kw: Additional keyword arguments to pass on to
-        ``IDockerClient.add``.
-
-    :return: A ``Deferred`` which fires with a two-tuple. The first element
-        represents the container which has been added and started. The second
-        element is a ``list`` of ``PortMap`` instances describing the ports
-        which were ultimately requested.
- """ - ultimate_ports = [] - - def add(): - # Generate a replacement for any auto-assigned ports - ultimate_ports[:] = tentative_ports = list( - port.set( - external_port=find_free_port()[1] - ) - if port.external_port == 0 - else port - for port in kw["ports"] - ) - tentative_kw = kw.copy() - tentative_kw["ports"] = tentative_ports - return client.add(unit_name, **tentative_kw) - - def cleanup(): - return client.remove(unit_name) - - if "ports" in kw: - trying = add() - trying.addErrback(_retry_on_port_collision, add, cleanup) - result = trying - else: - result = client.add(unit_name, **kw) - - result.addCallback(lambda app: (app, ultimate_ports)) - return result diff --git a/setup.py b/setup.py index 1ea932dd5d..9f8b970d9f 100644 --- a/setup.py +++ b/setup.py @@ -98,8 +98,6 @@ def requirements_list_from_file(requirements_file, dependency_links): package_data={ 'flocker.node.functional': [ - 'sendbytes-docker/*', - 'env-docker/*', 'retry-docker/*' ], # These data files are used by the volumes API to define input and @@ -119,7 +117,6 @@ def requirements_list_from_file(requirements_file, dependency_links): # (admin/packaging.py) if you make changes here. 'console_scripts': [ 'flocker-volume = flocker.volume.script:flocker_volume_main', - 'flocker-container-agent = flocker.node.script:flocker_container_agent_main', # noqa 'flocker-dataset-agent = flocker.node.script:flocker_dataset_agent_main', # noqa 'flocker-control = flocker.control.script:flocker_control_main', 'flocker-ca = flocker.ca._script:flocker_ca_main',