Create a new module for lxc code.
Create a new Python module for lxc code. Move lxc.py and related code into
that module. Script-style functionality in site_utils/lxc.py and
lxc_cleanup.py is left as-is.
BUG=chromium:720219
TEST=sudo python site_utils/lxc_functional_test.py -v
2017-06-15 15:29:52,700 All tests passed.
TEST=Install autotest-server code onto moblab, run a test.
Test runs successfully.
Change-Id: I7b8400db64594f4d1268a62838ac379e713738c1
Reviewed-on: https://chromium-review.googlesource.com/538127
Commit-Ready: Ben Kwa <[email protected]>
Tested-by: Ben Kwa <[email protected]>
Reviewed-by: Ben Kwa <[email protected]>
diff --git a/site_utils/lxc/__init__.py b/site_utils/lxc/__init__.py
new file mode 100644
index 0000000..8ee0426
--- /dev/null
+++ b/site_utils/lxc/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This module provides some tools to interact with LXC containers, for example:
+ 1. Download base container from given GS location, setup the base container.
+ 2. Create a snapshot as test container from base container.
+ 3. Mount a directory in drone to the test container.
+ 4. Run a command in the container and return the output.
+ 5. Cleanup, e.g., destroy the container.
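+
+    A minimal, illustrative flow (assuming the default container path and an
+    already-staged base container):
+        bucket = ContainerBucket()
+        container = bucket.create_from_base('test_123_1422862512_2424')
+        container.start()
+        container.attach_run('echo hello')
+        container.destroy()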
+"""
+
+from constants import *
+from container import Container
+from container_bucket import ContainerBucket
+from lxc import install_package
+from lxc import install_packages
+from lxc import install_python_package
diff --git a/site_utils/lxc/cleanup_if_fail.py b/site_utils/lxc/cleanup_if_fail.py
new file mode 100644
index 0000000..14ef874
--- /dev/null
+++ b/site_utils/lxc/cleanup_if_fail.py
@@ -0,0 +1,73 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import socket
+import sys
+
+import common
+from autotest_lib.client.bin import utils
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.common_lib.cros.graphite import autotest_es
+from autotest_lib.site_utils.lxc import constants
+
+
+def cleanup_if_fail():
+ """Decorator to do cleanup if container fails to be set up.
+ """
+ def deco_cleanup_if_fail(func):
+ """Wrapper for the decorator.
+
+ @param func: Function to be called.
+ """
+ def func_cleanup_if_fail(*args, **kwargs):
+ """Decorator to do cleanup if container fails to be set up.
+
+ The first argument must be a ContainerBucket object, which can be
+ used to retrieve the container object by name.
+
+            @param args: arguments for the function to be called.
+            @param kwargs: keyword arguments for the function to be called.
+ """
+ bucket = args[0]
+ name = utils.get_function_arg_value(func, 'name', args, kwargs)
+ try:
+ skip_cleanup = utils.get_function_arg_value(
+ func, 'skip_cleanup', args, kwargs)
+ except (KeyError, ValueError):
+ skip_cleanup = False
+ try:
+ return func(*args, **kwargs)
+ except:
+ exc_info = sys.exc_info()
+ try:
+ container = bucket.get(name)
+ if container and not skip_cleanup:
+ container.destroy()
+ except error.CmdError as e:
+ logging.error(e)
+
+ try:
+ job_id = utils.get_function_arg_value(
+ func, 'job_id', args, kwargs)
+ except (KeyError, ValueError):
+ job_id = ''
+            metadata = {'drone': socket.gethostname(),
+                        'job_id': job_id,
+                        'success': False}
+ # Record all args if job_id is not available.
+ if not job_id:
+ metadata['args'] = str(args)
+ if kwargs:
+ metadata.update(kwargs)
+ autotest_es.post(
+ use_http=True,
+ type_str=constants.CONTAINER_CREATE_METADB_TYPE,
+ metadata=metadata)
+
+ # Raise the cached exception with original backtrace.
+ raise exc_info[0], exc_info[1], exc_info[2]
+ return func_cleanup_if_fail
+ return deco_cleanup_if_fail
diff --git a/site_utils/lxc/common.py b/site_utils/lxc/common.py
new file mode 100644
index 0000000..41607e1
--- /dev/null
+++ b/site_utils/lxc/common.py
@@ -0,0 +1,8 @@
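+# Standard autotest bootstrap: locate the autotest root relative to this file,
+# temporarily put client/ on sys.path to import setup_modules, then set up the
+# autotest_lib import namespace rooted at the autotest directory.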
+import os, sys
+dirname = os.path.dirname(sys.modules[__name__].__file__)
+autotest_dir = os.path.abspath(os.path.join(dirname, "..", ".."))
+client_dir = os.path.join(autotest_dir, "client")
+sys.path.insert(0, client_dir)
+import setup_modules
+sys.path.pop(0)
+setup_modules.setup(base_path=autotest_dir, root_module_name="autotest_lib")
diff --git a/site_utils/lxc/config.py b/site_utils/lxc/config.py
new file mode 100644
index 0000000..34ae2f9
--- /dev/null
+++ b/site_utils/lxc/config.py
@@ -0,0 +1,409 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+This module helps to deploy config files and shared folders from host to
+container. It reads the settings from a setting file (ssp_deploy_config), and
+deploy the config files based on the settings. The setting file has a json
+string of a list of deployment settings. For example:
+[{
+ "source": "/etc/resolv.conf",
+ "target": "/etc/resolv.conf",
+ "append": true,
+ "permission": 400
+ },
+ {
+ "source": "ssh",
+ "target": "/root/.ssh",
+ "append": false,
+ "permission": 400
+ },
+ {
+ "source": "/usr/local/autotest/results/shared",
+ "target": "/usr/local/autotest/results/shared",
+ "mount": true,
+ "readonly": false,
+ "force_create": true
+ }
+]
+
+Definition of each attribute for config files is as follows:
+source: config file on the host to be copied to the container.
+target: the config file's location inside the container.
+append: true to append the content of the config file to the existing file
+    inside the container. If it's set to false, the existing file inside the
+    container will be overwritten.
+permission: permission to set on the config file inside the container.
+
+Example:
+{
+ "source": "/etc/resolv.conf",
+ "target": "/etc/resolv.conf",
+ "append": true,
+ "permission": 400
+}
+The above example, together with the "ssh" entry in the sample setting file at
+the top, will:
+1. Append the content of /etc/resolv.conf on the host machine to the file
+   /etc/resolv.conf inside the container.
+2. Copy all files in ssh to /root/.ssh in the container.
+3. Change the permission of all these files to 400.
+
+Definition of each attribute for shared folders is as follows:
+source: a folder on the host to be mounted in the container.
+target: the folder's location inside the container.
+mount: true to mount the source folder onto the target inside the container.
+    A setting with mount set to false is invalid.
+readonly: true if the mounted folder inside the container should be readonly.
+force_create: true to create the source folder if it doesn't exist.
+
+Example:
+ {
+ "source": "/usr/local/autotest/results/shared",
+ "target": "/usr/local/autotest/results/shared",
+ "mount": true,
+ "readonly": false,
+ "force_create": true
+ }
+The above example will mount folder "/usr/local/autotest/results/shared" on the
+host to path "/usr/local/autotest/results/shared" inside the container. The
+folder can be written to from inside the container. If the source folder
+doesn't exist, it will be created, as `force_create` is set to true.
+
+The setting file (ssp_deploy_config) lives in AUTOTEST_DIR folder.
+For relative file path specified in ssp_deploy_config, AUTOTEST_DIR/containers
+is the parent folder.
+The setting file can be overridden by a shadow config, ssp_deploy_shadow_config.
+For lab servers, puppet should be used to deploy ssp_deploy_shadow_config to
+AUTOTEST_DIR and the config files to AUTOTEST_DIR/containers.
+
+For SSP to work on non-lab servers, e.g., Moblab and a developer's workstation,
+the module still supports copying over files like the ssh config and the
+autotest shadow_config to the container when
+AUTOTEST_DIR/containers/ssp_deploy_config is not present.
+
+"""
+
+import collections
+import getpass
+import json
+import os
+import socket
+
+import common
+from autotest_lib.client.bin import utils
+from autotest_lib.client.common_lib import global_config
+from autotest_lib.site_utils.lxc import utils as lxc_utils
+
+
+config = global_config.global_config
+
+# Path to ssp_deploy_config and ssp_deploy_shadow_config.
+SSP_DEPLOY_CONFIG_FILE = os.path.join(common.autotest_dir,
+ 'ssp_deploy_config.json')
+SSP_DEPLOY_SHADOW_CONFIG_FILE = os.path.join(common.autotest_dir,
+ 'ssp_deploy_shadow_config.json')
+# A temp folder used to store files to be appended to the files inside
+# container.
+APPEND_FOLDER = 'usr/local/ssp_append'
+# Path to folder that contains autotest code inside container.
+CONTAINER_AUTOTEST_DIR = '/usr/local/autotest'
+
+DeployConfig = collections.namedtuple(
+ 'DeployConfig', ['source', 'target', 'append', 'permission'])
+MountConfig = collections.namedtuple(
+ 'MountConfig', ['source', 'target', 'mount', 'readonly',
+ 'force_create'])
+
+
+class SSPDeployError(Exception):
+ """Exception raised if any error occurs when setting up test container."""
+
+
+class DeployConfigManager(object):
+ """An object to deploy config to container.
+
+ The manager retrieves deploy configs from ssp_deploy_config or
+ ssp_deploy_shadow_config, and sets up the container accordingly.
+ For example:
+ 1. Copy given config files to specified location inside container.
+ 2. Append the content of given config files to specific files inside
+ container.
+ 3. Make sure the config files have proper permission inside container.
+
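+    An illustrative usage sketch, mirroring how the manager is driven in
+    ContainerBucket.setup_test:
+        deploy_config_manager = DeployConfigManager(container)
+        deploy_config_manager.deploy_pre_start()
+        container.start()
+        deploy_config_manager.deploy_post_start()
+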
+ """
+
+ @staticmethod
+ def validate_path(deploy_config):
+ """Validate the source and target in deploy_config dict.
+
+ @param deploy_config: A dictionary of deploy config to be validated.
+
+ @raise SSPDeployError: If any path in deploy config is invalid.
+ """
+ target = deploy_config['target']
+ source = deploy_config['source']
+ if not os.path.isabs(target):
+ raise SSPDeployError('Target path must be absolute path: %s' %
+ target)
+ if not os.path.isabs(source):
+ if source.startswith('~'):
+ # This is to handle the case that the script is run with sudo.
+ inject_user_path = ('~%s%s' % (utils.get_real_user(),
+ source[1:]))
+ source = os.path.expanduser(inject_user_path)
+ else:
+ source = os.path.join(common.autotest_dir, source)
+ # Update the source setting in deploy config with the updated path.
+ deploy_config['source'] = source
+
+
+ @staticmethod
+ def validate(deploy_config):
+ """Validate the deploy config.
+
+ Deploy configs need to be validated and pre-processed, e.g.,
+ 1. Target must be an absolute path.
+ 2. Source must be updated to be an absolute path.
+
+ @param deploy_config: A dictionary of deploy config to be validated.
+
+ @return: A DeployConfig object that contains the deploy config.
+
+ @raise SSPDeployError: If the deploy config is invalid.
+
+ """
+ DeployConfigManager.validate_path(deploy_config)
+ return DeployConfig(**deploy_config)
+
+
+ @staticmethod
+ def validate_mount(deploy_config):
+ """Validate the deploy config for mounting a directory.
+
+ Deploy configs need to be validated and pre-processed, e.g.,
+ 1. Target must be an absolute path.
+ 2. Source must be updated to be an absolute path.
+ 3. Mount must be true.
+
+ @param deploy_config: A dictionary of deploy config to be validated.
+
+ @return: A DeployConfig object that contains the deploy config.
+
+ @raise SSPDeployError: If the deploy config is invalid.
+
+ """
+ DeployConfigManager.validate_path(deploy_config)
+ c = MountConfig(**deploy_config)
+ if not c.mount:
+ raise SSPDeployError('`mount` must be true.')
+ if not c.force_create and not os.path.exists(c.source):
+ raise SSPDeployError('`source` does not exist.')
+ return c
+
+
+ def __init__(self, container):
+ """Initialize the deploy config manager.
+
+        @param container: The container that needs config deployment.
+
+ """
+ self.container = container
+        # If shadow config is used, the deployment procedure will skip some
+        # special handling of config files, e.g.,
+        # 1. Set enable_master_ssh to False in autotest shadow config.
+        # 2. Set ssh loglevel to ERROR for all hosts.
+ self.is_shadow_config = os.path.exists(SSP_DEPLOY_SHADOW_CONFIG_FILE)
+ config_file = (SSP_DEPLOY_SHADOW_CONFIG_FILE if self.is_shadow_config
+ else SSP_DEPLOY_CONFIG_FILE)
+ with open(config_file) as f:
+ deploy_configs = json.load(f)
+ self.deploy_configs = [self.validate(c) for c in deploy_configs
+ if 'append' in c]
+ self.mount_configs = [self.validate_mount(c) for c in deploy_configs
+ if 'mount' in c]
+ self.tmp_append = os.path.join(self.container.rootfs, APPEND_FOLDER)
+ if lxc_utils.path_exists(self.tmp_append):
+ utils.run('sudo rm -rf "%s"' % self.tmp_append)
+ utils.run('sudo mkdir -p "%s"' % self.tmp_append)
+
+
+ def _deploy_config_pre_start(self, deploy_config):
+ """Deploy a config before container is started.
+
+        Most configs can be deployed before the container is up. Configs that
+        require a reboot to take effect must be deployed in this function.
+
+ @param deploy_config: Config to be deployed.
+
+ """
+ if not lxc_utils.path_exists(deploy_config.source):
+ return
+ # Path to the target file relative to host.
+ if deploy_config.append:
+ target = os.path.join(self.tmp_append,
+ os.path.basename(deploy_config.target))
+ else:
+ target = os.path.join(self.container.rootfs,
+ deploy_config.target[1:])
+ # Recursively copy files/folder to the target. `-L` to always follow
+ # symbolic links in source.
+ target_dir = os.path.dirname(target)
+ if not lxc_utils.path_exists(target_dir):
+ utils.run('sudo mkdir -p "%s"' % target_dir)
+ source = deploy_config.source
+ # Make sure the source ends with `/.` if it's a directory. Otherwise
+ # command cp will not work.
+ if os.path.isdir(source) and source[-1] != '.':
+ source += '/.' if source[-1] != '/' else '.'
+ utils.run('sudo cp -RL "%s" "%s"' % (source, target))
+
+
+ def _deploy_config_post_start(self, deploy_config):
+ """Deploy a config after container is started.
+
+        Configs to be appended to existing config files in the container must
+        be copied to a temp location before the container is up (deployed in
+        function _deploy_config_pre_start). After the container is up, calls
+        can be made to append the content of such configs to the existing
+        config files.
+
+ @param deploy_config: Config to be deployed.
+
+ """
+ if deploy_config.append:
+ source = os.path.join('/', APPEND_FOLDER,
+ os.path.basename(deploy_config.target))
+ self.container.attach_run('cat \'%s\' >> \'%s\'' %
+ (source, deploy_config.target))
+ self.container.attach_run(
+ 'chmod -R %s \'%s\'' %
+ (deploy_config.permission, deploy_config.target))
+
+
+ def _modify_shadow_config(self):
+ """Update the shadow config used in container with correct values.
+
+        This only applies when no shadow SSP deploy config is applied. For the
+        default SSP deploy config, autotest shadow_config.ini comes from the
+        autotest directory and requires the following modifications to work in
+        the container. If one chooses to use a shadow SSP deploy config file,
+        the autotest shadow_config.ini must come from a source with the
+        following modifications already applied:
+        1. Disable master ssh connection in the shadow config, as it does not
+        work properly in the container yet and produces noise in the log.
+        2. Update AUTOTEST_WEB/host and SERVER/hostname to be the IP of the
+        host if either is set to localhost or 127.0.0.1. Otherwise, set it to
+        the FQDN of the config value.
+        3. Update SSP/user, which is used as the user making RPCs inside the
+        container. This allows the RPC to pass the ACL check as if the call
+        were made on the host.
+
+ """
+ shadow_config = os.path.join(CONTAINER_AUTOTEST_DIR,
+ 'shadow_config.ini')
+
+ # Inject "AUTOSERV/enable_master_ssh: False" in shadow config as
+ # container does not support master ssh connection yet.
+ self.container.attach_run(
+ 'echo $\'\n[AUTOSERV]\nenable_master_ssh: False\n\' >> %s' %
+ shadow_config)
+
+ host_ip = lxc_utils.get_host_ip()
+ local_names = ['localhost', '127.0.0.1']
+
+ db_host = config.get_config_value('AUTOTEST_WEB', 'host')
+ if db_host.lower() in local_names:
+ new_host = host_ip
+ else:
+ new_host = socket.getfqdn(db_host)
+ self.container.attach_run('echo $\'\n[AUTOTEST_WEB]\nhost: %s\n\' >> %s'
+ % (new_host, shadow_config))
+
+ afe_host = config.get_config_value('SERVER', 'hostname')
+ if afe_host.lower() in local_names:
+ new_host = host_ip
+ else:
+ new_host = socket.getfqdn(afe_host)
+ self.container.attach_run('echo $\'\n[SERVER]\nhostname: %s\n\' >> %s' %
+ (new_host, shadow_config))
+
+ # Update configurations in SSP section:
+ # user: The user running current process.
+ # is_moblab: True if the autotest server is a Moblab instance.
+ # host_container_ip: IP address of the lxcbr0 interface. Process running
+ # inside container can make RPC through this IP.
+ self.container.attach_run(
+ 'echo $\'\n[SSP]\nuser: %s\nis_moblab: %s\n'
+ 'host_container_ip: %s\n\' >> %s' %
+ (getpass.getuser(), bool(utils.is_moblab()),
+ lxc_utils.get_host_ip(), shadow_config))
+
+
+ def _modify_ssh_config(self):
+ """Modify ssh config for it to work inside container.
+
+ This is only called when default ssp_deploy_config is used. If shadow
+ deploy config is manually set up, this function will not be called.
+ Therefore, the source of ssh config must be properly updated to be able
+ to work inside container.
+
+ """
+ # Remove domain specific flags.
+ ssh_config = '/root/.ssh/config'
+ self.container.attach_run('sed -i \'s/UseProxyIf=false//g\' \'%s\'' %
+ ssh_config)
+        # TODO(dshi): crbug.com/451622 The ssh connection loglevel is set to
+        # ERROR in the container until the master ssh connection works. This
+        # is to avoid logs being flooded with the warning `Permanently added
+        # '[hostname]' (RSA) to the list of known hosts.` (crbug.com/478364)
+        # The sed command injects the following at the beginning of
+        # .ssh/config. With this change, the ssh command will not post
+        # warnings.
+ # Host *
+ # LogLevel Error
+ self.container.attach_run(
+ 'sed -i \'1s/^/Host *\\n LogLevel ERROR\\n\\n/\' \'%s\'' %
+ ssh_config)
+
+ # Inject ssh config for moblab to ssh to dut from container.
+ if utils.is_moblab():
+ # ssh to moblab itself using moblab user.
+ self.container.attach_run(
+ 'echo $\'\nHost 192.168.231.1\n User moblab\n '
+ 'IdentityFile %%d/.ssh/testing_rsa\' >> %s' %
+ '/root/.ssh/config')
+ # ssh to duts using root user.
+ self.container.attach_run(
+ 'echo $\'\nHost *\n User root\n '
+ 'IdentityFile %%d/.ssh/testing_rsa\' >> %s' %
+ '/root/.ssh/config')
+
+
+ def deploy_pre_start(self):
+ """Deploy configs before the container is started.
+ """
+ for deploy_config in self.deploy_configs:
+ self._deploy_config_pre_start(deploy_config)
+ for mount_config in self.mount_configs:
+ if (mount_config.force_create and
+ not os.path.exists(mount_config.source)):
+ utils.run('mkdir -p %s' % mount_config.source)
+
+
+ def deploy_post_start(self):
+ """Deploy configs after the container is started.
+ """
+ for deploy_config in self.deploy_configs:
+ self._deploy_config_post_start(deploy_config)
+        # Autotest shadow config requires special handling to replace a
+        # hostname of `localhost` with the host IP. Shards always use
+        # `localhost` as the value of SERVER/hostname and AUTOTEST_WEB/host.
+ self._modify_shadow_config()
+ # Only apply special treatment for files deployed by the default
+ # ssp_deploy_config
+ if not self.is_shadow_config:
+ self._modify_ssh_config()
diff --git a/site_utils/lxc/constants.py b/site_utils/lxc/constants.py
new file mode 100644
index 0000000..910bf8e
--- /dev/null
+++ b/site_utils/lxc/constants.py
@@ -0,0 +1,98 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+import common
+from autotest_lib.client.bin import utils as common_utils
+from autotest_lib.client.common_lib.global_config import global_config
+from autotest_lib.site_utils.lxc import config as lxc_config
+
+
+# Name of the base container.
+BASE = global_config.get_config_value('AUTOSERV', 'container_base_name')
+# Naming convention of a test container, e.g., test_300_1422862512_2424, where:
+# 300: The test job ID.
+# 1422862512: The timestamp when the container is created.
+# 2424: The PID of the autoserv process that starts the container.
+TEST_CONTAINER_NAME_FMT = 'test_%s_%d_%d'
+# Naming convention of the result directory in test container.
+RESULT_DIR_FMT = os.path.join(lxc_config.CONTAINER_AUTOTEST_DIR, 'results',
+ '%s')
+# Attributes to retrieve about containers.
+ATTRIBUTES = ['name', 'state']
+
+# Format for a mount entry to share a host directory with the container.
+# source is the directory on the host; destination is the directory in the
+# container. readonly is a bind flag for a read-only mount; its value should
+# be `,ro`.
+MOUNT_FMT = ('lxc.mount.entry = %(source)s %(destination)s none '
+ 'bind%(readonly)s 0 0')
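+# For example (illustrative values), MOUNT_FMT % {'source': '/host/results',
+# 'destination': 'usr/local/autotest/results', 'readonly': ',ro'} renders as:
+#   lxc.mount.entry = /host/results usr/local/autotest/results none bind,ro 0 0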
+SSP_ENABLED = global_config.get_config_value('AUTOSERV', 'enable_ssp_container',
+ type=bool, default=True)
+# URL of the folder that stores the base container.
+CONTAINER_BASE_FOLDER_URL = global_config.get_config_value('AUTOSERV',
+ 'container_base_folder_url')
+CONTAINER_BASE_URL_FMT = '%s/%%s.tar.xz' % CONTAINER_BASE_FOLDER_URL
+CONTAINER_BASE_URL = CONTAINER_BASE_URL_FMT % BASE
+# Default directory used to store LXC containers.
+DEFAULT_CONTAINER_PATH = global_config.get_config_value('AUTOSERV',
+ 'container_path')
+
+# Path to the drone_tmp folder in the container, which stores the control file
+# for the test job to run.
+CONTROL_TEMP_PATH = os.path.join(lxc_config.CONTAINER_AUTOTEST_DIR, 'drone_tmp')
+
+# Bash command to return the file count in a directory. Test the existence first
+# so the command can return an error code if the directory doesn't exist.
+COUNT_FILE_CMD = '[ -d %(dir)s ] && ls %(dir)s | wc -l'
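+# For example, COUNT_FILE_CMD % {'dir': '/usr/local/autotest'} renders as:
+#   [ -d /usr/local/autotest ] && ls /usr/local/autotest | wc -l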
+
+# Command line to append content to a file
+APPEND_CMD_FMT = ('echo \'%(content)s\' | sudo tee --append %(file)s'
+ '> /dev/null')
+
+# Path to site-packages in Moblab.
+MOBLAB_SITE_PACKAGES = '/usr/lib64/python2.7/site-packages'
+MOBLAB_SITE_PACKAGES_CONTAINER = '/usr/local/lib/python2.7/dist-packages/'
+
+# Flag to indicate it's running in a Moblab. Due to crbug.com/457496, lxc-ls has
+# different behavior in Moblab.
+IS_MOBLAB = common_utils.is_moblab()
+
+# TODO(dshi): If we are adding more logic in how lxc should interact with
+# different systems, we should consider code refactoring to use a setting-style
+# object to store following flags mapping to different systems.
+# TODO(crbug.com/464834): Snapshot clone is disabled until Moblab can
+# support overlayfs or aufs, which requires a newer kernel.
+SUPPORT_SNAPSHOT_CLONE = not IS_MOBLAB
+
+# Number of seconds to wait for network to be up in a container.
+NETWORK_INIT_TIMEOUT = 300
+# Network bring up is slower in Moblab.
+NETWORK_INIT_CHECK_INTERVAL = 2 if IS_MOBLAB else 0.1
+
+# Number of seconds to download files from devserver. We chose a timeout that
+# is on the same order as the permitted CTS runtime for normal jobs (1h). In
+# principle we should not retry timeouts as they indicate server/network
+# overload, but we may be tempted to retry for other failures.
+DEVSERVER_CALL_TIMEOUT = 3600
+# Number of retries to download files from devserver. There is no point in
+# having more than one retry for a file download.
+DEVSERVER_CALL_RETRY = 2
+# Average delay before attempting a retry to download from devserver. This
+# value needs to be large enough to allow an overloaded server/network to
+# calm down even in the face of retries.
+DEVSERVER_CALL_DELAY = 600
+
+# Type string for container related metadata.
+CONTAINER_CREATE_METADB_TYPE = 'container_create'
+CONTAINER_CREATE_RETRY_METADB_TYPE = 'container_create_retry'
+CONTAINER_RUN_TEST_METADB_TYPE = 'container_run_test'
+
+# The container's hostname MUST start with `test-` or `test_`. DHCP server in
+# MobLab uses that prefix to determine the lease time. Note that `test_` is not
+# a valid hostname as hostnames cannot contain underscores. Work is underway to
+# migrate to `test-`. See crbug/726131.
+CONTAINER_UTSNAME_FORMAT = 'test-%s'
+
+STATS_KEY = 'chromeos/autotest/lxc'
diff --git a/site_utils/lxc/container.py b/site_utils/lxc/container.py
new file mode 100644
index 0000000..a4003e2
--- /dev/null
+++ b/site_utils/lxc/container.py
@@ -0,0 +1,297 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import re
+import time
+
+import common
+from autotest_lib.client.bin import utils
+from autotest_lib.client.common_lib import error
+from autotest_lib.site_utils.lxc import config as lxc_config
+from autotest_lib.site_utils.lxc import constants
+from autotest_lib.site_utils.lxc import lxc
+
+try:
+ from chromite.lib import metrics
+except ImportError:
+ metrics = utils.metrics_mock
+
+
+class Container(object):
+ """A wrapper class of an LXC container.
+
+ The wrapper class provides methods to interact with a container, e.g.,
+ start, stop, destroy, run a command. It also has attributes of the
+ container, including:
+ name: Name of the container.
+ state: State of the container, e.g., ABORTING, RUNNING, STARTING, STOPPED,
+ or STOPPING.
+
+ lxc-ls can also collect other attributes of a container including:
+ ipv4: IP address for IPv4.
+ ipv6: IP address for IPv6.
+ autostart: If the container will autostart at system boot.
+ pid: Process ID of the container.
+ memory: Memory used by the container, as a string, e.g., "6.2MB"
+ ram: Physical ram used by the container, as a string, e.g., "6.2MB"
+ swap: swap used by the container, as a string, e.g., "1.0MB"
+
+    For performance reasons, such info is not collected for now.
+
+ The attributes available are defined in ATTRIBUTES constant.
+ """
+
+ def __init__(self, container_path, attribute_values):
+ """Initialize an object of LXC container with given attribute values.
+
+ @param container_path: Directory that stores the container.
+ @param attribute_values: A dictionary of attribute values for the
+ container.
+ """
+ self.container_path = os.path.realpath(container_path)
+ # Path to the rootfs of the container. This will be initialized when
+ # property rootfs is retrieved.
+ self._rootfs = None
+ for attribute, value in attribute_values.iteritems():
+ setattr(self, attribute, value)
+
+
+ def refresh_status(self):
+ """Refresh the status information of the container.
+ """
+ containers = lxc.get_container_info(self.container_path, name=self.name)
+ if not containers:
+            raise error.ContainerError(
+                    'No container found in directory %s with name of %s.' %
+                    (self.container_path, self.name))
+ attribute_values = containers[0]
+ for attribute, value in attribute_values.iteritems():
+ setattr(self, attribute, value)
+
+
+ @property
+ def rootfs(self):
+ """Path to the rootfs of the container.
+
+ This property returns the path to the rootfs of the container, that is,
+ the folder where the container stores its local files. It reads the
+ attribute lxc.rootfs from the config file of the container, e.g.,
+ lxc.rootfs = /usr/local/autotest/containers/t4/rootfs
+ If the container is created with snapshot, the rootfs is a chain of
+ folders, separated by `:` and ordered by how the snapshot is created,
+ e.g.,
+ lxc.rootfs = overlayfs:/usr/local/autotest/containers/base/rootfs:
+ /usr/local/autotest/containers/t4_s/delta0
+ This function returns the last folder in the chain, in above example,
+ that is `/usr/local/autotest/containers/t4_s/delta0`
+
+        Files in the rootfs are accessible directly within the container. For
+        example, a folder on the host "[rootfs]/usr/local/file1" can be
+        accessed inside the container at path "/usr/local/file1". Note that a
+        symlink on the host cannot cross the host/container boundary; instead,
+        a directory mount should be used (refer to function mount_dir).
+
+ @return: Path to the rootfs of the container.
+ """
+ if not self._rootfs:
+ cmd = ('sudo lxc-info -P %s -n %s -c lxc.rootfs' %
+ (self.container_path, self.name))
+ lxc_rootfs_config = utils.run(cmd).stdout.strip()
+ match = re.match('lxc.rootfs = (.*)', lxc_rootfs_config)
+ if not match:
+ raise error.ContainerError(
+ 'Failed to locate rootfs for container %s. lxc.rootfs '
+ 'in the container config file is %s' %
+ (self.name, lxc_rootfs_config))
+ lxc_rootfs = match.group(1)
+ self.clone_from_snapshot = ':' in lxc_rootfs
+ if self.clone_from_snapshot:
+ self._rootfs = lxc_rootfs.split(':')[-1]
+ else:
+ self._rootfs = lxc_rootfs
+ return self._rootfs
+
+
+ def attach_run(self, command, bash=True):
+ """Attach to a given container and run the given command.
+
+ @param command: Command to run in the container.
+ @param bash: Run the command through bash -c "command". This allows
+ pipes to be used in command. Default is set to True.
+
+ @return: The output of the command.
+
+ @raise error.CmdError: If container does not exist, or not running.
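+
+        Example (illustrative):
+            container.attach_run('cat /etc/hostname')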
+ """
+ cmd = 'sudo lxc-attach -P %s -n %s' % (self.container_path, self.name)
+ if bash and not command.startswith('bash -c'):
+ command = 'bash -c "%s"' % utils.sh_escape(command)
+ cmd += ' -- %s' % command
+ # TODO(dshi): crbug.com/459344 Set sudo to default to False when test
+ # container can be unprivileged container.
+ return utils.run(cmd)
+
+
+ def is_network_up(self):
+ """Check if network is up in the container by curl base container url.
+
+ @return: True if the network is up, otherwise False.
+ """
+ try:
+ self.attach_run('curl --head %s' % constants.CONTAINER_BASE_URL)
+ return True
+ except error.CmdError as e:
+ logging.debug(e)
+ return False
+
+
+ @metrics.SecondsTimerDecorator(
+ '%s/container_start_duration' % constants.STATS_KEY)
+ def start(self, wait_for_network=True):
+ """Start the container.
+
+ @param wait_for_network: True to wait for network to be up. Default is
+ set to True.
+
+ @raise ContainerError: If container does not exist, or fails to start.
+ """
+ cmd = 'sudo lxc-start -P %s -n %s -d' % (self.container_path, self.name)
+ output = utils.run(cmd).stdout
+ self.refresh_status()
+ if self.state != 'RUNNING':
+ raise error.ContainerError(
+ 'Container %s failed to start. lxc command output:\n%s' %
+ (os.path.join(self.container_path, self.name),
+ output))
+
+ if wait_for_network:
+ logging.debug('Wait for network to be up.')
+ start_time = time.time()
+ utils.poll_for_condition(
+ condition=self.is_network_up,
+ timeout=constants.NETWORK_INIT_TIMEOUT,
+ sleep_interval=constants.NETWORK_INIT_CHECK_INTERVAL)
+ logging.debug('Network is up after %.2f seconds.',
+ time.time() - start_time)
+
+
+ @metrics.SecondsTimerDecorator(
+ '%s/container_stop_duration' % constants.STATS_KEY)
+ def stop(self):
+ """Stop the container.
+
+        @raise ContainerError: If container does not exist, or fails to stop.
+ """
+ cmd = 'sudo lxc-stop -P %s -n %s' % (self.container_path, self.name)
+ output = utils.run(cmd).stdout
+ self.refresh_status()
+ if self.state != 'STOPPED':
+ raise error.ContainerError(
+ 'Container %s failed to be stopped. lxc command output:\n'
+ '%s' % (os.path.join(self.container_path, self.name),
+ output))
+
+
+ @metrics.SecondsTimerDecorator(
+ '%s/container_destroy_duration' % constants.STATS_KEY)
+ def destroy(self, force=True):
+ """Destroy the container.
+
+        @param force: Set to True to destroy the container even if it's
+                      running. This is faster than stopping the container
+                      first and then destroying it. Default is set to True.
+
+ @raise ContainerError: If container does not exist or failed to destroy
+ the container.
+ """
+ cmd = 'sudo lxc-destroy -P %s -n %s' % (self.container_path,
+ self.name)
+ if force:
+ cmd += ' -f'
+ utils.run(cmd)
+
+
+ def mount_dir(self, source, destination, readonly=False):
+ """Mount a directory in host to a directory in the container.
+
+ @param source: Directory in host to be mounted.
+ @param destination: Directory in container to mount the source directory
+ @param readonly: Set to True to make a readonly mount, default is False.
+ """
+ # Destination path in container must be relative.
+ destination = destination.lstrip('/')
+ # Create directory in container for mount.
+ utils.run('sudo mkdir -p %s' % os.path.join(self.rootfs, destination))
+ config_file = os.path.join(self.container_path, self.name, 'config')
+ mount = constants.MOUNT_FMT % {'source': source,
+ 'destination': destination,
+ 'readonly': ',ro' if readonly else ''}
+ utils.run(
+ constants.APPEND_CMD_FMT % {'content': mount, 'file': config_file})
+
+
+ def verify_autotest_setup(self, job_folder):
+ """Verify autotest code is set up properly in the container.
+
+ @param job_folder: Name of the job result folder.
+
+ @raise ContainerError: If autotest code is not set up properly.
+ """
+        # Verify autotest code is set up by checking a list of
+        # (directory, minimum file count) pairs.
+ if constants.IS_MOBLAB:
+ site_packages_path = constants.MOBLAB_SITE_PACKAGES_CONTAINER
+ else:
+ site_packages_path = os.path.join(lxc_config.CONTAINER_AUTOTEST_DIR,
+ 'site-packages')
+ directories_to_check = [
+ (lxc_config.CONTAINER_AUTOTEST_DIR, 3),
+ (constants.RESULT_DIR_FMT % job_folder, 0),
+ (site_packages_path, 3)]
+ for directory, count in directories_to_check:
+ result = self.attach_run(command=(constants.COUNT_FILE_CMD %
+ {'dir': directory})).stdout
+ logging.debug('%s entries in %s.', int(result), directory)
+ if int(result) < count:
+ raise error.ContainerError('%s is not properly set up.' %
+ directory)
+        # lxc-attach does not run the command in a shell, thus .bashrc is not
+        # loaded. The following command creates a symlink in /usr/bin/ for
+        # gsutil if it's installed.
+ # TODO(dshi): Remove this code after lab container is updated with
+ # gsutil installed in /usr/bin/
+ self.attach_run('test -f /root/gsutil/gsutil && '
+ 'ln -s /root/gsutil/gsutil /usr/bin/gsutil || true')
+
+
+ def modify_import_order(self):
+ """Swap the python import order of lib and local/lib.
+
+        In Moblab, the host's python modules located in
+        /usr/lib64/python2.7/site-packages are mounted to the following folder
+        inside the container: /usr/local/lib/python2.7/dist-packages/. The
+        modules include an old version of the requests module, which is used
+        in autotest site-packages. For tests, the module is only used in
+        dev_server/symbolicate_dump for requests.call and requests.codes.OK.
+        When pip is installed inside the container, it installs requests
+        version 2.2.1 in /usr/lib/python2.7/dist-packages/. That version is
+        newer than the one used in autotest site-packages, but not the latest
+        either.
+        According to /usr/lib/python2.7/site.py, modules in /usr/local/lib are
+        imported before the ones in /usr/lib. That leads pip to use the older
+        version of requests (0.11.2), and it will fail. On the other hand,
+        requests 2.2.1 can't be installed in CrOS (refer to CL:265759), and a
+        higher version of the requests module can't work with pip.
+        The only fix is to switch the import order, so modules in /usr/lib
+        are imported before /usr/local/lib.
+ """
+ site_module = '/usr/lib/python2.7/site.py'
+ self.attach_run("sed -i ':a;N;$!ba;s/\"local\/lib\",\\n/"
+ "\"lib_placeholder\",\\n/g' %s" % site_module)
+ self.attach_run("sed -i ':a;N;$!ba;s/\"lib\",\\n/"
+ "\"local\/lib\",\\n/g' %s" % site_module)
+ self.attach_run('sed -i "s/lib_placeholder/lib/g" %s' %
+ site_module)
diff --git a/site_utils/lxc/container_bucket.py b/site_utils/lxc/container_bucket.py
new file mode 100644
index 0000000..6f7d73f
--- /dev/null
+++ b/site_utils/lxc/container_bucket.py
@@ -0,0 +1,382 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import socket
+import time
+
+import common
+from autotest_lib.client.bin import utils
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.common_lib.cros.graphite import autotest_es
+from autotest_lib.site_utils.lxc import Container
+from autotest_lib.site_utils.lxc import config as lxc_config
+from autotest_lib.site_utils.lxc import constants
+from autotest_lib.site_utils.lxc import lxc
+from autotest_lib.site_utils.lxc import utils as lxc_utils
+from autotest_lib.site_utils.lxc.cleanup_if_fail import cleanup_if_fail
+
+try:
+ from chromite.lib import metrics
+except ImportError:
+ metrics = utils.metrics_mock
+
+
+class ContainerBucket(object):
+ """A wrapper class to interact with containers in a specific container path.
+ """
+
+ def __init__(self, container_path=constants.DEFAULT_CONTAINER_PATH):
+ """Initialize a ContainerBucket.
+
+ @param container_path: Path to the directory used to store containers.
+ Default is set to AUTOSERV/container_path in
+ global config.
+ """
+ self.container_path = os.path.realpath(container_path)
+
+
+ def get_all(self):
+ """Get details of all containers.
+
+ @return: A dictionary of all containers with detailed attributes,
+ indexed by container name.
+ """
+ info_collection = lxc.get_container_info(self.container_path)
+ containers = {}
+ for info in info_collection:
+ container = Container(self.container_path, info)
+ containers[container.name] = container
+ return containers
+
+
+ def get(self, name):
+ """Get a container with matching name.
+
+ @param name: Name of the container.
+
+ @return: A container object with matching name. Returns None if no
+ container matches the given name.
+ """
+ return self.get_all().get(name, None)
+
+
+ def exist(self, name):
+ """Check if a container exists with the given name.
+
+ @param name: Name of the container.
+
+ @return: True if the container with the given name exists, otherwise
+ returns False.
+ """
+        return self.get(name) is not None
+
+
+ def destroy_all(self):
+ """Destroy all containers, base must be destroyed at the last.
+ """
+ containers = self.get_all().values()
+ for container in sorted(
+ containers, key=lambda n: 1 if n.name == constants.BASE else 0):
+ logging.info('Destroy container %s.', container.name)
+ container.destroy()
+
+
+ @metrics.SecondsTimerDecorator(
+ '%s/create_from_base_duration' % constants.STATS_KEY)
+ def create_from_base(self, name, disable_snapshot_clone=False,
+ force_cleanup=False):
+ """Create a container from the base container.
+
+ @param name: Name of the container.
+        @param disable_snapshot_clone: Set to True to force cloning without
+                using snapshot clone even if the host supports it.
+        @param force_cleanup: Force cleanup of an existing container.
+
+ @return: A Container object for the created container.
+
+        @raise ContainerError: If the container already exists.
+ @raise error.CmdError: If lxc-clone call failed for any reason.
+ """
+ if self.exist(name) and not force_cleanup:
+ raise error.ContainerError('Container %s already exists.' % name)
+
+ use_snapshot = (constants.SUPPORT_SNAPSHOT_CLONE and not
+ disable_snapshot_clone)
+
+ try:
+ return self.clone_container(path=self.container_path,
+ name=constants.BASE,
+ new_path=self.container_path,
+ new_name=name,
+ snapshot=use_snapshot,
+ cleanup=force_cleanup)
+ except error.CmdError:
+ if not use_snapshot:
+ raise
+ else:
+ # Snapshot clone failed, retry clone without snapshot.
+ container = self.clone_container(path=self.container_path,
+ name=constants.BASE,
+ new_path=self.container_path,
+ new_name=name,
+ snapshot=False,
+ cleanup=force_cleanup)
+ # Report metadata about retry success.
+ autotest_es.post(
+ use_http=True,
+ type_str=constants.CONTAINER_CREATE_RETRY_METADB_TYPE,
+ metadata={'drone': socket.gethostname(),
+ 'name': name,
+ 'success': True})
+ return container
+
+
+ def clone_container(self, path, name, new_path, new_name, snapshot=False,
+ cleanup=False):
+ """Clone one container from another.
+
+ @param path: LXC path for the source container.
+ @param name: Name of the source container.
+ @param new_path: LXC path for the cloned container.
+ @param new_name: Name for the cloned container.
+ @param snapshot: Whether to snapshot, or create a full clone.
+        @param cleanup: If a container with the given name and path already
+                exists, clean it up first.
+
+ @return: A Container object for the created container.
+
+ @raise ContainerError: If the container already exists.
+ @raise error.CmdError: If lxc-clone call failed for any reason.
+ """
+ # Cleanup existing container with the given name.
+ container_folder = os.path.join(new_path, new_name)
+
+ if lxc_utils.path_exists(container_folder):
+ if not cleanup:
+ raise error.ContainerError('Container %s already exists.' %
+ new_name)
+            container = Container(new_path, {'name': new_name})
+            try:
+                container.destroy()
+            except error.CmdError as e:
+                # The container could be created in an incomplete state.
+                # Delete the container folder instead.
+                logging.warn('Failed to destroy container %s, error: %s',
+                             new_name, e)
+ utils.run('sudo rm -rf "%s"' % container_folder)
+
+ snapshot_arg = '-s' if snapshot else ''
+ # overlayfs is the default clone backend storage. However it is not
+ # supported in Ganeti yet. Use aufs as the alternative.
+ aufs_arg = '-B aufs' if utils.is_vm() and snapshot else ''
+ cmd = (('sudo lxc-clone --lxcpath %s --newpath %s '
+ '--orig %s --new %s %s %s') %
+ (path, new_path, name, new_name, snapshot_arg, aufs_arg))
+
+ utils.run(cmd)
+ return self.get(new_name)
+
+
+ @cleanup_if_fail()
+ def setup_base(self, name=constants.BASE, force_delete=False):
+ """Setup base container.
+
+        @param name: Name of the base container, defaults to base.
+        @param force_delete: True to force deletion of an existing base
+                             container. This action will destroy all running
+                             test containers. Default is set to False.
+ """
+ if not self.container_path:
+            raise error.ContainerError(
+                    'You must set a valid directory to store containers in '
+                    'global config "AUTOSERV/container_path".')
+
+ if not os.path.exists(self.container_path):
+ os.makedirs(self.container_path)
+
+ base_path = os.path.join(self.container_path, name)
+ if self.exist(name) and not force_delete:
+            logging.error(
+                    'Base container already exists. Set force_delete to True '
+                    'to force re-staging of the base container. Note that '
+                    'this action will destroy all running test containers.')
+            # Set proper file permissions. The base container in Moblab may
+            # not be owned by root. Force an update of the folder's owner.
+ # TODO(dshi): Change root to current user when test container can be
+ # unprivileged container.
+ utils.run('sudo chown -R root "%s"' % base_path)
+ utils.run('sudo chgrp -R root "%s"' % base_path)
+ return
+
+ # Destroy existing base container if exists.
+ if self.exist(name):
+            # TODO: We may need to destroy only the snapshots created from
+            # this base container, not all containers.
+ self.destroy_all()
+
+ # Download and untar the base container.
+ tar_path = os.path.join(self.container_path, '%s.tar.xz' % name)
+ path_to_cleanup = [tar_path, base_path]
+ for path in path_to_cleanup:
+ if os.path.exists(path):
+ utils.run('sudo rm -rf "%s"' % path)
+ container_url = constants.CONTAINER_BASE_URL_FMT % name
+ lxc.download_extract(container_url, tar_path, self.container_path)
+ # Remove the downloaded container tar file.
+ utils.run('sudo rm "%s"' % tar_path)
+ # Set proper file permission.
+ # TODO(dshi): Change root to current user when test container can be
+ # unprivileged container.
+ utils.run('sudo chown -R root "%s"' % base_path)
+ utils.run('sudo chgrp -R root "%s"' % base_path)
+
+ # Update container config with container_path from global config.
+ config_path = os.path.join(base_path, 'config')
+ utils.run('sudo sed -i "s|container_dir|%s|g" "%s"' %
+ (self.container_path, config_path))
+
+
+ @metrics.SecondsTimerDecorator(
+ '%s/setup_test_duration' % constants.STATS_KEY)
+ @cleanup_if_fail()
+ def setup_test(self, name, job_id, server_package_url, result_path,
+ control=None, skip_cleanup=False, job_folder=None,
+ dut_name=None):
+ """Setup test container for the test job to run.
+
+ The setup includes:
+ 1. Install autotest_server package from given url.
+ 2. Copy over local shadow_config.ini.
+ 3. Mount local site-packages.
+ 4. Mount test result directory.
+
+ TODO(dshi): Setup also needs to include test control file for autoserv
+ to run in container.
+
+ @param name: Name of the container.
+ @param job_id: Job id for the test job to run in the test container.
+ @param server_package_url: Url to download autotest_server package.
+ @param result_path: Directory to be mounted to container to store test
+ results.
+ @param control: Path to the control file to run the test job. Default is
+ set to None.
+ @param skip_cleanup: Set to True to skip cleanup, used to troubleshoot
+ container failures.
+ @param job_folder: Folder name of the job, e.g., 123-debug_user.
+ @param dut_name: Name of the dut to run test, used as the hostname of
+ the container. Default is None.
+ @return: A Container object for the test container.
+
+ @raise ContainerError: If container does not exist, or not running.
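+
+        Illustrative call (a sketch; the url and paths are made up):
+            bucket.setup_test('test_123_1422862512_2424', job_id='123',
+                              server_package_url=pkg_url,
+                              result_path='/usr/local/autotest/results/123',
+                              job_folder='123-debug_user')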
+ """
+ start_time = time.time()
+
+        if not os.path.exists(result_path):
+            raise error.ContainerError('Result directory does not exist: %s' %
+                                       result_path)
+ result_path = os.path.abspath(result_path)
+
+ # Save control file to result_path temporarily. The reason is that the
+ # control file in drone_tmp folder can be deleted during scheduler
+        # restart. For tests not using SSP, the window between test start and
+        # the control file being picked up by the test is very small
+        # (< 2 seconds).
+ # However, for tests using SSP, it takes around 1 minute before the
+ # container is setup. If scheduler is restarted during that period, the
+ # control file will be deleted, and the test will fail.
+ if control:
+ control_file_name = os.path.basename(control)
+ safe_control = os.path.join(result_path, control_file_name)
+ utils.run('cp %s %s' % (control, safe_control))
+
+ # Create test container from the base container.
+ container = self.create_from_base(name)
+
+        # Update the hostname of the test container to be `dut-name`.
+        # Some TradeFed tests use the hostname in test results, which is used
+        # to group test results in the dashboard. The default container name is
+        # the name of the folder, which is unique (as it is composed of job id
+        # and timestamp). For a better result view, the container's hostname is
+        # set to a string containing the dut hostname.
+ if dut_name:
+ config_file = os.path.join(container.container_path, name, 'config')
+ lxc_utsname_setting = (
+ 'lxc.utsname = ' +
+ (constants.CONTAINER_UTSNAME_FORMAT %
+ dut_name.replace('.', '-')))
+ utils.run(
+ constants.APPEND_CMD_FMT % {'content': lxc_utsname_setting,
+ 'file': config_file})
+
+ # Deploy server side package
+ usr_local_path = os.path.join(container.rootfs, 'usr', 'local')
+ autotest_pkg_path = os.path.join(usr_local_path,
+ 'autotest_server_package.tar.bz2')
+ autotest_path = os.path.join(usr_local_path, 'autotest')
+ # sudo is required so os.makedirs may not work.
+ utils.run('sudo mkdir -p %s'% usr_local_path)
+
+ lxc.download_extract(
+ server_package_url, autotest_pkg_path, usr_local_path)
+ deploy_config_manager = lxc_config.DeployConfigManager(container)
+ deploy_config_manager.deploy_pre_start()
+
+ # Copy over control file to run the test job.
+ if control:
+ container_drone_temp = os.path.join(autotest_path, 'drone_tmp')
+ utils.run('sudo mkdir -p %s'% container_drone_temp)
+ container_control_file = os.path.join(
+ container_drone_temp, control_file_name)
+ # Move the control file stored in the result folder to container.
+ utils.run('sudo mv %s %s' % (safe_control, container_control_file))
+
+ if constants.IS_MOBLAB:
+ site_packages_path = constants.MOBLAB_SITE_PACKAGES
+ site_packages_container_path = (
+ constants.MOBLAB_SITE_PACKAGES_CONTAINER[1:])
+ else:
+ site_packages_path = os.path.join(common.autotest_dir,
+ 'site-packages')
+ site_packages_container_path = os.path.join(
+ lxc_config.CONTAINER_AUTOTEST_DIR, 'site-packages')
+ mount_entries = [(site_packages_path, site_packages_container_path,
+ True),
+ (os.path.join(common.autotest_dir, 'puppylab'),
+ os.path.join(lxc_config.CONTAINER_AUTOTEST_DIR,
+ 'puppylab'),
+ True),
+ (result_path,
+ os.path.join(constants.RESULT_DIR_FMT % job_folder),
+ False),
+ ]
+ for mount_config in deploy_config_manager.mount_configs:
+ mount_entries.append((mount_config.source, mount_config.target,
+ mount_config.readonly))
+ # Update container config to mount directories.
+ for source, destination, readonly in mount_entries:
+ container.mount_dir(source, destination, readonly)
+
+ # Update file permissions.
+ # TODO(dshi): crbug.com/459344 Skip following action when test container
+ # can be unprivileged container.
+ utils.run('sudo chown -R root "%s"' % autotest_path)
+ utils.run('sudo chgrp -R root "%s"' % autotest_path)
+
+        container.start()
+ deploy_config_manager.deploy_post_start()
+
+ container.modify_import_order()
+
+ container.verify_autotest_setup(job_folder)
+
+ autotest_es.post(use_http=True,
+ type_str=constants.CONTAINER_CREATE_METADB_TYPE,
+ metadata={'drone': socket.gethostname(),
+ 'job_id': job_id,
+ 'time_used': time.time() - start_time,
+ 'success': True})
+
+ logging.debug('Test container %s is set up.', name)
+ return container
diff --git a/site_utils/lxc/lxc.py b/site_utils/lxc/lxc.py
new file mode 100644
index 0000000..7403582
--- /dev/null
+++ b/site_utils/lxc/lxc.py
@@ -0,0 +1,275 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import tempfile
+
+import common
+from autotest_lib.client.bin import utils as common_utils
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.common_lib.cros import dev_server
+from autotest_lib.client.common_lib.cros import retry
+from autotest_lib.server import utils as server_utils
+from autotest_lib.site_utils.lxc import constants
+from autotest_lib.site_utils.lxc import utils as lxc_utils
+
+try:
+ from chromite.lib import metrics
+except ImportError:
+ metrics = common_utils.metrics_mock
+
+
+def _get_container_info_moblab(container_path, **filters):
+ """Get a collection of container information in the given container path
+ in a Moblab.
+
+ TODO(crbug.com/457496): remove this method once python 3 can be installed
+ in Moblab and lxc-ls command can use python 3 code.
+
+ When running in Moblab, lxc-ls behaves differently from a server with python
+ 3 installed:
+ 1. lxc-ls returns a list of containers installed under /etc/lxc, the default
+ lxc container directory.
+ 2. lxc-ls --active lists all active containers, regardless where the
+ container is located.
+ For such differences, we have to special case Moblab to make the behavior
+ close to a server with python 3 installed. That is,
+ 1. List only containers in a given folder.
+ 2. Assume all active containers have state of RUNNING.
+
+ @param container_path: Path to look for containers.
+ @param filters: Key value to filter the containers, e.g., name='base'
+
+ @return: A list of dictionaries that each dictionary has the information of
+ a container. The keys are defined in ATTRIBUTES.
+ """
+ info_collection = []
+ active_containers = common_utils.run('sudo lxc-ls --active').stdout.split()
+ name_filter = filters.get('name', None)
+ state_filter = filters.get('state', None)
+ if filters and set(filters.keys()) - set(['name', 'state']):
+ raise error.ContainerError('When running in Moblab, container list '
+ 'filter only supports name and state.')
+
+ for name in os.listdir(container_path):
+ # Skip all files and folders without rootfs subfolder.
+ if (os.path.isfile(os.path.join(container_path, name)) or
+ not lxc_utils.path_exists(os.path.join(container_path, name,
+ 'rootfs'))):
+ continue
+ info = {'name': name,
+ 'state': 'RUNNING' if name in active_containers else 'STOPPED'
+ }
+ if ((name_filter and name_filter != info['name']) or
+ (state_filter and state_filter != info['state'])):
+ continue
+
+ info_collection.append(info)
+ return info_collection
+
+
+def get_container_info(container_path, **filters):
+ """Get a collection of container information in the given container path.
+
+    This method parses the output of lxc-ls to get a list of container
+    information. The lxc-ls command output looks like:
+ NAME STATE IPV4 IPV6 AUTOSTART PID MEMORY RAM SWAP
+ --------------------------------------------------------------------------
+ base STOPPED - - NO - - - -
+ test_123 RUNNING 10.0.3.27 - NO 8359 6.28MB 6.28MB 0.0MB
+
+ @param container_path: Path to look for containers.
+ @param filters: Key value to filter the containers, e.g., name='base'
+
+ @return: A list of dictionaries that each dictionary has the information of
+ a container. The keys are defined in ATTRIBUTES.
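+
+    For the sample output above, the parsed return value would be
+    (illustrative):
+        [{'name': 'base', 'state': 'STOPPED'},
+         {'name': 'test_123', 'state': 'RUNNING'}]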
+ """
+ if constants.IS_MOBLAB:
+ return _get_container_info_moblab(container_path, **filters)
+
+ cmd = 'sudo lxc-ls -P %s -f -F %s' % (os.path.realpath(container_path),
+ ','.join(constants.ATTRIBUTES))
+ output = common_utils.run(cmd).stdout
+ info_collection = []
+
+ for line in output.splitlines()[1:]:
+ # Only LXC 1.x has the second line of '-' as a separator.
+ if line.startswith('------'):
+ continue
+ info_collection.append(dict(zip(constants.ATTRIBUTES, line.split())))
+ if filters:
+ filtered_collection = []
+ for key, value in filters.iteritems():
+ for info in info_collection:
+ if key in info and info[key] == value:
+ filtered_collection.append(info)
+ info_collection = filtered_collection
+ return info_collection
+
+
+# Make sure retries only happen in the non-timeout case.
[email protected]((error.CmdError),
+ blacklist=[error.CmdTimeoutError],
+ timeout_min=(constants.DEVSERVER_CALL_TIMEOUT *
+ constants.DEVSERVER_CALL_RETRY / 60),
+ delay_sec=constants.DEVSERVER_CALL_DELAY)
+def download_extract(url, target, extract_dir):
+ """Download the file from given url and save it to the target, then extract.
+
+ @param url: Url to download the file.
+ @param target: Path of the file to save to.
+ @param extract_dir: Directory to extract the content of the file to.
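+
+    Example (illustrative), staging a base container:
+        download_extract(constants.CONTAINER_BASE_URL, '/tmp/base.tar.xz',
+                         constants.DEFAULT_CONTAINER_PATH)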
+ """
+ remote_url = dev_server.DevServer.get_server_url(url)
+ # TODO(xixuan): Better to only ssh to devservers in lab, and continue using
+ # wget for ganeti devservers.
+ if remote_url in dev_server.ImageServerBase.servers():
+        # This can be run in multiple threads; pick a unique tmp_file.name.
+ with tempfile.NamedTemporaryFile(prefix=os.path.basename(target) + '_',
+ delete=False) as tmp_file:
+ dev_server.ImageServerBase.download_file(
+ url,
+ tmp_file.name,
+ timeout=constants.DEVSERVER_CALL_TIMEOUT)
+ common_utils.run('sudo mv %s %s' % (tmp_file.name, target))
+ else:
+ # We do not want to retry on CmdTimeoutError but still retry on
+ # CmdError. Hence we can't use wget --timeout=...
+ common_utils.run('sudo wget -nv %s -O %s' % (url, target),
+ stderr_tee=common_utils.TEE_TO_LOGS,
+ timeout=constants.DEVSERVER_CALL_TIMEOUT)
+
+ common_utils.run('sudo tar -xvf %s -C %s' % (target, extract_dir))
+
+
+def _install_package_precheck(packages):
+ """If SSP is not enabled or the test is running in chroot (using test_that),
+ packages installation should be skipped.
+
+ The check does not raise exception so tests started by test_that or running
+ in an Autotest setup with SSP disabled can continue. That assume the running
+ environment, chroot or a machine, has the desired packages installed
+ already.
+
+ @param packages: A list of names of the packages to install.
+
+ @return: True if package installation can continue. False if it should be
+ skipped.
+
+ """
+ if not constants.SSP_ENABLED and not common_utils.is_in_container():
+        logging.info('Server-side packaging is not enabled. Installation of '
+                     'package %s is skipped.', packages)
+        return False
+
+    if server_utils.is_inside_chroot():
+        logging.info('Test is running inside chroot. Installation of package '
+                     '%s is skipped.', packages)
+ return False
+
+ if not common_utils.is_in_container():
+ raise error.ContainerError('Package installation is only supported '
+ 'when test is running inside container.')
+
+ return True
+
+
[email protected](
+ '%s/install_packages_duration' % constants.STATS_KEY)
[email protected](error.CmdError, timeout_min=30)
+def install_packages(packages=[], python_packages=[], force_latest=False):
+ """Install the given package inside container.
+
+ !!! WARNING !!!
+    This call may introduce several minutes of delay in the test run. The best
+    way to avoid such delay is to update the base container used for the test
+    run. File a bug for the infra deputy to update the base container with the
+    new package a test requires.
+
+ @param packages: A list of names of the packages to install.
+ @param python_packages: A list of names of the python packages to install
+ using pip.
+    @param force_latest: True to force installation of the latest version of
+                         the package. Defaults to False, which means the
+                         package is not reinstalled if it's already installed,
+                         even with an old version.
+
+ @raise error.ContainerError: If package is attempted to be installed outside
+ a container.
+ @raise error.CmdError: If the package doesn't exist or failed to install.
+
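+    Example (illustrative; these package names are also exercised by the
+    functional test in this change):
+        install_packages(packages=['atop'], python_packages=['acora'])
+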
+ """
+    # Avoid mutable default arguments: normalize None to fresh lists so the
+    # extend() call below never mutates a shared default list.
+    packages = packages or []
+    python_packages = python_packages or []
+    if not _install_package_precheck(packages or python_packages):
+        return
+
+ # If force_latest is False, only install packages that are not already
+ # installed.
+ if not force_latest:
+ packages = [p for p in packages
+ if not common_utils.is_package_installed(p)]
+ python_packages = [p for p in python_packages
+ if not common_utils.is_python_package_installed(p)]
+ if not packages and not python_packages:
+ logging.debug('All packages are installed already, skip reinstall.')
+ return
+
+    # Always run apt-get update before installing any packages. The base
+    # container may have an outdated package cache.
+ common_utils.run('sudo apt-get update')
+ if python_packages:
+ packages.extend(['python-pip', 'python-dev'])
+ if packages:
+ common_utils.run(
+ 'sudo apt-get install %s -y --force-yes' % ' '.join(packages))
+ logging.debug('Packages are installed: %s.', packages)
+
+ target_setting = ''
+ # For containers running in Moblab, /usr/local/lib/python2.7/dist-packages/
+ # is a readonly mount from the host. Therefore, new python modules have to
+ # be installed in /usr/lib/python2.7/dist-packages/
+    # Containers created in Moblab do not have the autotest/site-packages
+    # folder.
+ if not os.path.exists('/usr/local/autotest/site-packages'):
+ target_setting = '--target="/usr/lib/python2.7/dist-packages/"'
+ if python_packages:
+ common_utils.run('sudo pip install %s %s' % (target_setting,
+ ' '.join(python_packages)))
+ logging.debug('Python packages are installed: %s.', python_packages)
+
+
[email protected](error.CmdError, timeout_min=20)
+def install_package(package):
+ """Install the given package inside container.
+
+    This function is kept for backwards compatibility reasons. New code should
+    use install_packages instead for better performance.
+
+ @param package: Name of the package to install.
+
+    @raise error.ContainerError: If installation is attempted outside a
+                                 container.
+ @raise error.CmdError: If the package doesn't exist or failed to install.
+
+ """
+    logging.warn('This function is deprecated, please use install_packages '
+                 'instead.')
+ install_packages(packages=[package])
+
+
[email protected](error.CmdError, timeout_min=20)
+def install_python_package(package):
+ """Install the given python package inside container using pip.
+
+    This function is kept for backwards compatibility reasons. New code should
+    use install_packages instead for better performance.
+
+ @param package: Name of the python package to install.
+
+ @raise error.CmdError: If the package doesn't exist or failed to install.
+ """
+    logging.warn('This function is deprecated, please use install_packages '
+                 'instead.')
+ install_packages(python_packages=[package])
diff --git a/site_utils/lxc/lxc_config_unittest.py b/site_utils/lxc/lxc_config_unittest.py
new file mode 100644
index 0000000..52184cb
--- /dev/null
+++ b/site_utils/lxc/lxc_config_unittest.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import os
+import unittest
+
+import common
+from autotest_lib.site_utils.lxc import config as lxc_config
+
+
+class DeployConfigTest(unittest.TestCase):
+ """Test DeployConfigManager.
+ """
+
+ def testValidate(self):
+ """Test ssp_deploy_config.json can be validated.
+ """
+ global_deploy_config_file = os.path.join(
+ common.autotest_dir, lxc_config.SSP_DEPLOY_CONFIG_FILE)
+ with open(global_deploy_config_file) as f:
+ deploy_configs = json.load(f)
+ for config in deploy_configs:
+ lxc_config.DeployConfigManager.validate(config)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/site_utils/lxc/lxc_functional_test.py b/site_utils/lxc/lxc_functional_test.py
new file mode 100644
index 0000000..9f5a623
--- /dev/null
+++ b/site_utils/lxc/lxc_functional_test.py
@@ -0,0 +1,385 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Function tests of lxc module. To be able to run this test, following setup
+is required:
+ 1. lxc is installed.
+ 2. Autotest code exists in /usr/local/autotest, with site-packages installed.
+ (run utils/build_externals.py)
+  3. The user running the test must have sudo access. Run the test with sudo.
+Note that the test does not require Autotest database and frontend.
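+
+Example invocation (illustrative; see parse_options for the available flags):
+    sudo python lxc_functional_test.py -v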
+"""
+
+
+import argparse
+import logging
+import os
+import sys
+import tempfile
+import time
+
+import common
+from autotest_lib.client.bin import utils
+from autotest_lib.site_utils import lxc
+
+
+TEST_JOB_ID = 123
+TEST_JOB_FOLDER = '123-debug_user'
+# Create a temp directory for functional tests. The directory is deliberately
+# not under /tmp so that Moblab is able to run the test.
+TEMP_DIR = tempfile.mkdtemp(dir=lxc.DEFAULT_CONTAINER_PATH,
+ prefix='container_test_')
+RESULT_PATH = os.path.join(TEMP_DIR, 'results', str(TEST_JOB_ID))
+# Link to download a test copy of the autotest server package.
+# Ideally the test should stage a build on the devserver and download the
+# autotest_server_package from the devserver. This test is focused on testing
+# containers, so it's preferred to avoid a dependency on the devserver.
+AUTOTEST_SERVER_PKG = ('http://storage.googleapis.com/abci-ssp/'
+ 'autotest-containers/autotest_server_package.tar.bz2')
+
+# Test log file to be created in result folder, content is `test`.
+TEST_LOG = 'test.log'
+# Name of test script file to run in container.
+TEST_SCRIPT = 'test.py'
+# Test script to run in container to verify autotest code setup.
+TEST_SCRIPT_CONTENT = """
+import socket
+import sys
+
+# Test import
+import common
+import chromite
+
+# This test has to run before the import of autotest_lib, because ts_mon
+# requires the httplib2 module in chromite/third_party. The one in Autotest
+# site-packages is outdated.
+%(ts_mon_test)s
+
+from autotest_lib.server import utils
+from autotest_lib.site_utils import lxc
+
+with open(sys.argv[1], 'w') as f:
+ f.write('test')
+
+# Confirm hostname starts with `test-`
+if not socket.gethostname().startswith('test-'):
+ raise Exception('The container\\\'s hostname must start with `test-`.')
+
+# Test installing packages
+lxc.install_packages(['atop'], ['acora'])
+
+"""
+
+TEST_SCRIPT_CONTENT_TS_MON = """
+# Test ts_mon metrics can be set up.
+from chromite.lib import ts_mon_config
+ts_mon_config.SetupTsMonGlobalState('some_test', suppress_exception=False)
+"""
+
+CREATE_FAKE_TS_MON_CONFIG_SCRIPT = 'create_fake_key.py'
+
+CREATE_FAKE_TS_MON_CONFIG_SCRIPT_CONTENT = """
+import os
+import rsa
+
+EXPECTED_TS_MON_CONFIG_NAME = '/etc/chrome-infra/ts-mon.json'
+
+FAKE_TS_MON_CONFIG_CONTENT = '''
+ {
+ "credentials":"/tmp/service_account_prodx_mon.json",
+ "endpoint":"https://xxx.googleapis.com/v1:insert",
+ "use_new_proto": true
+ }'''
+
+FAKE_SERVICE_ACCOUNT_CRED_JSON = '''
+ {
+ "type": "service_account",
+ "project_id": "test_project",
+ "private_key_id": "aaa",
+ "private_key": "%s",
+ "client_email": "xxx",
+ "client_id": "111",
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ "token_uri": "https://accounts.google.com/o/oauth2/token",
+ "auth_provider_x509_cert_url":
+ "https://www.googleapis.com/oauth2/v1/certs",
+ "client_x509_cert_url":
+ "https://www.googleapis.com/robot/v1/metadata/x509/xxx"
+ }'''
+
+
+TEST_KEY = '''------BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCzg4K2SXqf9LAM
+52a/t2HfpY5y49sbrgRb1llP6c8RVWhUX/pGdjbcIM97+1CJEWBN8Vmraoe4+71o
+1idTPehJfHRNeyXQUnro8CmnSxE9tLHtdKj0pzvO+yqT66O6Iw1aUAIX+dG4Us9Q
+Z22ypFHaJ74lKw9JFwAFTJ/TF1rXUXqgufYTNNqP3Ra7wCHF8BmtjwRYAlvsR9CO
+c4eVC1+qhq/8/EOMCgF/rsbZW93r/nz5xgsSX0k6WkAz5WX2mniHfmBFpmr039jZ
+0eI1mEMGDAYuUn05++dNveo/ZOZj3wBlFzyfNSeeWJB5SdKPTvN3H/Iu0Aw+Rtb6
+szwNClaFAgMBAAECggEAHZ8cjVRUJ/tiJorzlTyfKZ6hwhsPv4JIRVg6LhnceZWA
+jPW2cHSWyl2epyx55lhH7iyeeY7vXOqrX1aBMDb1stSWw2dH/tdxYSkqEmksa+R6
+fL6kl5RV5epjpPt77Z3VmPq9UbP/M310qKWcgB8lw4wN0AfKMqsZLYauk9BVhNRu
+Bgah9O7BmcXS+mp49w0Xyfo1UBvzW8R6UnBhHbf9aOY8ObMD0Jj/wDjlYMqSSIKR
+9/8GZWQEKe6q0PyRRdNNtdzbpBrR0fIw6/T9pfDR2fBAcpNvD50eJk2jRiRDTWFJ
+rVSc0bvZFb74Rc3LbMSXW/6Kb7I2IG1XsWw7nxp92QKBgQDgzdIxZrkNZ3Tbuzng
+SG4atjnaCXoekOHK7VZVYd30S0AAizeGu1sjpUVQgsf+qkFskXAQp2/2f+Wiuq2G
++nJYvXwZ/r9IcUs/oD3Fa2ezCVz1N/HOSPFAZK9XZuZbL8sXEYIPGJWH5F8Sanmb
+xNp9IUynlpwgM2JlZNeTCkv4PQKBgQDMbL/AF3LSpKvwi+QvYVkX/gChQmNMr4pP
+TM/GI4D03tNrzsut3oerKMUw0c5MxonkAJpuACN6baRyBOBxRYQSt8wWkORg9iqy
+a7aHnQqIGRafydW1/Snhr2DJSSaViHfO0oaA1r61zgMUTnSGb3UjyxJQp65dvPac
+BhpR9wpz6QKBgQDR2S/CL8rEqXObfi1roREu3DYqw7f8enBb1qtFrsLbPbd0CoD9
+wz0zjB6lJj/9CP9jkmwTD8njR8ab3jkIDBfboJ4NQhFbVW7R6QpglH9L0Iy2189g
+KhUScCqBoyubqYSidxR6dQ94uATLkxsL/nmaXxBITL5XDMBoN/dIak86XQKBgDqa
+oo4LKtvAYZpgQFZk7gm2w693PMhrOpdpSddfrkSE7M9nRXTe6r3ivkU0oJPaBwXa
+Nmt6lrEuZYpaY42VhDtpfZSqjQ5PBAaKYpWWK8LAjn/YeO/nV+5fPLv3wJv1t4MP
+T4f4CExOdwuHQliX81kDioicyZwN5BTumvUMgW6hAoGAF29kI1KthKaHN9P1DchI
+qqoHb9FPdZ5I6HDQpn6fr9ut7+9kVqexUrQ2AMvcVei6gDWW6P3yDCdTKcV9qtts
+1JOP2aSmXvibflx/bNfnhu988qJDhJ3CCjfc79fjwntUIXNPsFmwC9W5lnlSMKHM
+rH4RdmnjeCIG1PZ35m/yUSU=
+-----END PRIVATE KEY-----'''
+
+if not os.path.exists(EXPECTED_TS_MON_CONFIG_NAME):
+ try:
+ os.makedirs(os.path.dirname(EXPECTED_TS_MON_CONFIG_NAME))
+ except OSError:
+ # Directory already exists.
+ pass
+
+ with open(EXPECTED_TS_MON_CONFIG_NAME, 'w') as f:
+ f.write(FAKE_TS_MON_CONFIG_CONTENT)
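+    # Note: repr() escapes the key's newlines for embedding into the JSON
+    # template; the [2:-1] slice strips repr's opening quote plus one extra
+    # leading dash of TEST_KEY (which is why it starts with six dashes) and
+    # the closing quote.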
+    with open('/tmp/service_account_prodx_mon.json', 'w') as f:
+ f.write(FAKE_SERVICE_ACCOUNT_CRED_JSON % repr(TEST_KEY)[2:-1])
+"""
+
+# Name of the test control file.
+TEST_CONTROL_FILE = 'attach.1'
+TEST_DUT = '172.27.213.193'
+TEST_RESULT_PATH = lxc.RESULT_DIR_FMT % TEST_JOB_FOLDER
+# Test autoserv command.
+AUTOSERV_COMMAND = (('/usr/bin/python -u /usr/local/autotest/server/autoserv '
+ '-p -r %(result_path)s/%(test_dut)s -m %(test_dut)s '
+ '-u debug_user -l test -s -P %(job_id)s-debug_user/'
+ '%(test_dut)s -n %(result_path)s/%(test_control_file)s '
+ '--verify_job_repo_url') %
+ {'job_id': TEST_JOB_ID,
+ 'result_path': TEST_RESULT_PATH,
+ 'test_dut': TEST_DUT,
+ 'test_control_file': TEST_CONTROL_FILE})
+# Content of the test control file.
+TEST_CONTROL_CONTENT = """
+def run(machine):
+ job.run_test('dummy_PassServer',
+ host=hosts.create_host(machine))
+
+parallel_simple(run, machines)
+"""
+
+
+def setup_logging(log_level=logging.INFO):
+ """Direct logging to stdout.
+
+ @param log_level: Level of logging to redirect to stdout, default to INFO.
+ """
+ logger = logging.getLogger()
+ logger.setLevel(log_level)
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setLevel(log_level)
+ formatter = logging.Formatter('%(asctime)s %(message)s')
+ handler.setFormatter(formatter)
+ logger.handlers = []
+ logger.addHandler(handler)
+
+
+def setup_base(bucket):
+ """Test setup base container works.
+
+ @param bucket: ContainerBucket to interact with containers.
+ """
+ logging.info('Rebuild base container in folder %s.', bucket.container_path)
+ bucket.setup_base()
+ containers = bucket.get_all()
+ logging.info('Containers created: %s', containers.keys())
+
+
+def setup_test(bucket, name, skip_cleanup):
+ """Test container can be created from base container.
+
+ @param bucket: ContainerBucket to interact with containers.
+ @param name: Name of the test container.
+ @param skip_cleanup: Set to True to skip cleanup, used to troubleshoot
+ container failures.
+
+ @return: A Container object created for the test container.
+ """
+ logging.info('Create test container.')
+ os.makedirs(RESULT_PATH)
+ container = bucket.setup_test(name, TEST_JOB_ID, AUTOTEST_SERVER_PKG,
+ RESULT_PATH, skip_cleanup=skip_cleanup,
+ job_folder=TEST_JOB_FOLDER,
+ dut_name='192.168.0.3')
+
+ # Inject "AUTOSERV/testing_mode: True" in shadow config to test autoserv.
+ container.attach_run('echo $\'[AUTOSERV]\ntesting_mode: True\' >>'
+ ' /usr/local/autotest/shadow_config.ini')
+
+ if not utils.is_moblab():
+ # Create fake '/etc/chrome-infra/ts-mon.json' if it doesn't exist.
+ create_key_script = os.path.join(
+ RESULT_PATH, CREATE_FAKE_TS_MON_CONFIG_SCRIPT)
+ with open(create_key_script, 'w') as script:
+ script.write(CREATE_FAKE_TS_MON_CONFIG_SCRIPT_CONTENT)
+ container_result_path = lxc.RESULT_DIR_FMT % TEST_JOB_FOLDER
+ container_create_key_script = os.path.join(
+ container_result_path, CREATE_FAKE_TS_MON_CONFIG_SCRIPT)
+ container.attach_run('python %s' % container_create_key_script)
+
+ return container
+
+
+def test_share(container):
+ """Test container can share files with the host.
+
+ @param container: The test container.
+ """
+    logging.info('Test files written to result directory can be accessed '
+                 'from the host running the container.')
+ host_test_script = os.path.join(RESULT_PATH, TEST_SCRIPT)
+ with open(host_test_script, 'w') as script:
+ if utils.is_moblab():
+ script.write(TEST_SCRIPT_CONTENT)
+ else:
+ script.write(TEST_SCRIPT_CONTENT %
+ {'ts_mon_test': TEST_SCRIPT_CONTENT_TS_MON})
+
+ container_result_path = lxc.RESULT_DIR_FMT % TEST_JOB_FOLDER
+ container_test_script = os.path.join(container_result_path, TEST_SCRIPT)
+ container_test_script_dest = os.path.join('/usr/local/autotest/utils/',
+ TEST_SCRIPT)
+ container_test_log = os.path.join(container_result_path, TEST_LOG)
+ host_test_log = os.path.join(RESULT_PATH, TEST_LOG)
+ # Move the test script out of result folder as it needs to import common.
+ container.attach_run('mv %s %s' % (container_test_script,
+ container_test_script_dest))
+ container.attach_run('python %s %s' % (container_test_script_dest,
+ container_test_log))
+ if not os.path.exists(host_test_log):
+        raise Exception('Results created in container cannot be accessed from '
+                        'the host.')
+ with open(host_test_log, 'r') as log:
+ if log.read() != 'test':
+ raise Exception('Failed to read the content of results in '
+ 'container.')
+
+
+def test_autoserv(container):
+ """Test container can run autoserv command.
+
+ @param container: The test container.
+ """
+ logging.info('Test autoserv command.')
+ logging.info('Create test control file.')
+ host_control_file = os.path.join(RESULT_PATH, TEST_CONTROL_FILE)
+ with open(host_control_file, 'w') as control_file:
+ control_file.write(TEST_CONTROL_CONTENT)
+
+ logging.info('Run autoserv command.')
+ container.attach_run(AUTOSERV_COMMAND)
+
+ logging.info('Confirm results are available from host.')
+ # Read status.log to check the content is not empty.
+ container_status_log = os.path.join(TEST_RESULT_PATH, TEST_DUT,
+ 'status.log')
+    status_log = container.attach_run(
+            command='cat %s' % container_status_log).stdout
+ if len(status_log) < 10:
+ raise Exception('Failed to read status.log in container.')
+
+
+def test_package_install(container):
+ """Test installing package in container.
+
+ @param container: The test container.
+ """
+    # Packages are installed by TEST_SCRIPT_CONTENT; verify here that they
+    # are present.
+ container.attach_run('which atop')
+ container.attach_run('python -c "import acora"')
+
+
+def test_ssh(container, remote):
+ """Test container can run ssh to remote server.
+
+ @param container: The test container.
+ @param remote: The remote server to ssh to.
+
+ @raise: error.CmdError if container can't ssh to remote server.
+ """
+ logging.info('Test ssh to %s.', remote)
+ container.attach_run('ssh %s -a -x -o StrictHostKeyChecking=no '
+ '-o BatchMode=yes -o UserKnownHostsFile=/dev/null '
+ '-p 22 "true"' % remote)
+
+
+def parse_options():
+ """Parse command line inputs.
+ """
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-d', '--dut', type=str,
+ help='Test device to ssh to.',
+ default=None)
+ parser.add_argument('-r', '--devserver', type=str,
+ help='Test devserver to ssh to.',
+ default=None)
+ parser.add_argument('-v', '--verbose', action='store_true',
+ default=False,
+ help='Print out ALL entries.')
+ parser.add_argument('-s', '--skip_cleanup', action='store_true',
+ default=False,
+ help='Skip deleting test containers.')
+ return parser.parse_args()
+
+
+def main(options):
+ """main script.
+
+ @param options: Options to run the script.
+ """
+ # Force to run the test as superuser.
+    # TODO(dshi): crbug.com/459344 Remove this enforcement when the test
+    # container can be an unprivileged container.
+ if utils.sudo_require_password():
+ logging.warn('SSP requires root privilege to run commands, please '
+ 'grant root access to this process.')
+ utils.run('sudo true')
+
+ setup_logging(log_level=(logging.DEBUG if options.verbose
+ else logging.INFO))
+
+ bucket = lxc.ContainerBucket(TEMP_DIR)
+
+ setup_base(bucket)
+ container_test_name = (lxc.TEST_CONTAINER_NAME_FMT %
+ (TEST_JOB_ID, time.time(), os.getpid()))
+ container = setup_test(bucket, container_test_name, options.skip_cleanup)
+ test_share(container)
+ test_autoserv(container)
+ if options.dut:
+ test_ssh(container, options.dut)
+ if options.devserver:
+ test_ssh(container, options.devserver)
+    # Packages are installed by TEST_SCRIPT; verify they are present.
+ test_package_install(container)
+ logging.info('All tests passed.')
+
+
+if __name__ == '__main__':
+ options = parse_options()
+ try:
+ main(options)
+ finally:
+ if not options.skip_cleanup:
+ logging.info('Cleaning up temporary directory %s.', TEMP_DIR)
+ try:
+ lxc.ContainerBucket(TEMP_DIR).destroy_all()
+ finally:
+ utils.run('sudo rm -rf "%s"' % TEMP_DIR)
diff --git a/site_utils/lxc/utils.py b/site_utils/lxc/utils.py
new file mode 100644
index 0000000..29ad241
--- /dev/null
+++ b/site_utils/lxc/utils.py
@@ -0,0 +1,55 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This module provides some utilities used by LXC and its tools.
+"""
+
+import common
+from autotest_lib.client.bin import utils
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.common_lib.cros.network import interface
+
+
+def path_exists(path):
+ """Check if path exists.
+
+    If the process is not running as the root user, os.path.exists may fail to
+    check if a path owned by root exists. This function uses the command
+    `test -e` (run with sudo) to check if the path exists.
+
+ @param path: Path to check if it exists.
+
+ @return: True if path exists, otherwise False.
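+
+    Example (illustrative; the path is hypothetical):
+        exists = path_exists('/var/lib/lxc/base/config')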
+ """
+ try:
+ utils.run('sudo test -e "%s"' % path)
+ return True
+ except error.CmdError:
+ return False
+
+
+def get_host_ip():
+ """Get the IP address of the host running containers on lxcbr*.
+
+    This function gets the IP address on the network interface lxcbr*. The
+    assumption is that lxc uses a network interface whose name starts with
+    "lxcbr".
+
+ @return: IP address of the host running containers.
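+
+    Example (illustrative; the address depends on the local lxc bridge):
+        host_ip = get_host_ip()  # e.g. '10.0.3.1' with the default lxcbr0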
+ """
+ # The kernel publishes symlinks to various network devices in /sys.
+ result = utils.run('ls /sys/class/net', ignore_status=True)
+ # filter out empty strings
+ interface_names = [x for x in result.stdout.split() if x]
+
+ lxc_network = None
+ for name in interface_names:
+ if name.startswith('lxcbr'):
+ lxc_network = name
+ break
+ if not lxc_network:
+ raise error.ContainerError('Failed to find network interface used by '
+ 'lxc. All existing interfaces are: %s' %
+ interface_names)
+ netif = interface.Interface(lxc_network)
+ return netif.ipv4_address