=== modified file 'Makefile'
--- Makefile	2016-05-10 16:13:29 +0000
+++ Makefile	2016-10-03 18:55:20 +0000
@@ -49,5 +49,7 @@
 sync-images:
 	@$(CWD)/tools/vmtest-sync-images
 
+clean:
+	rm -rf doc/_build
 
-.PHONY: all test pyflakes pyflakes3 pep8 build
+.PHONY: all clean test pyflakes pyflakes3 pep8 build

=== modified file 'curtin/__init__.py'
--- curtin/__init__.py	2015-11-23 16:22:09 +0000
+++ curtin/__init__.py	2016-10-03 18:55:20 +0000
@@ -33,6 +33,10 @@
     'SUBCOMMAND_SYSTEM_INSTALL',
     # subcommand 'system-upgrade' is present
     'SUBCOMMAND_SYSTEM_UPGRADE',
+    # supports new format of apt configuration
+    'APT_CONFIG_V1',
 ]
 
+__version__ = "0.1.0"
+
 # vi: ts=4 expandtab syntax=python

=== modified file 'curtin/block/__init__.py'
--- curtin/block/__init__.py	2016-10-03 18:00:41 +0000
+++ curtin/block/__init__.py	2016-10-03 18:55:20 +0000
@@ -23,21 +23,31 @@
 import itertools
 
 from curtin import util
+from curtin.block import lvm
+from curtin.log import LOG
 from curtin.udev import udevadm_settle
-from curtin.log import LOG
 
 
 def get_dev_name_entry(devname):
+    """
+    convert device name to a tuple of (short name, path in /dev)
+    """
     bname = devname.split('/dev/')[-1]
     return (bname, "/dev/" + bname)
 
 
 def is_valid_device(devname):
+    """
+    check if devname is a valid block device
+    """
     devent = get_dev_name_entry(devname)[1]
     return is_block_device(devent)
 
 
 def is_block_device(path):
+    """
+    check if path is a block device
+    """
     try:
         return stat.S_ISBLK(os.stat(path).st_mode)
     except OSError as e:
@@ -47,26 +57,99 @@
 
 
 def dev_short(devname):
+    """
+    get short form of device name
+    """
+    devname = os.path.normpath(devname)
     if os.path.sep in devname:
         return os.path.basename(devname)
     return devname
 
 
 def dev_path(devname):
+    """
+    convert device name to path in /dev
+    """
     if devname.startswith('/dev/'):
         return devname
     else:
         return '/dev/' + devname
 
 
+def path_to_kname(path):
+    """
+    converts a path in /dev or a path in /sys/block to the device kname,
+    taking special devices and unusual naming schemes into account
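+
+    illustrative examples, per the handling below:
+    path_to_kname('/dev/sda1') -> 'sda1'
+    path_to_kname('/dev/cciss/c0d0') -> 'cciss!c0d0'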
+    """
+    # if path given is a link, get real path
+    # only do this if given a path though, if kname is already specified then
+    # this would cause a failure where the function should still be able to run
+    if os.path.sep in path:
+        path = os.path.realpath(path)
+    # using basename here ensures that the function will work given a path in
+    # /dev, a kname, or a path in /sys/block as an arg
+    dev_kname = os.path.basename(path)
+    # cciss devices need to have 'cciss!' prepended
+    if path.startswith('/dev/cciss'):
+        dev_kname = 'cciss!' + dev_kname
+    LOG.debug("path_to_kname input: '{}' output: '{}'".format(path, dev_kname))
+    return dev_kname
+
+
+def kname_to_path(kname):
+    """
+    converts a kname to a path in /dev, taking special devices and unusual
+    naming schemes into account
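+
+    e.g. (illustrative): kname_to_path('cciss!c0d0') -> '/dev/cciss/c0d0'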
+    """
+    # if given something that is already a dev path, return it
+    if os.path.exists(kname) and is_valid_device(kname):
+        path = kname
+        LOG.debug("kname_to_path input: '{}' output: '{}'".format(kname, path))
+        return os.path.realpath(path)
+    # adding '/dev' to path is not sufficient to handle cciss devices and
+    # possibly other special devices which have not been encountered yet
+    path = os.path.realpath(os.sep.join(['/dev'] + kname.split('!')))
+    # make sure path we get is correct
+    if not (os.path.exists(path) and is_valid_device(path)):
+        raise OSError('could not get path to dev from kname: {}'.format(kname))
+    LOG.debug("kname_to_path input: '{}' output: '{}'".format(kname, path))
+    return path
+
+
+def partition_kname(disk_kname, partition_number):
+    """
+    add the partition number to disk_kname, prepending a 'p' if needed
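+
+    e.g. (illustrative): partition_kname('sda', 1) -> 'sda1', while
+    partition_kname('nvme0n1', 1) -> 'nvme0n1p1'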
+    """
+    for dev_type in ['nvme', 'mmcblk', 'cciss', 'mpath', 'dm']:
+        if disk_kname.startswith(dev_type):
+            partition_number = "p%s" % partition_number
+            break
+    return "%s%s" % (disk_kname, partition_number)
+
+
+def sysfs_to_devpath(sysfs_path):
+    """
+    convert a path in /sys/class/block to a path in /dev
+    """
+    path = kname_to_path(path_to_kname(sysfs_path))
+    if not is_block_device(path):
+        raise ValueError('could not find blockdev for sys path: {}'
+                         .format(sysfs_path))
+    return path
+
+
 def sys_block_path(devname, add=None, strict=True):
+    """
+    get path to device in /sys/class/block
+    """
     toks = ['/sys/class/block']
     # insert parent dev if devname is partition
+    devname = os.path.normpath(devname)
     (parent, partnum) = get_blockdev_for_partition(devname)
     if partnum:
-        toks.append(dev_short(parent))
+        toks.append(path_to_kname(parent))
 
-    toks.append(dev_short(devname))
+    toks.append(path_to_kname(devname))
 
     if add is not None:
         toks.append(add)
@@ -83,6 +166,9 @@
 
 
 def _lsblock_pairs_to_dict(lines):
+    """
+    parse 'lsblk --pairs' output and convert to dict
+    """
     ret = {}
     for line in lines.splitlines():
         toks = shlex.split(line)
@@ -98,6 +184,9 @@
 
 
 def _lsblock(args=None):
+    """
+    get lsblk data as dict
+    """
     # lsblk  --help | sed -n '/Available/,/^$/p' |
     #     sed -e 1d -e '$d' -e 's,^[ ]\+,,' -e 's, .*,,' | sort
     keys = ['ALIGNMENT', 'DISC-ALN', 'DISC-GRAN', 'DISC-MAX', 'DISC-ZERO',
@@ -120,8 +209,10 @@
 
 
 def get_unused_blockdev_info():
-    # return a list of unused block devices. These are devices that
-    # do not have anything mounted on them.
+    """
+    return a list of unused block devices.
+    These are devices that do not have anything mounted on them.
+    """
 
     # get a list of top level block devices, then iterate over it to get
     # devices dependent on those.  If the lsblk call for that specific
@@ -137,7 +228,9 @@
 
 
 def get_devices_for_mp(mountpoint):
-    # return a list of devices (full paths) used by the provided mountpoint
+    """
+    return a list of devices (full paths) used by the provided mountpoint
+    """
     bdinfo = _lsblock()
     found = set()
     for devname, data in bdinfo.items():
@@ -158,6 +251,9 @@
 
 
 def get_installable_blockdevs(include_removable=False, min_size=1024**3):
+    """
+    find blockdevs suitable for installation
+    """
     good = []
     unused = get_unused_blockdev_info()
     for devname, data in unused.items():
@@ -172,21 +268,25 @@
 
 
 def get_blockdev_for_partition(devpath):
+    """
+    find the parent device for a partition.
+    returns a tuple of the parent block device and the partition number
+    if device is not a partition, None will be returned for partition number
+    """
+    # normalize path
+    rpath = os.path.realpath(devpath)
+
     # convert an entry in /dev/ to parent disk and partition number
     # if devpath is a block device and not a partition, return (devpath, None)
-
-    # input of /dev/vdb or /dev/disk/by-label/foo
-    # rpath is hopefully a real-ish path in /dev (vda, sdb..)
-    rpath = os.path.realpath(devpath)
-
-    bname = os.path.basename(rpath)
-    syspath = "/sys/class/block/%s" % bname
-
+    base = '/sys/class/block'
+
+    # input of /dev/vdb, /dev/disk/by-label/foo, /sys/block/foo,
+    # /sys/block/class/foo, or just foo
+    syspath = os.path.join(base, path_to_kname(devpath))
+
+    # don't need to try out multiple sysfs paths as path_to_kname handles cciss
     if not os.path.exists(syspath):
-        syspath2 = "/sys/class/block/cciss!%s" % bname
-        if not os.path.exists(syspath2):
-            raise ValueError("%s had no syspath (%s)" % (devpath, syspath))
-        syspath = syspath2
+        raise OSError("%s had no syspath (%s)" % (devpath, syspath))
 
     ptpath = os.path.join(syspath, "partition")
     if not os.path.exists(ptpath):
@@ -207,8 +307,21 @@
     return (diskdevpath, ptnum)
 
 
+def get_sysfs_partitions(device):
+    """
+    get a list of sysfs paths for partitions under a block device
+    accepts input as a device kname, sysfs path, or dev path
+    returns empty list if no partitions available
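+
+    e.g. (illustrative) for a disk 'vda' with two partitions:
+    get_sysfs_partitions('vda') ->
+    ['/sys/class/block/vda/vda1', '/sys/class/block/vda/vda2']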
+    """
+    sysfs_path = sys_block_path(device)
+    return [sys_block_path(kname) for kname in os.listdir(sysfs_path)
+            if os.path.exists(os.path.join(sysfs_path, kname, 'partition'))]
+
+
 def get_pardevs_on_blockdevs(devs):
-    # return a dict of partitions with their info that are on provided devs
+    """
+    return a dict of partitions with their info that are on provided devs
+    """
     if devs is None:
         devs = []
     devs = [get_dev_name_entry(d)[1] for d in devs]
@@ -243,7 +356,9 @@
 
 
 def rescan_block_devices():
-    # run 'blockdev --rereadpt' for all block devices not currently mounted
+    """
+    run 'blockdev --rereadpt' for all block devices not currently mounted
+    """
     unused = get_unused_blockdev_info()
     devices = []
     for devname, data in unused.items():
@@ -271,6 +386,9 @@
 
 
 def blkid(devs=None, cache=True):
+    """
+    get data about block devices from blkid and convert to dict
+    """
     if devs is None:
         devs = []
 
@@ -423,7 +541,18 @@
     """
     info = _lsblock([devpath])
     LOG.debug('get_blockdev_sector_size: info:\n%s' % util.json_dumps(info))
-    [parent] = info
+    # (LP: 1598310) The call to _lsblock() may return multiple results.
+    # If it does, then search for a result with the correct device path.
+    # If no such device is found among the results, then fall back to previous
+    # behavior, which was taking the first of the results
+    assert len(info) > 0
+    for (k, v) in info.items():
+        if v.get('device_path') == devpath:
+            parent = k
+            break
+    else:
+        parent = list(info.keys())[0]
+
     return (int(info[parent]['LOG-SEC']), int(info[parent]['PHY-SEC']))
 
 
@@ -499,50 +628,108 @@
 def sysfs_partition_data(blockdev=None, sysfs_path=None):
     # given block device or sysfs_path, return a list of tuples
     # of (kernel_name, number, offset, size)
-    if blockdev is None and sysfs_path is None:
-        raise ValueError("Blockdev and sysfs_path cannot both be None")
-
     if blockdev:
+        blockdev = os.path.normpath(blockdev)
         sysfs_path = sys_block_path(blockdev)
-
-    ptdata = []
-    # /sys/class/block/dev has entries of 'kname' for each partition
+    elif sysfs_path:
+        # use normpath to ensure that paths with trailing slash work
+        sysfs_path = os.path.normpath(sysfs_path)
+        blockdev = os.path.join('/dev', os.path.basename(sysfs_path))
+    else:
+        raise ValueError("Blockdev and sysfs_path cannot both be None")
 
     # queue property is only on parent devices, ie, we can't read
     # /sys/class/block/vda/vda1/queue/* as queue is only on the
     # parent device
+    sysfs_prefix = sysfs_path
     (parent, partnum) = get_blockdev_for_partition(blockdev)
-    sysfs_prefix = sysfs_path
     if partnum:
         sysfs_prefix = sys_block_path(parent)
-
-    block_size = int(util.load_file(os.path.join(sysfs_prefix,
-                                    'queue/logical_block_size')))
-
-    block_size = int(
-        util.load_file(os.path.join(sysfs_path, 'queue/logical_block_size')))
+        partnum = int(partnum)
+
+    block_size = int(util.load_file(os.path.join(
+        sysfs_prefix, 'queue/logical_block_size')))
     unit = block_size
-    for d in os.listdir(sysfs_path):
-        partd = os.path.join(sysfs_path, d)
+
+    ptdata = []
+    for part_sysfs in get_sysfs_partitions(sysfs_prefix):
         data = {}
         for sfile in ('partition', 'start', 'size'):
-            dfile = os.path.join(partd, sfile)
+            dfile = os.path.join(part_sysfs, sfile)
             if not os.path.isfile(dfile):
                 continue
             data[sfile] = int(util.load_file(dfile))
-        if 'partition' not in data:
-            continue
-        ptdata.append((d, data['partition'], data['start'] * unit,
-                       data['size'] * unit,))
+        if partnum is None or data['partition'] == partnum:
+            ptdata.append((path_to_kname(part_sysfs), data['partition'],
+                           data['start'] * unit, data['size'] * unit,))
 
     return ptdata
 
 
+def get_part_table_type(device):
+    """
+    check the type of partition table present on the specified device
+    returns None if no ptable was present or device could not be read
+    """
+    # it is necessary to look for the gpt signature first, then the dos
+    # signature, because a gpt formatted disk usually has a valid mbr to
+    # protect the disk from being modified by older partitioning tools
+    return ('gpt' if check_efi_signature(device) else
+            'dos' if check_dos_signature(device) else None)
+
+
+def check_dos_signature(device):
+    """
+    check if there is a dos partition table signature present on device
+    """
+    # the last 2 bytes of a dos partition table have the signature with the
+    # value 0xAA55. the dos partition table is always 0x200 bytes long, even if
+    # the underlying disk uses a larger logical block size, so the start of
+    # this signature must be at 0x1fe
+    # https://en.wikipedia.org/wiki/Master_boot_record#Sector_layout
+    return (is_block_device(device) and util.file_size(device) >= 0x200 and
+            (util.load_file(device, mode='rb', read_len=2, offset=0x1fe) ==
+             b'\x55\xAA'))
+
+
+def check_efi_signature(device):
+    """
+    check if there is a gpt partition table signature present on device
+    """
+    # the gpt partition table header is always on lba 1, regardless of the
+    # logical block size used by the underlying disk. therefore, a static
+    # offset cannot be used, the offset to the start of the table header is
+    # always the sector size of the disk
+    # the start of the gpt partition table header should have the signature
+    # 'EFI PART'.
+    # https://en.wikipedia.org/wiki/GUID_Partition_Table
+    sector_size = get_blockdev_sector_size(device)[0]
+    return (is_block_device(device) and
+            util.file_size(device) >= 2 * sector_size and
+            (util.load_file(device, mode='rb', read_len=8,
+                            offset=sector_size) == b'EFI PART'))
+
+
+def is_extended_partition(device):
+    """
+    check if the specified device path is a dos extended partition
+    """
+    # an extended partition must be on a dos disk, must be a partition, must be
+    # within the first 4 partitions and will have a valid dos signature,
+    # because the format of the extended partition matches that of a real mbr
+    (parent_dev, part_number) = get_blockdev_for_partition(device)
+    return (get_part_table_type(parent_dev) in ['dos', 'msdos'] and
+            part_number is not None and int(part_number) <= 4 and
+            check_dos_signature(device))
+
+
 def wipe_file(path, reader=None, buflen=4 * 1024 * 1024):
-    # wipe the existing file at path.
-    #  if reader is provided, it will be called as a 'reader(buflen)'
-    #  to provide data for each write.  Otherwise, zeros are used.
-    #  writes will be done in size of buflen.
+    """
+    wipe the existing file at path.
+    if reader is provided, it will be called as a 'reader(buflen)'
+    to provide data for each write.  Otherwise, zeros are used.
+    writes will be done in size of buflen.
+    """
     if reader:
         readfunc = reader
     else:
@@ -551,13 +738,11 @@
         def readfunc(size):
             return buf
 
+    size = util.file_size(path)
+    LOG.debug("%s is %s bytes. wiping with buflen=%s",
+              path, size, buflen)
+
     with open(path, "rb+") as fp:
-        # get the size by seeking to end.
-        fp.seek(0, 2)
-        size = fp.tell()
-        LOG.debug("%s is %s bytes. wiping with buflen=%s",
-                  path, size, buflen)
-        fp.seek(0)
         while True:
             pbuf = readfunc(buflen)
             pos = fp.tell()
@@ -574,16 +759,18 @@
 
 
 def quick_zero(path, partitions=True):
-    # zero 1M at front, 1M at end, and 1M at front
-    # if this is a block device and partitions is true, then
-    # zero 1M at front and end of each partition.
+    """
+    zero 1M at front, 1M at end, and 1M at front
+    if this is a block device and partitions is true, then
+    zero 1M at front and end of each partition.
+    """
     buflen = 1024
     count = 1024
     zero_size = buflen * count
     offsets = [0, -zero_size]
     is_block = is_block_device(path)
     if not (is_block or os.path.isfile(path)):
-        raise ValueError("%s: not an existing file or block device")
+        raise ValueError("%s: not an existing file or block device", path)
 
     if partitions and is_block:
         ptdata = sysfs_partition_data(path)
@@ -596,6 +783,9 @@
 
 
 def zero_file_at_offsets(path, offsets, buflen=1024, count=1024, strict=False):
+    """
+    write zeros to file at specified offsets
+    """
     bmsg = "{path} (size={size}): "
     m_short = bmsg + "{tot} bytes from {offset} > size."
     m_badoff = bmsg + "invalid offset {offset}."
@@ -657,15 +847,13 @@
     if mode == "pvremove":
         # We need to use --force --force in case it's already in a volgroup and
         # pvremove doesn't want to remove it
-        cmds = []
-        cmds.append(["pvremove", "--force", "--force", "--yes", path])
-        cmds.append(["pvscan", "--cache"])
-        cmds.append(["vgscan", "--mknodes", "--cache"])
+
         # If pvremove is run and there is no label on the system,
         # then it exits with 5. That is also okay, because we might be
         # wiping something that is already blank
-        for cmd in cmds:
-            util.subp(cmd, rcs=[0, 5], capture=True)
+        util.subp(['pvremove', '--force', '--force', '--yes', path],
+                  rcs=[0, 5], capture=True)
+        lvm.lvm_scan()
     elif mode == "zero":
         wipe_file(path)
     elif mode == "random":

=== added file 'curtin/block/clear_holders.py'
--- curtin/block/clear_holders.py	1970-01-01 00:00:00 +0000
+++ curtin/block/clear_holders.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,387 @@
+#   Copyright (C) 2016 Canonical Ltd.
+#
+#   Author: Wesley Wiedenmeier <wesley.wiedenmeier@canonical.com>
+#
+#   Curtin is free software: you can redistribute it and/or modify it under
+#   the terms of the GNU Affero General Public License as published by the
+#   Free Software Foundation, either version 3 of the License, or (at your
+#   option) any later version.
+#
+#   Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
+#   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+#   FOR A PARTICULAR PURPOSE.  See the GNU Affero General Public License for
+#   more details.
+#
+#   You should have received a copy of the GNU Affero General Public License
+#   along with Curtin.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+This module provides a mechanism for shutting down virtual storage layers on
+top of a block device, making it possible to reuse the block device without
+having to reboot the system
+"""
+
+import os
+
+from curtin import (block, udev, util)
+from curtin.block import lvm
+from curtin.log import LOG
+
+
+def _define_handlers_registry():
+    """
+    returns the registry of device types and their handler functions
+    """
+    return {
+        'partition': {'shutdown': wipe_superblock,
+                      'ident': identify_partition},
+        'lvm': {'shutdown': shutdown_lvm, 'ident': identify_lvm},
+        'crypt': {'shutdown': shutdown_crypt, 'ident': identify_crypt},
+        'raid': {'shutdown': shutdown_mdadm, 'ident': identify_mdadm},
+        'bcache': {'shutdown': shutdown_bcache, 'ident': identify_bcache},
+        'disk': {'ident': lambda x: False, 'shutdown': wipe_superblock},
+    }
+
+
+def get_dmsetup_uuid(device):
+    """
+    get the dm uuid for a specified dmsetup device
+    """
+    blockdev = block.sysfs_to_devpath(device)
+    (out, _) = util.subp(['dmsetup', 'info', blockdev, '-C', '-o', 'uuid',
+                          '--noheadings'], capture=True)
+    return out.strip()
+
+
+def get_bcache_using_dev(device):
+    """
+    Get the /sys/fs/bcache/ path of the bcache volume using specified device
+    """
+    # FIXME: when block.bcache is written this should be moved there
+    sysfs_path = block.sys_block_path(device)
+    return os.path.realpath(os.path.join(sysfs_path, 'bcache', 'cache'))
+
+
+def shutdown_bcache(device):
+    """
+    Shut down bcache for specified bcache device
+    """
+    bcache_shutdown_message = ('shutdown_bcache running on {} has determined '
+                               'that the device has already been shut down '
+                               'during handling of another bcache dev. '
+                               'skipping'.format(device))
+    if not os.path.exists(device):
+        LOG.info(bcache_shutdown_message)
+        return
+
+    bcache_sysfs = get_bcache_using_dev(device)
+    if not os.path.exists(bcache_sysfs):
+        LOG.info(bcache_shutdown_message)
+        return
+
+    LOG.debug('stopping bcache at: %s', bcache_sysfs)
+    util.write_file(os.path.join(bcache_sysfs, 'stop'), '1', mode=None)
+
+
+def shutdown_lvm(device):
+    """
+    Shutdown specified lvm device.
+    """
+    device = block.sys_block_path(device)
+    # lvm devices have a dm directory that contains a file 'name' containing
+    # '{volume group}-{logical volume}'. The volume can be freed using lvremove
+    name_file = os.path.join(device, 'dm', 'name')
+    (vg_name, lv_name) = lvm.split_lvm_name(util.load_file(name_file))
+    # use two --force flags here in case the volume group that this lv is
+    # attached to has been damaged
+    LOG.debug('running lvremove on %s/%s', vg_name, lv_name)
+    util.subp(['lvremove', '--force', '--force',
+               '{}/{}'.format(vg_name, lv_name)], rcs=[0, 5])
+    # if that was the last lvol in the volgroup, get rid of volgroup
+    if len(lvm.get_lvols_in_volgroup(vg_name)) == 0:
+        util.subp(['vgremove', '--force', '--force', vg_name], rcs=[0, 5])
+    # refresh lvmetad
+    lvm.lvm_scan()
+
+
+def shutdown_crypt(device):
+    """
+    Shutdown specified cryptsetup device
+    """
+    blockdev = block.sysfs_to_devpath(device)
+    util.subp(['cryptsetup', 'remove', blockdev], capture=True)
+
+
+def shutdown_mdadm(device):
+    """
+    Shutdown specified mdadm device.
+    """
+    blockdev = block.sysfs_to_devpath(device)
+    LOG.debug('using mdadm.mdadm_stop on dev: %s', blockdev)
+    block.mdadm.mdadm_stop(blockdev)
+    block.mdadm.mdadm_remove(blockdev)
+
+
+def wipe_superblock(device):
+    """
+    Wrapper for block.wipe_volume compatible with shutdown function interface
+    """
+    blockdev = block.sysfs_to_devpath(device)
+    # when operating on a disk that used to have a dos part table with an
+    # extended partition, attempting to wipe the extended partition will fail
+    if block.is_extended_partition(blockdev):
+        LOG.info("extended partitions do not need wiping, so skipping: '%s'",
+                 blockdev)
+    else:
+        LOG.info('wiping superblock on %s', blockdev)
+        block.wipe_volume(blockdev, mode='superblock')
+
+
+def identify_lvm(device):
+    """
+    determine if specified device is a lvm device
+    """
+    return (block.path_to_kname(device).startswith('dm') and
+            get_dmsetup_uuid(device).startswith('LVM'))
+
+
+def identify_crypt(device):
+    """
+    determine if specified device is dm-crypt device
+    """
+    return (block.path_to_kname(device).startswith('dm') and
+            get_dmsetup_uuid(device).startswith('CRYPT'))
+
+
+def identify_mdadm(device):
+    """
+    determine if specified device is a mdadm device
+    """
+    return block.path_to_kname(device).startswith('md')
+
+
+def identify_bcache(device):
+    """
+    determine if specified device is a bcache device
+    """
+    return block.path_to_kname(device).startswith('bcache')
+
+
+def identify_partition(device):
+    """
+    determine if specified device is a partition
+    """
+    path = os.path.join(block.sys_block_path(device), 'partition')
+    return os.path.exists(path)
+
+
+def get_holders(device):
+    """
+    Look up any block device holders, return list of knames
+    """
+    # block.sys_block_path works when given a /sys or /dev path
+    sysfs_path = block.sys_block_path(device)
+    # get holders
+    holders = os.listdir(os.path.join(sysfs_path, 'holders'))
+    LOG.debug("devname '%s' had holders: %s", device, holders)
+    return holders
+
+
+def gen_holders_tree(device):
+    """
+    generate a tree representing the current storage hierarchy above 'device'
+    """
+    device = block.sys_block_path(device)
+    dev_name = block.path_to_kname(device)
+    # the holders for a device should consist of the devices in the holders/
+    # dir in sysfs and any partitions on the device. this ensures that a
+    # storage tree starting from a disk will include all devices holding the
+    # disk's partitions
+    holder_paths = ([block.sys_block_path(h) for h in get_holders(device)] +
+                    block.get_sysfs_partitions(device))
+    # the DEV_TYPES registry contains a function under the key 'ident' for
+    # each device type entry that returns true if the device passed to it is
+    # of the correct type. there should never be a situation in which
+    # multiple identify functions return true. therefore, it will always work
+    # to take
+    # the device type with the first identify function that returns true as the
+    # device type for the current device. in the event that no identify
+    # functions return true, the device will be treated as a disk
+    # (DEFAULT_DEV_TYPE). the identify function for disk never returns true.
+    # the next() builtin in python will not raise a StopIteration exception if
+    # there is a default value defined
+    dev_type = next((k for k, v in DEV_TYPES.items() if v['ident'](device)),
+                    DEFAULT_DEV_TYPE)
+    return {
+        'device': device, 'dev_type': dev_type, 'name': dev_name,
+        'holders': [gen_holders_tree(h) for h in holder_paths],
+    }
+
+
+def plan_shutdown_holder_trees(holders_trees):
+    """
+    plan best order to shut down holders in, taking into account high level
+    storage layers that may have many devices below them
+
+    returns a sorted list of descriptions of storage config entries including
+    their path in /sys/block and their dev type
+
+    can accept either a single storage tree or a list of storage trees assumed
+    to start at an equal place in the storage hierarchy (i.e. a list of trees
+    starting from disk)
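+
+    e.g. (illustrative) for a stack of disk -> partition -> lvm, entries
+    are returned with the lvm device first, then the partition, then the
+    disk, so the highest storage layers are shut down first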
+    """
+    # holds a temporary registry of holders to allow cross references
+    # key = device sysfs path, value = {} of priority level, shutdown function
+    reg = {}
+
+    # normalize to list of trees
+    if not isinstance(holders_trees, (list, tuple)):
+        holders_trees = [holders_trees]
+
+    def flatten_holders_tree(tree, level=0):
+        """
+        add entries from holders tree to registry with level key corresponding
+        to how many layers from raw disks the current device is at
+        """
+        device = tree['device']
+
+        # always go with highest level if current device has been
+        # encountered already. since the device and everything above it is
+        # re-added to the registry it ensures that any increase of level
+        # required here will propagate down the tree
+        # this handles a scenario like mdadm + bcache, where the backing
+        # device for bcache is a 3rd level item like mdadm, but the cache
+        # device is 1st level (disk) or second level (partition), ensuring
+        # that the bcache item is always considered higher level than
+        # anything else regardless of whether it was added to the tree via
+        # the cache device or backing device first
+        if device in reg:
+            level = max(reg[device]['level'], level)
+
+        reg[device] = {'level': level, 'device': device,
+                       'dev_type': tree['dev_type']}
+
+        # handle holders above this level
+        for holder in tree['holders']:
+            flatten_holders_tree(holder, level=level + 1)
+
+    # flatten the holders tree into the registry
+    for holders_tree in holders_trees:
+        flatten_holders_tree(holders_tree)
+
+    # return list of entry dicts with highest level first
+    return [reg[k] for k in sorted(reg, key=lambda x: reg[x]['level'] * -1)]
+
+
+def format_holders_tree(holders_tree):
+    """
+    draw a nice diagram of the holders tree
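+
+    e.g. (illustrative) a disk with two partitions renders as:
+        sda
+        |-- sda1
+        `-- sda2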
+    """
+    # spacer styles based on output of 'tree --charset=ascii'
+    spacers = (('`-- ', ' ' * 4), ('|-- ', '|' + ' ' * 3))
+
+    def format_tree(tree):
+        """
+        format entry and any subentries
+        """
+        result = [tree['name']]
+        holders = tree['holders']
+        for (holder_no, holder) in enumerate(holders):
+            spacer_style = spacers[min(len(holders) - (holder_no + 1), 1)]
+            subtree_lines = format_tree(holder)
+            for (line_no, line) in enumerate(subtree_lines):
+                result.append(spacer_style[min(line_no, 1)] + line)
+        return result
+
+    return '\n'.join(format_tree(holders_tree))
+
+
+def get_holder_types(tree):
+    """
+    get flattened list of types of holders in holders tree and the devices
+    they correspond to
+    """
+    types = {(tree['dev_type'], tree['device'])}
+    for holder in tree['holders']:
+        types.update(get_holder_types(holder))
+    return types
+
+
+def assert_clear(base_paths):
+    """
+    Check if all paths in base_paths are clear to use
+    """
+    valid = ('disk', 'partition')
+    if not isinstance(base_paths, (list, tuple)):
+        base_paths = [base_paths]
+    base_paths = [block.sys_block_path(path) for path in base_paths]
+    for holders_tree in [gen_holders_tree(p) for p in base_paths]:
+        if any(holder_type not in valid and path not in base_paths
+               for (holder_type, path) in get_holder_types(holders_tree)):
+            raise OSError('Storage not clear, remaining:\n{}'
+                          .format(format_holders_tree(holders_tree)))
+
+
+def clear_holders(base_paths, try_preserve=False):
+    """
+    Clear all storage layers depending on the devices specified in 'base_paths'
+    A single device or list of devices can be specified.
+    Device paths can be specified either as paths in /dev or /sys/block
+    Will throw OSError if any holders could not be shut down
+    """
+    # handle single path
+    if not isinstance(base_paths, (list, tuple)):
+        base_paths = [base_paths]
+
+    # get current holders and plan how to shut them down
+    holder_trees = [gen_holders_tree(path) for path in base_paths]
+    LOG.info('Current device storage tree:\n%s',
+             '\n'.join(format_holders_tree(tree) for tree in holder_trees))
+    ordered_devs = plan_shutdown_holder_trees(holder_trees)
+
+    # run shutdown functions
+    for dev_info in ordered_devs:
+        dev_type = DEV_TYPES.get(dev_info['dev_type'])
+        shutdown_function = dev_type.get('shutdown')
+        if not shutdown_function:
+            continue
+        if try_preserve and shutdown_function in DATA_DESTROYING_HANDLERS:
+            LOG.info('shutdown function for holder type: %s is destructive. '
+                     'attempting to preserve data, so skipping',
+                     dev_info['dev_type'])
+            continue
+        LOG.info("shutdown running on holder type: '%s' syspath: '%s'",
+                 dev_info['dev_type'], dev_info['device'])
+        shutdown_function(dev_info['device'])
+        udev.udevadm_settle()
+
+
+def start_clear_holders_deps():
+    """
+    prepare system for clear holders to be able to scan old devices
+    """
+    # an mdadm scan has to be started in case there is an md device that
+    # needs to be detected. if the scan fails, it is either because there are
+    # no mdadm devices on the system, or because there is an mdadm device in
+    # a damaged state that could not be started. due to the nature of mdadm
+    # tools, it is
+    # difficult to know which is the case. if any errors did occur, then ignore
+    # them, since no action needs to be taken if there were no mdadm devices on
+    # the system, and in the case where there is some mdadm metadata on a disk,
+    # but there was not enough to start the array, the call to wipe_volume on
+    # all disks and partitions should be sufficient to remove the mdadm
+    # metadata
+    block.mdadm.mdadm_assemble(scan=True, ignore_errors=True)
+    # the bcache module needs to be present to properly detect bcache devs
+    # on some systems (precise without hwe kernel) it may not be possible to
+    # load the bcache module because it is not present in the kernel. if this
+    # happens then there is no need to halt installation, as the bcache devices
+    # will never appear and will never prevent the disk from being reformatted
+    util.subp(['modprobe', 'bcache'], rcs=[0, 1])
+
+
+# anything that is not identified can be assumed to be a 'disk' or similar
+DEFAULT_DEV_TYPE = 'disk'
+# handlers that should not be run if an attempt is being made to preserve data
+DATA_DESTROYING_HANDLERS = [wipe_superblock]
+# types of devices that could be encountered by clear holders and functions to
+# identify them and shut them down
+DEV_TYPES = _define_handlers_registry()

=== added file 'curtin/block/lvm.py'
--- curtin/block/lvm.py	1970-01-01 00:00:00 +0000
+++ curtin/block/lvm.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,96 @@
+#   Copyright (C) 2016 Canonical Ltd.
+#
+#   Author: Wesley Wiedenmeier <wesley.wiedenmeier@canonical.com>
+#
+#   Curtin is free software: you can redistribute it and/or modify it under
+#   the terms of the GNU Affero General Public License as published by the
+#   Free Software Foundation, either version 3 of the License, or (at your
+#   option) any later version.
+#
+#   Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
+#   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+#   FOR A PARTICULAR PURPOSE.  See the GNU Affero General Public License for
+#   more details.
+#
+#   You should have received a copy of the GNU Affero General Public License
+#   along with Curtin.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+This module provides some helper functions for manipulating lvm devices
+"""
+
+from curtin import util
+from curtin.log import LOG
+import os
+
+# separator to use for lvm/dm tools
+_SEP = '='
+
+
+def _filter_lvm_info(lvtool, match_field, query_field, match_key):
+    """
+    filter output of pv/vg/lvdisplay tools
+    """
+    (out, _) = util.subp([lvtool, '-C', '--separator', _SEP, '--noheadings',
+                          '-o', ','.join([match_field, query_field])],
+                         capture=True)
+    return [qf for (mf, qf) in
+            [l.strip().split(_SEP) for l in out.strip().splitlines()]
+            if mf == match_key]
+
+
+def get_pvols_in_volgroup(vg_name):
+    """
+    get physical volumes used by volgroup
+    """
+    return _filter_lvm_info('pvdisplay', 'vg_name', 'pv_name', vg_name)
+
+
+def get_lvols_in_volgroup(vg_name):
+    """
+    get logical volumes in volgroup
+    """
+    return _filter_lvm_info('lvdisplay', 'vg_name', 'lv_name', vg_name)
+
+
+def split_lvm_name(full):
+    """
+    split full lvm name into tuple of (volgroup, lv_name)
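+
+    e.g. (illustrative): 'ubuntu--vg-root' splits into 'ubuntu-vg' and
+    'root', since dmsetup unescapes the doubled hyphens used in dm names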
+    """
+    # 'dmsetup splitname' is the authoritative source for lvm name parsing
+    (out, _) = util.subp(['dmsetup', 'splitname', full, '-c', '--noheadings',
+                          '--separator', _SEP, '-o', 'vg_name,lv_name'],
+                         capture=True)
+    return out.strip().split(_SEP)
+
+
+def lvmetad_running():
+    """
+    check if lvmetad is running
+    """
+    return os.path.exists(os.environ.get('LVM_LVMETAD_PIDFILE',
+                                         '/run/lvmetad.pid'))
+
+
+def lvm_scan():
+    """
+    run full scan for volgroups, logical volumes and physical volumes
+    """
+    # the lvm tools lvscan, vgscan and pvscan on ubuntu precise do not
+    # support the flag --cache. the flag is present for the tools in ubuntu
+    # trusty and later. since lvmetad is used in current releases of
+    # ubuntu, the --cache flag is needed to ensure that the data cached by
+    # lvmetad is updated.
+
+    # before appending the cache flag though, check if lvmetad is running. this
+    # ensures that we do the right thing even if lvmetad is supported but is
+    # not running
+    release = util.lsb_release().get('codename')
+    if release in [None, 'UNAVAILABLE']:
+        LOG.warning('unable to find release number, assuming xenial or later')
+        release = 'xenial'
+
+    for cmd in [['pvscan'], ['vgscan', '--mknodes']]:
+        if release != 'precise' and lvmetad_running():
+            cmd.append('--cache')
+        util.subp(cmd, capture=True)

=== modified file 'curtin/block/mdadm.py'
--- curtin/block/mdadm.py	2016-05-10 16:13:29 +0000
+++ curtin/block/mdadm.py	2016-10-03 18:55:20 +0000
@@ -28,7 +28,7 @@
 from subprocess import CalledProcessError
 
 from curtin.block import (dev_short, dev_path, is_valid_device, sys_block_path)
-from curtin import util
+from curtin import (util, udev)
 from curtin.log import LOG
 
 NOSPARE_RAID_LEVELS = [
@@ -117,21 +117,34 @@
 #
 
 
-def mdadm_assemble(md_devname=None, devices=[], spares=[], scan=False):
+def mdadm_assemble(md_devname=None, devices=[], spares=[], scan=False,
+                   ignore_errors=False):
     # md_devname is a /dev/XXXX
     # devices is non-empty list of /dev/xxx
     # if spares is a non-empty list, append the /dev/xxx entries
     cmd = ["mdadm", "--assemble"]
     if scan:
-        cmd += ['--scan']
+        cmd += ['--scan', '-v']
     else:
         valid_mdname(md_devname)
         cmd += [md_devname, "--run"] + devices
         if spares:
             cmd += spares
 
-    util.subp(cmd, capture=True, rcs=[0, 1, 2])
-    util.subp(["udevadm", "settle"])
+    try:
+        # mdadm assemble returns 1 when no arrays are found. this might not be
+        # an error depending on the situation this function was called in, so
+        # accept a return code of 1
+        # mdadm assemble returns 2 when called on an array that is already
+        # assembled. this is not an error, so accept return code of 2
+        # all other return codes are accepted when ignore_errors is true
+        util.subp(cmd, capture=True, rcs=[0, 1, 2])
+    except util.ProcessExecutionError:
+        LOG.warning("mdadm_assemble had unexpected return code")
+        if not ignore_errors:
+            raise
+
+    udev.udevadm_settle()
 
 
 def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name=""):

=== modified file 'curtin/block/mkfs.py'
--- curtin/block/mkfs.py	2016-05-10 16:13:29 +0000
+++ curtin/block/mkfs.py	2016-10-03 18:55:20 +0000
@@ -78,6 +78,7 @@
              "swap": "--uuid"},
     "force": {"btrfs": "--force",
               "ext": "-F",
+              "fat": "-I",
               "ntfs": "--force",
               "reiserfs": "-f",
               "swap": "--force",
@@ -91,6 +92,7 @@
         "btrfs": "--sectorsize",
         "ext": "-b",
         "fat": "-S",
+        "xfs": "-s",
         "ntfs": "--sector-size",
         "reiserfs": "--block-size"}
 }
@@ -165,12 +167,15 @@
     # use device logical block size to ensure properly formatted filesystems
     (logical_bsize, physical_bsize) = block.get_blockdev_sector_size(path)
     if logical_bsize > 512:
+        lbs_str = ('size={}'.format(logical_bsize) if fs_family == "xfs"
+                   else str(logical_bsize))
         cmd.extend(get_flag_mapping("sectorsize", fs_family,
-                                    param=str(logical_bsize),
-                                    strict=strict))
-        # mkfs.vfat doesn't calculate this right for non-512b sector size
-        # lp:1569576 , d-i uses the same setting.
-        cmd.extend(["-s", "1"])
+                                    param=lbs_str, strict=strict))
+
+        if fs_family == 'fat':
+            # mkfs.vfat doesn't calculate this right for non-512b sector size
+            # lp:1569576 , d-i uses the same setting.
+            cmd.extend(["-s", "1"])
 
     if force:
         cmd.extend(get_flag_mapping("force", fs_family, strict=strict))

=== modified file 'curtin/commands/apply_net.py'
--- curtin/commands/apply_net.py	2016-05-10 16:13:29 +0000
+++ curtin/commands/apply_net.py	2016-10-03 18:55:20 +0000
@@ -26,6 +26,57 @@
 
 LOG = log.LOG
 
+IFUPDOWN_IPV6_MTU_PRE_HOOK = """#!/bin/bash -e
+# injected by curtin installer
+
+[ "${IFACE}" != "lo" ] || exit 0
+
+# Trigger only if MTU configured
+[ -n "${IF_MTU}" ] || exit 0
+
+read CUR_DEV_MTU </sys/class/net/${IFACE}/mtu ||:
+read CUR_IPV6_MTU </proc/sys/net/ipv6/conf/${IFACE}/mtu ||:
+[ -n "${CUR_DEV_MTU}" ] && echo ${CUR_DEV_MTU} > /run/network/${IFACE}_dev.mtu
+[ -n "${CUR_IPV6_MTU}" ] &&
+  echo ${CUR_IPV6_MTU} > /run/network/${IFACE}_ipv6.mtu
+exit 0
+"""
+
+IFUPDOWN_IPV6_MTU_POST_HOOK = """#!/bin/bash -e
+# injected by curtin installer
+
+[ "${IFACE}" != "lo" ] || exit 0
+
+# Trigger only if MTU configured
+[ -n "${IF_MTU}" ] || exit 0
+
+read PRE_DEV_MTU </run/network/${IFACE}_dev.mtu ||:
+read CUR_DEV_MTU </sys/class/net/${IFACE}/mtu ||:
+read PRE_IPV6_MTU </run/network/${IFACE}_ipv6.mtu ||:
+read CUR_IPV6_MTU </proc/sys/net/ipv6/conf/${IFACE}/mtu ||:
+
+if [ "${ADDRFAM}" = "inet6" ]; then
+  # We need to check the underlying interface MTU and
+  # raise it if the IPV6 mtu is larger
+  if [ ${CUR_DEV_MTU} -lt ${IF_MTU} ]; then
+      ip link set ${IFACE} mtu ${IF_MTU}
+  fi
+  # sysctl -q -e -w net.ipv6.conf.${IFACE}.mtu=${IF_MTU}
+  echo ${IF_MTU} >/proc/sys/net/ipv6/conf/${IFACE}/mtu ||:
+
+elif [ "${ADDRFAM}" = "inet" ]; then
+  # handle the clobber case where inet mtu changes v6 mtu.
+  # ifupdown will already have set dev mtu, so lower mtu
+  # if needed.  If v6 mtu was larger, it gets clamped down
+  # to the dev MTU value.
+  if [ ${PRE_IPV6_MTU} -lt ${CUR_IPV6_MTU} ]; then
+    # sysctl -q -e -w net.ipv6.conf.${IFACE}.mtu=${PRE_IPV6_MTU}
+    echo ${PRE_IPV6_MTU} >/proc/sys/net/ipv6/conf/${IFACE}/mtu ||:
+  fi
+fi
+exit 0
+"""
+
 
 def apply_net(target, network_state=None, network_config=None):
     if network_state is None and network_config is None:
@@ -45,6 +96,108 @@
 
     net.render_network_state(target=target, network_state=ns)
 
+    _maybe_remove_legacy_eth0(target)
+    LOG.info('Attempting to remove ipv6 privacy extensions')
+    _disable_ipv6_privacy_extensions(target)
+    _patch_ifupdown_ipv6_mtu_hook(target)
+
+
+def _patch_ifupdown_ipv6_mtu_hook(target,
+                                  prehookfn="etc/network/if-pre-up.d/mtuipv6",
+                                  posthookfn="etc/network/if-up.d/mtuipv6"):
+
+    contents = {
+        'prehook': IFUPDOWN_IPV6_MTU_PRE_HOOK,
+        'posthook': IFUPDOWN_IPV6_MTU_POST_HOOK,
+    }
+
+    hookfn = {
+        'prehook': prehookfn,
+        'posthook': posthookfn,
+    }
+
+    for hook in ['prehook', 'posthook']:
+        fn = hookfn[hook]
+        cfg = util.target_path(target, path=fn)
+        LOG.info('Injecting fix for ipv6 mtu settings: %s', cfg)
+        util.write_file(cfg, contents[hook], mode=0o755)
+
+
+def _disable_ipv6_privacy_extensions(target,
+                                     path="etc/sysctl.d/10-ipv6-privacy.conf"):
+
+    """Ubuntu server image sets a preference to use IPv6 privacy extensions
+       by default; this races with the cloud-image desire to disable them.
+       Resolve this by allowing the cloud-image setting to win. """
+
+    cfg = util.target_path(target, path=path)
+    if not os.path.exists(cfg):
+        LOG.warn('Failed to find ipv6 privacy conf file %s', cfg)
+        return
+
+    bmsg = "Disabling IPv6 privacy extensions config may not apply."
+    try:
+        contents = util.load_file(cfg)
+        known_contents = ["net.ipv6.conf.all.use_tempaddr = 2",
+                          "net.ipv6.conf.default.use_tempaddr = 2"]
+        lines = [f.strip() for f in contents.splitlines()
+                 if not f.startswith("#")]
+        if lines == known_contents:
+            LOG.info('deleting file: %s', cfg)
+            util.del_file(cfg)
+            msg = "removed %s with known contents" % cfg
+            curtin_contents = '\n'.join(
+                ["# IPv6 Privacy Extensions (RFC 4941)",
+                 "# Disabled by curtin",
+                 "# net.ipv6.conf.all.use_tempaddr = 2",
+                 "# net.ipv6.conf.default.use_tempaddr = 2"])
+            util.write_file(cfg, curtin_contents)
+        else:
+            LOG.info('skipping, content did not match')
+            LOG.debug("found content:\n%s", lines)
+            LOG.debug("expected contents:\n%s", known_contents)
+            msg = (bmsg + " '%s' exists with user configured content." % cfg)
+    except Exception:
+        msg = bmsg + " %s exists, but could not be read." % cfg
+        LOG.exception(msg)
+        return
+
+    LOG.warn(msg)
+
+
+def _maybe_remove_legacy_eth0(target,
+                              path="etc/network/interfaces.d/eth0.cfg"):
+    """Ubuntu cloud images previously included a 'eth0.cfg' that had
+       hard coded content.  That file would interfere with the rendered
+       configuration if it was present.
+
+       If the file does not exist, do nothing.
+       If the file exists:
+         - with known content, remove it and warn
+         - with unknown content, leave it and warn
+    """
+
+    cfg = util.target_path(target, path=path)
+    if not os.path.exists(cfg):
+        LOG.warn('Failed to find legacy network conf file %s', cfg)
+        return
+
+    bmsg = "Dynamic networking config may not apply."
+    try:
+        contents = util.load_file(cfg)
+        known_contents = ["auto eth0", "iface eth0 inet dhcp"]
+        lines = [f.strip() for f in contents.splitlines()
+                 if not f.startswith("#")]
+        if lines == known_contents:
+            util.del_file(cfg)
+            msg = "removed %s with known contents" % cfg
+        else:
+            msg = (bmsg + " '%s' exists with user configured content." % cfg)
+    except Exception:
+        msg = bmsg + " %s exists, but could not be read." % cfg
+        LOG.exception(msg)
+        return
+
+    LOG.warn(msg)
+
 
 def apply_net_main(args):
     #  curtin apply_net [--net-state=/config/netstate.yml] [--target=/]
@@ -76,8 +229,10 @@
         apply_net(target=state['target'],
                   network_state=state['network_state'],
                   network_config=state['network_config'])
+
     except Exception:
         LOG.exception('failed to apply network config')
+        return 1
 
     LOG.info('Applied network configuration successfully')
     sys.exit(0)
@@ -90,7 +245,7 @@
       'metavar': 'NETSTATE', 'action': 'store',
       'default': os.environ.get('OUTPUT_NETWORK_STATE')}),
      (('-t', '--target'),
-      {'help': ('target filesystem root to add swap file to. '
+      {'help': ('target filesystem root in which to configure networking. '
                 'default is env["TARGET_MOUNT_POINT"]'),
        'metavar': 'TARGET', 'action': 'store',
        'default': os.environ.get('TARGET_MOUNT_POINT')}),

=== added file 'curtin/commands/apt_config.py'
--- curtin/commands/apt_config.py	1970-01-01 00:00:00 +0000
+++ curtin/commands/apt_config.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,668 @@
+#   Copyright (C) 2016 Canonical Ltd.
+#
+#   Author: Christian Ehrhardt <christian.ehrhardt@canonical.com>
+#
+#   Curtin is free software: you can redistribute it and/or modify it under
+#   the terms of the GNU Affero General Public License as published by the
+#   Free Software Foundation, either version 3 of the License, or (at your
+#   option) any later version.
+#
+#   Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
+#   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+#   FOR A PARTICULAR PURPOSE.  See the GNU Affero General Public License for
+#   more details.
+#
+#   You should have received a copy of the GNU Affero General Public License
+#   along with Curtin.  If not, see <http://www.gnu.org/licenses/>.
+"""
+apt_config.py
+Handle the setup of apt related tasks like proxies, mirrors, repositories.
+"""
+
+import argparse
+import glob
+import os
+import re
+import sys
+import yaml
+
+from curtin.log import LOG
+from curtin import (config, util, gpg)
+
+from . import populate_one_subcmd
+
+# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
+ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
+
+# place where apt stores cached repository data
+APT_LISTS = "/var/lib/apt/lists"
+
+# Files to store proxy information
+APT_CONFIG_FN = "/etc/apt/apt.conf.d/94curtin-config"
+APT_PROXY_FN = "/etc/apt/apt.conf.d/90curtin-aptproxy"
+
+# Default keyserver to use
+DEFAULT_KEYSERVER = "keyserver.ubuntu.com"
+
+# Default archive mirrors
+PRIMARY_ARCH_MIRRORS = {"PRIMARY": "http://archive.ubuntu.com/ubuntu/",
+                        "SECURITY": "http://security.ubuntu.com/ubuntu/"}
+PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
+                 "SECURITY": "http://ports.ubuntu.com/ubuntu-ports"}
+PRIMARY_ARCHES = ['amd64', 'i386']
+PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el']
+
+
+def get_default_mirrors(arch=None):
+    """returns the default mirrors for the target. These depend on the
+       architecture, for more see:
+       https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports"""
+    if arch is None:
+        arch = util.get_architecture()
+    if arch in PRIMARY_ARCHES:
+        return PRIMARY_ARCH_MIRRORS.copy()
+    if arch in PORTS_ARCHES:
+        return PORTS_MIRRORS.copy()
+    raise ValueError("No default mirror known for arch %s" % arch)
+
+
+def handle_apt(cfg, target=None):
+    """ handle_apt
+        process the config for apt_config. This can be called from
+        curthooks if a global apt config was provided or via the "apt"
+        standalone command.
+    """
+    release = util.lsb_release(target=target)['codename']
+    arch = util.get_architecture(target)
+    mirrors = find_apt_mirror_info(cfg, arch)
+    LOG.debug("Apt Mirror info: %s", mirrors)
+
+    apply_debconf_selections(cfg, target)
+
+    if not config.value_as_boolean(cfg.get('preserve_sources_list',
+                                           True)):
+        generate_sources_list(cfg, release, mirrors, target)
+        rename_apt_lists(mirrors, target)
+
+    try:
+        apply_apt_proxy_config(cfg, target + APT_PROXY_FN,
+                               target + APT_CONFIG_FN)
+    except (IOError, OSError):
+        LOG.exception("Failed to apply proxy or apt config info:")
+
+    # Process 'apt_source -> sources {dict}'
+    if 'sources' in cfg:
+        params = mirrors
+        params['RELEASE'] = release
+        params['MIRROR'] = mirrors["MIRROR"]
+
+        matcher = None
+        matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
+        if matchcfg:
+            matcher = re.compile(matchcfg).search
+
+        add_apt_sources(cfg['sources'], target,
+                        template_params=params, aa_repo_match=matcher)
+
+
+def debconf_set_selections(selections, target=None):
+    util.subp(['debconf-set-selections'], data=selections, target=target,
+              capture=True)
+
+
+def dpkg_reconfigure(packages, target=None):
+    # For any packages that are already installed, but have preseed data
+    # we populate the debconf database, but the filesystem configuration
+    # would be preferred on a subsequent dpkg-reconfigure.
+    # so, what we have to do is "know" information about certain packages
+    # to unconfigure them.
+    unhandled = []
+    to_config = []
+    for pkg in packages:
+        if pkg in CONFIG_CLEANERS:
+            LOG.debug("unconfiguring %s", pkg)
+            CONFIG_CLEANERS[pkg](target)
+            to_config.append(pkg)
+        else:
+            unhandled.append(pkg)
+
+    if len(unhandled):
+        LOG.warn("The following packages were installed and preseeded, "
+                 "but cannot be unconfigured: %s", unhandled)
+
+    if len(to_config):
+        util.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
+                  list(to_config), data=None, target=target, capture=True)
+
+
+def apply_debconf_selections(cfg, target=None):
+    """apply_debconf_selections - push content to debconf"""
+    # debconf_selections:
+    #  set1: |
+    #   cloud-init cloud-init/datasources multiselect MAAS
+    #  set2: pkg pkg/value string bar
+    selsets = cfg.get('debconf_selections')
+    if not selsets:
+        LOG.debug("debconf_selections was not set in config")
+        return
+
+    selections = '\n'.join(
+        [selsets[key] for key in sorted(selsets.keys())])
+    debconf_set_selections(selections.encode() + b"\n", target=target)
+
+    # get a complete list of packages listed in input
+    pkgs_cfgd = set()
+    for key, content in selsets.items():
+        for line in content.splitlines():
+            if line.startswith("#"):
+                continue
+            pkg = re.sub(r"[:\s].*", "", line)
+            pkgs_cfgd.add(pkg)
+
+    pkgs_installed = util.get_installed_packages(target)
+
+    LOG.debug("pkgs_cfgd: %s", pkgs_cfgd)
+    LOG.debug("pkgs_installed: %s", pkgs_installed)
+    need_reconfig = pkgs_cfgd.intersection(pkgs_installed)
+
+    if len(need_reconfig) == 0:
+        LOG.debug("no need for reconfig")
+        return
+
+    dpkg_reconfigure(need_reconfig, target=target)
+
+
+def clean_cloud_init(target):
+    """clean out any local cloud-init config"""
+    flist = glob.glob(
+        util.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
+
+    LOG.debug("cleaning cloud-init config from: %s", flist)
+    for dpkg_cfg in flist:
+        os.unlink(dpkg_cfg)
+
+
+def mirrorurl_to_apt_fileprefix(mirror):
+    """ mirrorurl_to_apt_fileprefix
+        Convert a mirror url to the file prefix used by apt on disk to
+        store cache information for that mirror.
+        To do so:
+        - take off the leading scheme (???://)
+        - drop any trailing /
+        - convert remaining / to _
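+        e.g. 'http://archive.ubuntu.com/ubuntu/' becomes
+        'archive.ubuntu.com_ubuntu'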
+    """
+    string = mirror
+    if string.endswith("/"):
+        string = string[0:-1]
+    pos = string.find("://")
+    if pos >= 0:
+        string = string[pos + 3:]
+    string = string.replace("/", "_")
+    return string
+
+
+def rename_apt_lists(new_mirrors, target=None):
+    """rename_apt_lists - rename apt lists to preserve old cache data"""
+    default_mirrors = get_default_mirrors(util.get_architecture(target))
+
+    pre = util.target_path(target, APT_LISTS)
+    for (name, omirror) in default_mirrors.items():
+        nmirror = new_mirrors.get(name)
+        if not nmirror:
+            continue
+
+        oprefix = pre + os.path.sep + mirrorurl_to_apt_fileprefix(omirror)
+        nprefix = pre + os.path.sep + mirrorurl_to_apt_fileprefix(nmirror)
+        if oprefix == nprefix:
+            continue
+        olen = len(oprefix)
+        for filename in glob.glob("%s_*" % oprefix):
+            newname = "%s%s" % (nprefix, filename[olen:])
+            LOG.debug("Renaming apt list %s to %s", filename, newname)
+            try:
+                os.rename(filename, newname)
+            except OSError:
+                # since this is a best effort task, warn but don't fail
+                LOG.warn("Failed to rename apt list:", exc_info=True)
+
+
+def mirror_to_placeholder(tmpl, mirror, placeholder):
+    """ mirror_to_placeholder
+        replace the specified mirror in a template with a placeholder string
+        Checks for existence of the expected mirror and warns if not found
+    """
+    if mirror not in tmpl:
+        LOG.warn("Expected mirror '%s' not found in: %s", mirror, tmpl)
+    return tmpl.replace(mirror, placeholder)
+
+
+def map_known_suites(suite):
+    """there are a few default names which will be auto-extended.
+       This comes at the cost of not being able to use those names literally
+       as suites, but on the other hand it increases readability of the cfg
+       quite a lot"""
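+    # e.g. map_known_suites('updates') -> '$RELEASE-updates', while an
+    # unrecognized name such as 'mysuite' passes through unchanged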
+    mapping = {'updates': '$RELEASE-updates',
+               'backports': '$RELEASE-backports',
+               'security': '$RELEASE-security',
+               'proposed': '$RELEASE-proposed',
+               'release': '$RELEASE'}
+    try:
+        retsuite = mapping[suite]
+    except KeyError:
+        retsuite = suite
+    return retsuite
+
+
+def disable_suites(disabled, src, release):
+    """reads the config for suites to be disabled and removes those
+       from the template"""
+    if not disabled:
+        return src
+
+    retsrc = src
+    for suite in disabled:
+        suite = map_known_suites(suite)
+        releasesuite = util.render_string(suite, {'RELEASE': release})
+        LOG.debug("Disabling suite %s as %s", suite, releasesuite)
+
+        newsrc = ""
+        for line in retsrc.splitlines(True):
+            if line.startswith("#"):
+                newsrc += line
+                continue
+
+            # sources.list allow options in cols[1] which can have spaces
+            # so the actual suite can be [2] or later. example:
+            # deb [ arch=amd64,armel k=v ] http://example.com/debian
+            cols = line.split()
+            if len(cols) > 1:
+                pcol = 2
+                if cols[1].startswith("["):
+                    for col in cols[1:]:
+                        pcol += 1
+                        if col.endswith("]"):
+                            break
+
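+                # cols[pcol] is now the suite field; comment the line out
+                # if it matches the suite being disabled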
+                if cols[pcol] == releasesuite:
+                    line = '# suite disabled by curtin: %s' % line
+            newsrc += line
+        retsrc = newsrc
+
+    return retsrc
+
+
+def generate_sources_list(cfg, release, mirrors, target=None):
+    """ generate_sources_list
+        create a sources.list file based on a custom or default template
+        by replacing mirrors and release in the template
+    """
+    default_mirrors = get_default_mirrors(util.get_architecture(target))
+    aptsrc = "/etc/apt/sources.list"
+    params = {'RELEASE': release}
+    for k in mirrors:
+        params[k] = mirrors[k]
+
+    tmpl = cfg.get('sources_list', None)
+    if tmpl is None:
+        LOG.info("No custom template provided, fall back to modify"
+                 "mirrors in %s on the target system", aptsrc)
+        tmpl = util.load_file(util.target_path(target, aptsrc))
+        # Strategy if no custom template was provided:
+        # - Only replacing mirrors
+        # - no reason to replace "release" as it is from target anyway
+        # - The less we depend upon, the more stable this is against changes
+        # - warn if expected original content wasn't found
+        tmpl = mirror_to_placeholder(tmpl, default_mirrors['PRIMARY'],
+                                     "$MIRROR")
+        tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'],
+                                     "$SECURITY")
+
+    orig = util.target_path(target, aptsrc)
+    if os.path.exists(orig):
+        os.rename(orig, orig + ".curtin.old")
+
+    rendered = util.render_string(tmpl, params)
+    disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
+    util.write_file(util.target_path(target, aptsrc), disabled, mode=0o644)
+
+    # protect the just generated sources.list from cloud-init
+    cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg"
+    # this has to work with older cloud-init as well, so use old key
+    cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1)
+    try:
+        util.write_file(util.target_path(target, cloudfile),
+                        cloudconf, mode=0o644)
+    except IOError:
+        LOG.exception("Failed to protect source.list from cloud-init in (%s)",
+                      util.target_path(target, cloudfile))
+        raise
+
+
+def add_apt_key_raw(key, target=None):
+    """
+    actually add the key given in the 'key' argument
+    to the target system's apt keyring
+    """
+    LOG.debug("Adding key:\n'%s'", key)
+    try:
+        util.subp(['apt-key', 'add', '-'], data=key.encode(), target=target)
+    except util.ProcessExecutionError:
+        LOG.exception("failed to add apt GPG Key to apt keyring")
+        raise
+
+
+def add_apt_key(ent, target=None):
+    """
+    Add key to the system as defined in ent (if any).
+    Supports raw keys or key IDs; key IDs are first fetched from a
+    keyserver to obtain the raw key
+    """
+    if 'keyid' in ent and 'key' not in ent:
+        keyserver = DEFAULT_KEYSERVER
+        if 'keyserver' in ent:
+            keyserver = ent['keyserver']
+
+        ent['key'] = gpg.getkeybyid(ent['keyid'], keyserver)
+
+    if 'key' in ent:
+        add_apt_key_raw(ent['key'], target)
+
+
+def add_apt_sources(srcdict, target=None, template_params=None,
+                    aa_repo_match=None):
+    """
+    add entries in /etc/apt/sources.list.d for each abbreviated
+    sources.list entry in 'srcdict'.  When rendering the source templates,
+    substitute the values given in 'template_params'
+    """
+    if template_params is None:
+        template_params = {}
+
+    if aa_repo_match is None:
+        raise ValueError('did not get a valid repo matcher')
+
+    if not isinstance(srcdict, dict):
+        raise TypeError('unknown apt format: %s' % (srcdict))
+
+    for filename in srcdict:
+        ent = srcdict[filename]
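+        # the config key doubles as the output filename unless the entry
+        # provides one explicitly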
+        if 'filename' not in ent:
+            ent['filename'] = filename
+
+        add_apt_key(ent, target)
+
+        if 'source' not in ent:
+            continue
+        source = ent['source']
+        source = util.render_string(source, template_params)
+
+        if not ent['filename'].startswith("/"):
+            ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
+                                           ent['filename'])
+        if not ent['filename'].endswith(".list"):
+            ent['filename'] += ".list"
+
+        if aa_repo_match(source):
+            try:
+                with util.ChrootableTarget(
+                        target, sys_resolvconf=True) as in_chroot:
+                    in_chroot.subp(["add-apt-repository", source])
+            except util.ProcessExecutionError:
+                LOG.exception("add-apt-repository failed.")
+                raise
+            continue
+
+        sourcefn = util.target_path(target, ent['filename'])
+        try:
+            contents = "%s\n" % (source)
+            util.write_file(sourcefn, contents, omode="a")
+        except IOError as detail:
+            LOG.exception("failed write to file %s: %s", sourcefn, detail)
+            raise
+
+    util.apt_update(target=target, force=True,
+                    comment="apt-source changed config")
+
+    return
+
+
+def search_for_mirror(candidates):
+    """
+    Search through a list of mirror urls for one that works
+    This needs to return quickly.
+    """
+    if candidates is None:
+        return None
+
+    LOG.debug("search for mirror in candidates: '%s'", candidates)
+    for cand in candidates:
+        try:
+            if util.is_resolvable_url(cand):
+                LOG.debug("found working mirror: '%s'", cand)
+                return cand
+        except Exception:
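+            # a resolution failure just means this candidate is skipped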
+            pass
+    return None
+
+
+def update_mirror_info(pmirror, smirror, arch):
+    """sets security mirror to primary if not defined.
+       returns defaults if no mirrors are defined"""
+    if pmirror is not None:
+        if smirror is None:
+            smirror = pmirror
+        return {'PRIMARY': pmirror,
+                'SECURITY': smirror}
+    return get_default_mirrors(arch)
+
+
+def get_arch_mirrorconfig(cfg, mirrortype, arch):
+    """out of a list of potential mirror configurations select
+       and return the one matching the architecture (or default)"""
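+    # a mirror specification is a list of entries like (illustrative):
+    #   - arches: [amd64, i386]
+    #     uri: http://archive.ubuntu.com/ubuntu
+    #   - arches: [default]
+    #     uri: http://ports.ubuntu.com/ubuntu-ports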
+    # select the mirror specification (if-any)
+    mirror_cfg_list = cfg.get(mirrortype, None)
+    if mirror_cfg_list is None:
+        return None
+
+    # select the specification matching the target arch
+    default = None
+    for mirror_cfg_elem in mirror_cfg_list:
+        arches = mirror_cfg_elem.get("arches")
+        if arch in arches:
+            return mirror_cfg_elem
+        if "default" in arches:
+            default = mirror_cfg_elem
+    return default
+
+
+def get_mirror(cfg, mirrortype, arch):
+    """pass the three potential stages of mirror specification
+       returns None is neither of them found anything otherwise the first
+       hit is returned"""
+    mcfg = get_arch_mirrorconfig(cfg, mirrortype, arch)
+    if mcfg is None:
+        return None
+
+    # directly specified
+    mirror = mcfg.get("uri", None)
+
+    # fallback to search if specified
+    if mirror is None:
+        # list of mirrors to try to resolve
+        mirror = search_for_mirror(mcfg.get("search", None))
+
+    return mirror
+
+
+def find_apt_mirror_info(cfg, arch=None):
+    """find_apt_mirror_info
+       find an apt_mirror given the cfg provided.
+       It can check for separate config of primary and security mirrors
+       If only primary is given, security is assumed to be equal to primary.
+       If the generic apt_mirror is given, it defines both.
+    """
+
+    if arch is None:
+        arch = util.get_architecture()
+        LOG.debug("got arch for mirror selection: %s", arch)
+    pmirror = get_mirror(cfg, "primary", arch)
+    LOG.debug("got primary mirror: %s", pmirror)
+    smirror = get_mirror(cfg, "security", arch)
+    LOG.debug("got security mirror: %s", smirror)
+
+    # Note: curtin has no cloud-datasource fallback
+
+    mirror_info = update_mirror_info(pmirror, smirror, arch)
+
+    # less complex replacements use only MIRROR, derive from primary
+    mirror_info["MIRROR"] = mirror_info["PRIMARY"]
+
+    return mirror_info
+
+
+def apply_apt_proxy_config(cfg, proxy_fname, config_fname):
+    """apply_apt_proxy_config
+       Applies any apt*proxy config if specified
+    """
+    # Set up any apt proxy
+    cfgs = (('proxy', 'Acquire::http::Proxy "%s";'),
+            ('http_proxy', 'Acquire::http::Proxy "%s";'),
+            ('ftp_proxy', 'Acquire::ftp::Proxy "%s";'),
+            ('https_proxy', 'Acquire::https::Proxy "%s";'))
+
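+    # render one 'Acquire::<scheme>::Proxy "<url>";' line per configured key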
+    proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
+    if len(proxies):
+        LOG.debug("write apt proxy info to %s", proxy_fname)
+        util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
+    elif os.path.isfile(proxy_fname):
+        util.del_file(proxy_fname)
+        LOG.debug("no apt proxy configured, removed %s", proxy_fname)
+
+    if cfg.get('conf', None):
+        LOG.debug("write apt config info to %s", config_fname)
+        util.write_file(config_fname, cfg.get('conf'))
+    elif os.path.isfile(config_fname):
+        util.del_file(config_fname)
+        LOG.debug("no apt config configured, removed %s", config_fname)
+
+
+def apt_command(args):
+    """ Main entry point for curtin apt-config standalone command
+        This does not read the global config as handled by curthooks, but
+        instead one can specify a different "target" and a new cfg via --config
+        """
+    cfg = config.load_command_config(args, {})
+
+    if args.target is not None:
+        target = args.target
+    else:
+        state = util.load_command_environment()
+        target = state['target']
+
+    if target is None:
+        sys.stderr.write("Unable to find target.  "
+                         "Use --target or set TARGET_MOUNT_POINT\n")
+        sys.exit(2)
+
+    apt_cfg = cfg.get("apt")
+    # if no apt config section is available, do nothing
+    if apt_cfg is not None:
+        LOG.debug("Handling apt to target %s with config %s",
+                  target, apt_cfg)
+        try:
+            with util.ChrootableTarget(target, sys_resolvconf=True):
+                handle_apt(apt_cfg, target)
+        except (RuntimeError, TypeError, ValueError, IOError):
+            LOG.exception("Failed to configure apt features '%s'", apt_cfg)
+            sys.exit(1)
+    else:
+        LOG.info("No apt config provided, skipping")
+
+    sys.exit(0)
+
+
+def translate_old_apt_features(cfg):
+    """translate the few old apt related features into the new config format"""
+    predef_apt_cfg = cfg.get("apt")
+    if predef_apt_cfg is None:
+        cfg['apt'] = {}
+        predef_apt_cfg = cfg.get("apt")
+
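+    # old top-level keys move under 'apt', e.g. (illustrative):
+    #   apt_proxy: http://proxy:3128  ->  apt: {proxy: http://proxy:3128}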
+    if cfg.get('apt_proxy') is not None:
+        if predef_apt_cfg.get('proxy') is not None:
+            msg = ("Error in apt_proxy configuration: "
+                   "old and new format of apt features "
+                   "are mutually exclusive")
+            LOG.error(msg)
+            raise ValueError(msg)
+
+        cfg['apt']['proxy'] = cfg.get('apt_proxy')
+        LOG.debug("Transferred %s into new format: %s", cfg.get('apt_proxy'),
+                  cfg.get('apt'))
+        del cfg['apt_proxy']
+
+    if cfg.get('apt_mirrors') is not None:
+        if predef_apt_cfg.get('mirrors') is not None:
+            msg = ("Error in apt_mirror configuration: "
+                   "old and new format of apt features "
+                   "are mutually exclusive")
+            LOG.error(msg)
+            raise ValueError(msg)
+
+        old = cfg.get('apt_mirrors')
+        cfg['apt']['primary'] = [{"arches": ["default"],
+                                  "uri": old.get('ubuntu_archive')}]
+        cfg['apt']['security'] = [{"arches": ["default"],
+                                   "uri": old.get('ubuntu_security')}]
+        LOG.debug("Transferred %s into new format: %s", cfg.get('apt_mirror'),
+                  cfg.get('apt'))
+        del cfg['apt_mirrors']
+        # to work this also needs to disable the default protection
+        psl = predef_apt_cfg.get('preserve_sources_list')
+        if psl is not None:
+            if config.value_as_boolean(psl) is True:
+                msg = ("Error in apt_mirror configuration: "
+                       "apt_mirrors and preserve_sources_list: True "
+                       "are mutually exclusive")
+                LOG.error(msg)
+                raise ValueError(msg)
+        cfg['apt']['preserve_sources_list'] = False
+
+    if cfg.get('debconf_selections') is not None:
+        if predef_apt_cfg.get('debconf_selections') is not None:
+            msg = ("Error in debconf_selections configuration: "
+                   "old and new format of apt features "
+                   "are mutually exclusive")
+            LOG.error(msg)
+            raise ValueError(msg)
+
+        selsets = cfg.get('debconf_selections')
+        cfg['apt']['debconf_selections'] = selsets
+        LOG.info("Transferred %s into new format: %s",
+                 cfg.get('debconf_selections'),
+                 cfg.get('apt'))
+        del cfg['debconf_selections']
+
+    return cfg
+
+
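+# command line arguments for the standalone 'curtin apt-config' subcommand,
+# e.g. (hypothetical config file): curtin apt-config -c apt.yaml -t /target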
+CMD_ARGUMENTS = (
+    ((('-c', '--config'),
+      {'help': 'read configuration from cfg', 'action': util.MergedCmdAppend,
+       'metavar': 'FILE', 'type': argparse.FileType("rb"),
+       'dest': 'cfgopts', 'default': []}),
+     (('-t', '--target'),
+      {'help': 'chroot to target. default is env[TARGET_MOUNT_POINT]',
+       'action': 'store', 'metavar': 'TARGET',
+       'default': os.environ.get('TARGET_MOUNT_POINT')}),)
+)
+
+
+def POPULATE_SUBCMD(parser):
+    """Populate subcommand option parsing for apt-config"""
+    populate_one_subcmd(parser, CMD_ARGUMENTS, apt_command)
+
+
+CONFIG_CLEANERS = {
+    'cloud-init': clean_cloud_init,
+}
+
+# vi: ts=4 expandtab syntax=python

=== added file 'curtin/commands/block_info.py'
--- curtin/commands/block_info.py	1970-01-01 00:00:00 +0000
+++ curtin/commands/block_info.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,75 @@
+#   Copyright (C) 2016 Canonical Ltd.
+#
+#   Author: Wesley Wiedenmeier <wesley.wiedenmeier@canonical.com>
+#
+#   Curtin is free software: you can redistribute it and/or modify it under
+#   the terms of the GNU Affero General Public License as published by the
+#   Free Software Foundation, either version 3 of the License, or (at your
+#   option) any later version.
+#
+#   Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
+#   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+#   FOR A PARTICULAR PURPOSE.  See the GNU Affero General Public License for
+#   more details.
+#
+#   You should have received a copy of the GNU Affero General Public License
+#   along with Curtin.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from . import populate_one_subcmd
+from curtin import (block, util)
+
+
+def block_info_main(args):
+    """get information about block devices, similar to lsblk"""
+    if not args.devices:
+        raise ValueError('devices to scan must be specified')
+    if not all(block.is_block_device(d) for d in args.devices):
+        raise ValueError('invalid device(s)')
+
+    def add_size_to_holders_tree(tree):
+        """add size information to generated holders trees"""
+        size_file = os.path.join(tree['device'], 'size')
+        # size file is always represented in 512 byte sectors even if
+        # underlying disk uses a larger logical_block_size
+        size = ((512 * int(util.load_file(size_file)))
+                if os.path.exists(size_file) else None)
+        tree['size'] = util.bytes2human(size) if args.human else str(size)
+        for holder in tree['holders']:
+            add_size_to_holders_tree(holder)
+        return tree
+
+    def format_name(tree):
+        """format information for human readable display"""
+        res = {
+            'name': ' - '.join((tree['name'], tree['dev_type'], tree['size'])),
+            'holders': []
+        }
+        for holder in tree['holders']:
+            res['holders'].append(format_name(holder))
+        return res
+
+    trees = [add_size_to_holders_tree(t) for t in
+             [block.clear_holders.gen_holders_tree(d) for d in args.devices]]
+
+    print(util.json_dumps(trees) if args.json else
+          '\n'.join(block.clear_holders.format_holders_tree(t) for t in
+                    [format_name(tree) for tree in trees]))
+
+    return 0
+
+
+CMD_ARGUMENTS = (
+    ('devices',
+     {'help': 'devices to get info for', 'default': [], 'nargs': '+'}),
+    ('--human',
+     {'help': 'output size in human readable format', 'default': False,
+      'action': 'store_true'}),
+    (('-j', '--json'),
+     {'help': 'output data in json format', 'default': False,
+      'action': 'store_true'}),
+)
+
+
+def POPULATE_SUBCMD(parser):
+    populate_one_subcmd(parser, CMD_ARGUMENTS, block_info_main)

=== modified file 'curtin/commands/block_meta.py'
--- curtin/commands/block_meta.py	2016-10-03 18:00:41 +0000
+++ curtin/commands/block_meta.py	2016-10-03 18:55:20 +0000
@@ -17,9 +17,8 @@
 
 from collections import OrderedDict
 from curtin import (block, config, util)
-from curtin.block import mdadm
+from curtin.block import (mdadm, mkfs, clear_holders, lvm)
 from curtin.log import LOG
-from curtin.block import mkfs
 from curtin.reporter import events
 
 from . import populate_one_subcmd
@@ -28,7 +27,7 @@
 import glob
 import os
 import platform
-import re
+import string
 import sys
 import tempfile
 import time
@@ -129,128 +128,6 @@
     return "mbr"
 
 
-def block_find_sysfs_path(devname):
-    # return the path in sys for device named devname
-    # support either short name ('sda') or full path /dev/sda
-    #  sda -> /sys/class/block/sda
-    #  sda1 -> /sys/class/block/sda/sda1
-    if not devname:
-        raise ValueError("empty devname provided to find_sysfs_path")
-
-    sys_class_block = '/sys/class/block/'
-    basename = os.path.basename(devname)
-    # try without parent blockdevice, then prepend parent
-    paths = [
-        os.path.join(sys_class_block, basename),
-        os.path.join(sys_class_block,
-                     re.split('[\d+]', basename)[0], basename),
-    ]
-
-    # find path to devname directory in sysfs
-    devname_sysfs = None
-    for path in paths:
-        if os.path.exists(path):
-            devname_sysfs = path
-
-    if devname_sysfs is None:
-        err = ('No sysfs path to device:'
-               ' {}'.format(devname_sysfs))
-        LOG.error(err)
-        raise ValueError(err)
-
-    return devname_sysfs
-
-
-def get_holders(devname):
-    # Look up any block device holders.
-    # Handle devices and partitions as devnames (vdb, md0, vdb7)
-    devname_sysfs = block_find_sysfs_path(devname)
-    if devname_sysfs:
-        holders = os.listdir(os.path.join(devname_sysfs, 'holders'))
-        LOG.debug("devname '%s' had holders: %s", devname, ','.join(holders))
-        return holders
-
-    LOG.debug('get_holders: did not find sysfs path for %s', devname)
-    return []
-
-
-def clear_holders(sys_block_path):
-    holders = os.listdir(os.path.join(sys_block_path, "holders"))
-    LOG.info("clear_holders running on '%s', with holders '%s'" %
-             (sys_block_path, holders))
-    for holder in holders:
-        # get path to holder in /sys/block, then clear it
-        try:
-            holder_realpath = os.path.realpath(
-                os.path.join(sys_block_path, "holders", holder))
-            clear_holders(holder_realpath)
-        except IOError as e:
-            # something might have already caused the holder to go away
-            if util.is_file_not_found_exc(e):
-                pass
-            pass
-
-    # detect what type of holder is using this volume and shut it down, need to
-    # find more robust name of doing detection
-    if "bcache" in sys_block_path:
-        # bcache device
-        part_devs = []
-        for part_dev in glob.glob(os.path.join(sys_block_path,
-                                               "slaves", "*", "dev")):
-            with open(part_dev, "r") as fp:
-                part_dev_id = fp.read().rstrip()
-                part_devs.append(
-                    os.path.split(os.path.realpath(os.path.join("/dev/block",
-                                  part_dev_id)))[-1])
-        for cache_dev in glob.glob("/sys/fs/bcache/*/bdev*"):
-            for part_dev in part_devs:
-                if part_dev in os.path.realpath(cache_dev):
-                    # This is our bcache device, stop it, wait for udev to
-                    # settle
-                    with open(os.path.join(os.path.split(cache_dev)[0],
-                              "stop"), "w") as fp:
-                        LOG.info("stopping: %s" % fp)
-                        fp.write("1")
-                        udevadm_settle()
-                    break
-        for part_dev in part_devs:
-            block.wipe_volume(os.path.join("/dev", part_dev),
-                              mode="superblock")
-
-    if os.path.exists(os.path.join(sys_block_path, "bcache")):
-        # bcache device that isn't running, if it were, we would have found it
-        # when we looked for holders
-        try:
-            with open(os.path.join(sys_block_path, "bcache", "set", "stop"),
-                      "w") as fp:
-                LOG.info("stopping: %s" % fp)
-                fp.write("1")
-        except IOError as e:
-            if not util.is_file_not_found_exc(e):
-                raise e
-            with open(os.path.join(sys_block_path, "bcache", "stop"),
-                      "w") as fp:
-                LOG.info("stopping: %s" % fp)
-                fp.write("1")
-        udevadm_settle()
-
-    if os.path.exists(os.path.join(sys_block_path, "md")):
-        # md device
-        block_dev = os.path.join("/dev/", os.path.split(sys_block_path)[-1])
-        # if these fail its okay, the array might not be assembled and thats
-        # fine
-        mdadm.mdadm_stop(block_dev)
-        mdadm.mdadm_remove(block_dev)
-
-    elif os.path.exists(os.path.join(sys_block_path, "dm")):
-        # Shut down any volgroups
-        with open(os.path.join(sys_block_path, "dm", "name"), "r") as fp:
-            name = fp.read().split('-')
-        util.subp(["lvremove", "--force", name[0].rstrip(), name[1].rstrip()],
-                  rcs=[0, 5])
-        util.subp(["vgremove", name[0].rstrip()], rcs=[0, 5, 6])
-
-
 def devsync(devpath):
     LOG.debug('devsync for %s', devpath)
     util.subp(['partprobe', devpath], rcs=[0, 1])
@@ -265,14 +142,6 @@
     raise OSError('Failed to find device at path: %s', devpath)
 
 
-def determine_partition_kname(disk_kname, partition_number):
-    for dev_type in ["nvme", "mmcblk"]:
-        if disk_kname.startswith(dev_type):
-            partition_number = "p%s" % partition_number
-            break
-    return "%s%s" % (disk_kname, partition_number)
-
-
 def determine_partition_number(partition_id, storage_config):
     vol = storage_config.get(partition_id)
     partnumber = vol.get('number')
@@ -304,6 +173,18 @@
     return partnumber
 
 
+def sanitize_dname(dname):
+    """
+    dnames should be sanitized before writing rule files, in case MAAS has
+    emitted a dname with a special character
+
+    only letters, numbers and '-' and '_' are permitted, as this will be
+    used for a device path. spaces are also not permitted
+    """
+    valid = string.digits + string.ascii_letters + '-_'
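+    # e.g. 'root disk!' -> 'root-disk-'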
+    return ''.join(c if c in valid else '-' for c in dname)
+
+
 def make_dname(volume, storage_config):
     state = util.load_command_environment()
     rules_dir = os.path.join(state['scratch'], "rules.d")
@@ -321,7 +202,7 @@
     # we may not always be able to find a uniq identifier on devices with names
     if not ptuuid and vol.get('type') in ["disk", "partition"]:
         LOG.warning("Can't find a uuid for volume: {}. Skipping dname.".format(
-            dname))
+            volume))
         return
 
     rule = [
@@ -346,11 +227,24 @@
         volgroup_name = storage_config.get(vol.get('volgroup')).get('name')
         dname = "%s-%s" % (volgroup_name, dname)
         rule.append(compose_udev_equality("ENV{DM_NAME}", dname))
-    rule.append("SYMLINK+=\"disk/by-dname/%s\"" % dname)
+    else:
+        raise ValueError('cannot make dname for device with type: {}'
+                         .format(vol.get('type')))
+
+    # note: this sanitization is done here instead of for all name attributes
+    #       at the beginning of storage configuration, as some devices, such
+    #       as lvm devices, may use the name attribute and permit special chars
+    sanitized = sanitize_dname(dname)
+    if sanitized != dname:
+        LOG.warning(
+            "dname modified to remove invalid chars. old: '{}' new: '{}'"
+            .format(dname, sanitized))
+
+    rule.append("SYMLINK+=\"disk/by-dname/%s\"" % sanitized)
     LOG.debug("Writing dname udev rule '{}'".format(str(rule)))
     util.ensure_dir(rules_dir)
-    with open(os.path.join(rules_dir, volume), "w") as fp:
-        fp.write(', '.join(rule))
+    rule_file = os.path.join(rules_dir, '{}.rules'.format(sanitized))
+    util.write_file(rule_file, ', '.join(rule))
 
 
 def get_path_to_storage_volume(volume, storage_config):
@@ -368,9 +262,9 @@
         partnumber = determine_partition_number(vol.get('id'), storage_config)
         disk_block_path = get_path_to_storage_volume(vol.get('device'),
                                                      storage_config)
-        (base_path, disk_kname) = os.path.split(disk_block_path)
-        partition_kname = determine_partition_kname(disk_kname, partnumber)
-        volume_path = os.path.join(base_path, partition_kname)
+        disk_kname = block.path_to_kname(disk_block_path)
+        partition_kname = block.partition_kname(disk_kname, partnumber)
+        volume_path = block.kname_to_path(partition_kname)
         devsync_vol = os.path.join(disk_block_path)
 
     elif vol.get('type') == "disk":
@@ -419,13 +313,15 @@
         # block devs are in the slaves dir there. Then, those blockdevs can be
         # checked against the kname of the devs in the config for the desired
         # bcache device. This is not very elegant though
-        backing_device_kname = os.path.split(get_path_to_storage_volume(
-            vol.get('backing_device'), storage_config))[-1]
+        backing_device_path = get_path_to_storage_volume(
+            vol.get('backing_device'), storage_config)
+        backing_device_kname = block.path_to_kname(backing_device_path)
         sys_path = list(filter(lambda x: backing_device_kname in x,
                                glob.glob("/sys/block/bcache*/slaves/*")))[0]
         while "bcache" not in os.path.split(sys_path)[-1]:
             sys_path = os.path.split(sys_path)[0]
-        volume_path = os.path.join("/dev", os.path.split(sys_path)[-1])
+        bcache_kname = block.path_to_kname(sys_path)
+        volume_path = block.kname_to_path(bcache_kname)
         LOG.debug('got bcache volume path {}'.format(volume_path))
 
     else:
@@ -442,62 +338,35 @@
 
 
 def disk_handler(info, storage_config):
+    _dos_names = ['dos', 'msdos']
     ptable = info.get('ptable')
-
     disk = get_path_to_storage_volume(info.get('id'), storage_config)
 
-    # Handle preserve flag
-    if info.get('preserve'):
-        if not ptable:
-            # Don't need to check state, return
-            return
-
-        # Check state of current ptable
-        try:
-            (out, _err) = util.subp(["blkid", "-o", "export", disk],
-                                    capture=True)
-        except util.ProcessExecutionError:
-            raise ValueError("disk '%s' has no readable partition table or \
-                cannot be accessed, but preserve is set to true, so cannot \
-                continue")
-        current_ptable = list(filter(lambda x: "PTTYPE" in x,
-                                     out.splitlines()))[0].split("=")[-1]
-        if current_ptable == "dos" and ptable != "msdos" or \
-                current_ptable == "gpt" and ptable != "gpt":
-            raise ValueError("disk '%s' does not have correct \
-                partition table, but preserve is set to true, so not \
-                creating table, so not creating table." % info.get('id'))
-        LOG.info("disk '%s' marked to be preserved, so keeping partition \
-                 table")
-        return
-
-    # Wipe the disk
-    if info.get('wipe') and info.get('wipe') != "none":
-        # The disk has a lable, clear all partitions
-        mdadm.mdadm_assemble(scan=True)
-        disk_kname = os.path.split(disk)[-1]
-        syspath_partitions = list(
-            os.path.split(prt)[0] for prt in
-            glob.glob("/sys/block/%s/*/partition" % disk_kname))
-        for partition in syspath_partitions:
-            clear_holders(partition)
-            with open(os.path.join(partition, "dev"), "r") as fp:
-                block_no = fp.read().rstrip()
-            partition_path = os.path.realpath(
-                os.path.join("/dev/block", block_no))
-            block.wipe_volume(partition_path, mode=info.get('wipe'))
-
-        clear_holders("/sys/block/%s" % disk_kname)
-        block.wipe_volume(disk, mode=info.get('wipe'))
-
-    # Create partition table on disk
-    if info.get('ptable'):
-        LOG.info("labeling device: '%s' with '%s' partition table", disk,
-                 ptable)
-        if ptable == "gpt":
-            util.subp(["sgdisk", "--clear", disk])
-        elif ptable == "msdos":
-            util.subp(["parted", disk, "--script", "mklabel", "msdos"])
+    if config.value_as_boolean(info.get('preserve')):
+        # Handle preserve flag, verifying if ptable specified in config
+        if config.value_as_boolean(ptable):
+            current_ptable = block.get_part_table_type(disk)
+            if not ((ptable in _dos_names and current_ptable in _dos_names) or
+                    (ptable == 'gpt' and current_ptable == 'gpt')):
+                raise ValueError(
+                    "disk '%s' does not have correct partition table or "
+                    "cannot be read, but preserve is set to true. "
+                    "cannot continue installation." % info.get('id'))
+        LOG.info("disk '%s' marked to be preserved, so keeping partition "
+                 "table" % disk)
+    else:
+        # wipe the disk and create the partition table if instructed to do so
+        if config.value_as_boolean(info.get('wipe')):
+            block.wipe_volume(disk, mode=info.get('wipe'))
+        if config.value_as_boolean(ptable):
+            LOG.info("labeling device: '%s' with '%s' partition table", disk,
+                     ptable)
+            if ptable == "gpt":
+                util.subp(["sgdisk", "--clear", disk])
+            elif ptable in _dos_names:
+                util.subp(["parted", disk, "--script", "mklabel", "msdos"])
+            else:
+                raise ValueError('invalid partition table type: %s' % ptable)
 
     # Make the name if needed
     if info.get('name'):
@@ -542,13 +411,12 @@
 
     disk = get_path_to_storage_volume(device, storage_config)
     partnumber = determine_partition_number(info.get('id'), storage_config)
-
-    disk_kname = os.path.split(
-        get_path_to_storage_volume(device, storage_config))[-1]
+    disk_kname = block.path_to_kname(disk)
+    disk_sysfs_path = block.sys_block_path(disk)
     # consider the disks logical sector size when calculating sectors
     try:
-        prefix = "/sys/block/%s/queue/" % disk_kname
-        with open(prefix + "logical_block_size", "r") as f:
+        lbs_path = os.path.join(disk_sysfs_path, 'queue', 'logical_block_size')
+        with open(lbs_path, 'r') as f:
             l = f.readline()
             logical_block_size_bytes = int(l)
     except:
@@ -566,17 +434,14 @@
                     extended_part_no = determine_partition_number(
                         key, storage_config)
                     break
-            partition_kname = determine_partition_kname(
-                disk_kname, extended_part_no)
-            previous_partition = "/sys/block/%s/%s/" % \
-                (disk_kname, partition_kname)
+            pnum = extended_part_no
         else:
             pnum = find_previous_partition(device, info['id'], storage_config)
-            LOG.debug("previous partition number for '%s' found to be '%s'",
-                      info.get('id'), pnum)
-            partition_kname = determine_partition_kname(disk_kname, pnum)
-            previous_partition = "/sys/block/%s/%s/" % \
-                (disk_kname, partition_kname)
+
+        LOG.debug("previous partition number for '%s' found to be '%s'",
+                  info.get('id'), pnum)
+        partition_kname = block.partition_kname(disk_kname, pnum)
+        previous_partition = os.path.join(disk_sysfs_path, partition_kname)
         LOG.debug("previous partition: {}".format(previous_partition))
         # XXX: sys/block/X/{size,start} is *ALWAYS* in 512b value
         previous_size = util.load_file(os.path.join(previous_partition,
@@ -629,9 +494,9 @@
         length_sectors = length_sectors + (logdisks * alignment_offset)
 
     # Handle preserve flag
-    if info.get('preserve'):
+    if config.value_as_boolean(info.get('preserve')):
         return
-    elif storage_config.get(device).get('preserve'):
+    elif config.value_as_boolean(storage_config.get(device).get('preserve')):
         raise NotImplementedError("Partition '%s' is not marked to be \
             preserved, but device '%s' is. At this time, preserving devices \
             but not also the partitions on the devices is not supported, \
@@ -674,11 +539,16 @@
     else:
         raise ValueError("parent partition has invalid partition table")
 
-    # Wipe the partition if told to do so
-    if info.get('wipe') and info.get('wipe') != "none":
-        block.wipe_volume(
-            get_path_to_storage_volume(info.get('id'), storage_config),
-            mode=info.get('wipe'))
+    # Wipe the partition if told to do so, do not wipe dos extended partitions
+    # as this may damage the extended partition table
+    if config.value_as_boolean(info.get('wipe')):
+        if info.get('flag') == "extended":
+            LOG.warn("extended partitions do not need wiping, so skipping: "
+                     "'%s'" % info.get('id'))
+        else:
+            block.wipe_volume(
+                get_path_to_storage_volume(info.get('id'), storage_config),
+                mode=info.get('wipe'))
     # Make the name if needed
     if storage_config.get(device).get('name') and partition_type != 'extended':
         make_dname(info.get('id'), storage_config)
@@ -694,7 +564,7 @@
     volume_path = get_path_to_storage_volume(volume, storage_config)
 
     # Handle preserve flag
-    if info.get('preserve'):
+    if config.value_as_boolean(info.get('preserve')):
         # Volume marked to be preserved, not formatting
         return
 
@@ -776,26 +646,21 @@
                             storage_config))
 
     # Handle preserve flag
-    if info.get('preserve'):
+    if config.value_as_boolean(info.get('preserve')):
         # LVM will probably be offline, so start it
         util.subp(["vgchange", "-a", "y"])
         # Verify that volgroup exists and contains all specified devices
-        current_paths = []
-        (out, _err) = util.subp(["pvdisplay", "-C", "--separator", "=", "-o",
-                                "vg_name,pv_name", "--noheadings"],
-                                capture=True)
-        for line in out.splitlines():
-            if name in line:
-                current_paths.append(line.split("=")[-1])
-        if set(current_paths) != set(device_paths):
-            raise ValueError("volgroup '%s' marked to be preserved, but does \
-                             not exist or does not contain the right physical \
-                             volumes" % info.get('id'))
+        if set(lvm.get_pvols_in_volgroup(name)) != set(device_paths):
+            raise ValueError("volgroup '%s' marked to be preserved, but does "
+                             "not exist or does not contain the right "
+                             "physical volumes" % info.get('id'))
     else:
         # Create vgrcreate command and run
-        cmd = ["vgcreate", name]
-        cmd.extend(device_paths)
-        util.subp(cmd)
+        # capture output to avoid printing it to log
+        util.subp(['vgcreate', name] + device_paths, capture=True)
+
+    # refresh lvmetad
+    lvm.lvm_scan()
 
 
 def lvm_partition_handler(info, storage_config):
@@ -805,28 +670,23 @@
         raise ValueError("lvm volgroup for lvm partition must be specified")
     if not name:
         raise ValueError("lvm partition name must be specified")
+    if info.get('ptable'):
+        raise ValueError("Partition tables on top of lvm logical volumes is "
+                         "not supported")
 
     # Handle preserve flag
-    if info.get('preserve'):
-        (out, _err) = util.subp(["lvdisplay", "-C", "--separator", "=", "-o",
-                                "lv_name,vg_name", "--noheadings"],
-                                capture=True)
-        found = False
-        for line in out.splitlines():
-            if name in line:
-                if volgroup == line.split("=")[-1]:
-                    found = True
-                    break
-        if not found:
-            raise ValueError("lvm partition '%s' marked to be preserved, but \
-                             does not exist or does not mach storage \
-                             configuration" % info.get('id'))
+    if config.value_as_boolean(info.get('preserve')):
+        if name not in lvm.get_lvols_in_volgroup(volgroup):
+            raise ValueError("lvm partition '%s' marked to be preserved, but "
+                             "does not exist or does not mach storage "
+                             "configuration" % info.get('id'))
     elif storage_config.get(info.get('volgroup')).get('preserve'):
-        raise NotImplementedError("Lvm Partition '%s' is not marked to be \
-            preserved, but volgroup '%s' is. At this time, preserving \
-            volgroups but not also the lvm partitions on the volgroup is \
-            not supported, because of the possibility of damaging lvm \
-            partitions intended to be preserved." % (info.get('id'), volgroup))
+        raise NotImplementedError(
+            "Lvm Partition '%s' is not marked to be preserved, but volgroup "
+            "'%s' is. At this time, preserving volgroups but not also the lvm "
+            "partitions on the volgroup is not supported, because of the "
+            "possibility of damaging lvm  partitions intended to be "
+            "preserved." % (info.get('id'), volgroup))
     else:
         cmd = ["lvcreate", volgroup, "-n", name]
         if info.get('size'):
@@ -836,9 +696,8 @@
 
         util.subp(cmd)
 
-    if info.get('ptable'):
-        raise ValueError("Partition tables on top of lvm logical volumes is \
-                         not supported")
+    # refresh lvmetad
+    lvm.lvm_scan()
 
     make_dname(info.get('id'), storage_config)
 
@@ -925,7 +784,7 @@
                   zip(spare_devices, spare_device_paths)))
 
     # Handle preserve flag
-    if info.get('preserve'):
+    if config.value_as_boolean(info.get('preserve')):
         # check if the array is already up, if not try to assemble
         if not mdadm.md_check(md_devname, raidlevel,
                               device_paths, spare_device_paths):
@@ -981,9 +840,6 @@
         raise ValueError("backing device and cache device for bcache"
                          " must be specified")
 
-    # The bcache module is not loaded when bcache is installed by apt-get, so
-    # we will load it now
-    util.subp(["modprobe", "bcache"])
     bcache_sysfs = "/sys/fs/bcache"
     udevadm_settle(exists=bcache_sysfs)
 
@@ -1003,7 +859,7 @@
                           bcache_device, expected)
                 return
             LOG.debug('bcache device path not found: %s', expected)
-            local_holders = get_holders(bcache_device)
+            local_holders = clear_holders.get_holders(bcache_device)
             LOG.debug('got initial holders being "%s"', local_holders)
             if len(local_holders) == 0:
                 raise ValueError("holders == 0 , expected non-zero")
@@ -1033,7 +889,7 @@
 
     if cache_device:
         # /sys/class/block/XXX/YYY/
-        cache_device_sysfs = block_find_sysfs_path(cache_device)
+        cache_device_sysfs = block.sys_block_path(cache_device)
 
         if os.path.exists(os.path.join(cache_device_sysfs, "bcache")):
             LOG.debug('caching device already exists at {}/bcache. Read '
@@ -1058,7 +914,7 @@
         ensure_bcache_is_registered(cache_device, target_sysfs_path)
 
     if backing_device:
-        backing_device_sysfs = block_find_sysfs_path(backing_device)
+        backing_device_sysfs = block.sys_block_path(backing_device)
         target_sysfs_path = os.path.join(backing_device_sysfs, "bcache")
         if not os.path.exists(os.path.join(backing_device_sysfs, "bcache")):
             util.subp(["make-bcache", "-B", backing_device])
@@ -1066,7 +922,7 @@
 
         # via the holders we can identify which bcache device we just created
         # for a given backing device
-        holders = get_holders(backing_device)
+        holders = clear_holders.get_holders(backing_device)
         if len(holders) != 1:
             err = ('Invalid number {} of holding devices:'
                    ' "{}"'.format(len(holders), holders))
@@ -1158,6 +1014,21 @@
     # set up reportstack
     stack_prefix = state.get('report_stack_prefix', '')
 
+    # shut down any already existing storage layers above any disks used in
+    # config that have 'wipe' set
+    with events.ReportEventStack(
+            name=stack_prefix, reporting_enabled=True, level='INFO',
+            description="removing previous storage devices"):
+        clear_holders.start_clear_holders_deps()
+        disk_paths = [get_path_to_storage_volume(k, storage_config_dict)
+                      for (k, v) in storage_config_dict.items()
+                      if v.get('type') == 'disk' and
+                      config.value_as_boolean(v.get('wipe')) and
+                      not config.value_as_boolean(v.get('preserve'))]
+        clear_holders.clear_holders(disk_paths)
+        # if anything was not properly shut down, stop installation
+        clear_holders.assert_clear(disk_paths)
+
     for item_id, command in storage_config_dict.items():
         handler = command_handlers.get(command['type'])
         if not handler:

=== modified file 'curtin/commands/block_wipe.py'
--- curtin/commands/block_wipe.py	2016-05-10 16:13:29 +0000
+++ curtin/commands/block_wipe.py	2016-10-03 18:55:20 +0000
@@ -21,7 +21,6 @@
 
 
 def wipe_main(args):
-    #  curtin clear-holders device [device2 [device3]]
     for blockdev in args.devices:
         try:
             block.wipe_volume(blockdev, mode=args.mode)
@@ -36,7 +35,7 @@
 CMD_ARGUMENTS = (
     ((('-m', '--mode'),
       {'help': 'mode for wipe.', 'action': 'store',
-       'default': 'superblocks',
+       'default': 'superblock',
        'choices': ['zero', 'superblock', 'superblock-recursive', 'random']}),
      ('devices',
       {'help': 'devices to wipe', 'default': [], 'nargs': '+'}),

=== added file 'curtin/commands/clear_holders.py'
--- curtin/commands/clear_holders.py	1970-01-01 00:00:00 +0000
+++ curtin/commands/clear_holders.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,48 @@
+#   Copyright (C) 2016 Canonical Ltd.
+#
+#   Author: Wesley Wiedenmeier <wesley.wiedenmeier@canonical.com>
+#
+#   Curtin is free software: you can redistribute it and/or modify it under
+#   the terms of the GNU Affero General Public License as published by the
+#   Free Software Foundation, either version 3 of the License, or (at your
+#   option) any later version.
+#
+#   Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
+#   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+#   FOR A PARTICULAR PURPOSE.  See the GNU Affero General Public License for
+#   more details.
+#
+#   You should have received a copy of the GNU Affero General Public License
+#   along with Curtin.  If not, see <http://www.gnu.org/licenses/>.
+
+from curtin import block
+from . import populate_one_subcmd
+
+
+def clear_holders_main(args):
+    """
+    wrapper for clear_holders accepting cli args
+    """
+    if (not all(block.is_block_device(device) for device in args.devices) or
+            len(args.devices) == 0):
+        raise ValueError('invalid devices specified')
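+    # prepare the system for clear_holders, e.g. loading kernel modules
+    # needed to detect and shut down existing storage layers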
+    block.clear_holders.start_clear_holders_deps()
+    block.clear_holders.clear_holders(args.devices, try_preserve=args.preserve)
+    if args.preserve:
+        print('ran clear_holders attempting to preserve data. however, '
+              'hotplug support for some devices may cause holders to restart ')
+    block.clear_holders.assert_clear(args.devices)
+
+
+CMD_ARGUMENTS = (
+    (('devices',
+      {'help': 'devices to free', 'default': [], 'nargs': '+'}),
+     (('-p', '--preserve'),
+      {'help': 'try to shut down holders without erasing anything',
+       'default': False, 'action': 'store_true'}),
+     )
+)
+
+
+def POPULATE_SUBCMD(parser):
+    populate_one_subcmd(parser, CMD_ARGUMENTS, clear_holders_main)

=== modified file 'curtin/commands/curthooks.py'
--- curtin/commands/curthooks.py	2016-10-03 18:00:41 +0000
+++ curtin/commands/curthooks.py	2016-10-03 18:55:20 +0000
@@ -16,10 +16,8 @@
 #   along with Curtin.  If not, see <http://www.gnu.org/licenses/>.
 
 import copy
-import glob
 import os
 import platform
-import re
 import sys
 import shutil
 import textwrap
@@ -30,8 +28,8 @@
 from curtin.log import LOG
 from curtin import swap
 from curtin import util
-from curtin import net
 from curtin.reporter import events
+from curtin.commands import apply_net, apt_config
 
 from . import populate_one_subcmd
 
@@ -90,45 +88,15 @@
                                          info.get('perms', "0644")))
 
 
-def apt_config(cfg, target):
-    # cfg['apt_proxy']
-
-    proxy_cfg_path = os.path.sep.join(
-        [target, '/etc/apt/apt.conf.d/90curtin-aptproxy'])
-    if cfg.get('apt_proxy'):
-        util.write_file(
-            proxy_cfg_path,
-            content='Acquire::HTTP::Proxy "%s";\n' % cfg['apt_proxy'])
+def do_apt_config(cfg, target):
+    cfg = apt_config.translate_old_apt_features(cfg)
+    apt_cfg = cfg.get("apt")
+    if apt_cfg is not None:
+        LOG.info("curthooks handling apt to target %s with config %s",
+                 target, apt_cfg)
+        apt_config.handle_apt(apt_cfg, target)
     else:
-        if os.path.isfile(proxy_cfg_path):
-            os.unlink(proxy_cfg_path)
-
-    # cfg['apt_mirrors']
-    # apt_mirrors:
-    #  ubuntu_archive: http://local.archive/ubuntu
-    #  ubuntu_security: http://local.archive/ubuntu
-    sources_list = os.path.sep.join([target, '/etc/apt/sources.list'])
-    if (isinstance(cfg.get('apt_mirrors'), dict) and
-            os.path.isfile(sources_list)):
-        repls = [
-            ('ubuntu_archive', r'http://\S*[.]*archive.ubuntu.com/\S*'),
-            ('ubuntu_security', r'http://security.ubuntu.com/\S*'),
-        ]
-        content = None
-        for name, regex in repls:
-            mirror = cfg['apt_mirrors'].get(name)
-            if not mirror:
-                continue
-
-            if content is None:
-                with open(sources_list) as fp:
-                    content = fp.read()
-                util.write_file(sources_list + ".dist", content)
-
-            content = re.sub(regex, mirror + " ", content)
-
-        if content is not None:
-            util.write_file(sources_list, content)
+        LOG.info("No apt config provided, skipping")
 
 
 def disable_overlayroot(cfg, target):
@@ -140,51 +108,6 @@
         shutil.move(local_conf, local_conf + ".old")
 
 
-def clean_cloud_init(target):
-    flist = glob.glob(
-        os.path.sep.join([target, "/etc/cloud/cloud.cfg.d/*dpkg*"]))
-
-    LOG.debug("cleaning cloud-init config from: %s" % flist)
-    for dpkg_cfg in flist:
-        os.unlink(dpkg_cfg)
-
-
-def _maybe_remove_legacy_eth0(target,
-                              path="/etc/network/interfaces.d/eth0.cfg"):
-    """Ubuntu cloud images previously included a 'eth0.cfg' that had
-       hard coded content.  That file would interfere with the rendered
-       configuration if it was present.
-
-       if the file does not exist do nothing.
-       If the file exists:
-         - with known content, remove it and warn
-         - with unknown content, leave it and warn
-    """
-
-    cfg = os.path.sep.join([target, path])
-    if not os.path.exists(cfg):
-        LOG.warn('Failed to find legacy conf file %s', cfg)
-        return
-
-    bmsg = "Dynamic networking config may not apply."
-    try:
-        contents = util.load_file(cfg)
-        known_contents = ["auto eth0", "iface eth0 inet dhcp"]
-        lines = [f.strip() for f in contents.splitlines()
-                 if not f.startswith("#")]
-        if lines == known_contents:
-            util.del_file(cfg)
-            msg = "removed %s with known contents" % cfg
-        else:
-            msg = (bmsg + " '%s' exists with user configured content." % cfg)
-    except:
-        msg = bmsg + " %s exists, but could not be read." % cfg
-        LOG.exception(msg)
-        return
-
-    LOG.warn(msg)
-
-
 def setup_zipl(cfg, target):
     if platform.machine() != 's390x':
         return
@@ -232,8 +155,8 @@
 def run_zipl(cfg, target):
     if platform.machine() != 's390x':
         return
-    with util.RunInChroot(target) as in_chroot:
-        in_chroot(['zipl'])
+    with util.ChrootableTarget(target) as in_chroot:
+        in_chroot.subp(['zipl'])
 
 
 def install_kernel(cfg, target):
@@ -250,126 +173,45 @@
     mapping = copy.deepcopy(KERNEL_MAPPING)
     config.merge_config(mapping, kernel_cfg.get('mapping', {}))
 
-    with util.RunInChroot(target) as in_chroot:
-
-        if kernel_package:
-            util.install_packages([kernel_package], target=target)
-            return
-
-        # uname[2] is kernel name (ie: 3.16.0-7-generic)
-        # version gets X.Y.Z, flavor gets anything after second '-'.
-        kernel = os.uname()[2]
-        codename, err = in_chroot(['lsb_release', '--codename', '--short'],
-                                  capture=True)
-        codename = codename.strip()
-        version, abi, flavor = kernel.split('-', 2)
-
-        try:
-            map_suffix = mapping[codename][version]
-        except KeyError:
-            LOG.warn("Couldn't detect kernel package to install for %s."
-                     % kernel)
-            if kernel_fallback is not None:
-                util.install_packages([kernel_fallback], target=target)
-            return
-
-        package = "linux-{flavor}{map_suffix}".format(
-            flavor=flavor, map_suffix=map_suffix)
-
-        if util.has_pkg_available(package, target):
-            if util.has_pkg_installed(package, target):
-                LOG.debug("Kernel package '%s' already installed", package)
-            else:
-                LOG.debug("installing kernel package '%s'", package)
-                util.install_packages([package], target=target)
-        else:
-            if kernel_fallback is not None:
-                LOG.info("Kernel package '%s' not available.  "
-                         "Installing fallback package '%s'.",
-                         package, kernel_fallback)
-                util.install_packages([kernel_fallback], target=target)
-            else:
-                LOG.warn("Kernel package '%s' not available and no fallback."
-                         " System may not boot.", package)
-
-
-def apply_debconf_selections(cfg, target):
-    # debconf_selections:
-    #  set1: |
-    #   cloud-init cloud-init/datasources multiselect MAAS
-    #  set2: pkg pkg/value string bar
-    selsets = cfg.get('debconf_selections')
-    if not selsets:
-        LOG.debug("debconf_selections was not set in config")
-        return
-
-    # for each entry in selections, chroot and apply them.
-    # keep a running total of packages we've seen.
-    pkgs_cfgd = set()
-    for key, content in selsets.items():
-        LOG.debug("setting for %s, %s" % (key, content))
-        util.subp(['chroot', target, 'debconf-set-selections'],
-                  data=content.encode())
-        for line in content.splitlines():
-            if line.startswith("#"):
-                continue
-            pkg = re.sub(r"[:\s].*", "", line)
-            pkgs_cfgd.add(pkg)
-
-    pkgs_installed = get_installed_packages(target)
-
-    LOG.debug("pkgs_cfgd: %s" % pkgs_cfgd)
-    LOG.debug("pkgs_installed: %s" % pkgs_installed)
-    need_reconfig = pkgs_cfgd.intersection(pkgs_installed)
-
-    if len(need_reconfig) == 0:
-        LOG.debug("no need for reconfig")
-        return
-
-    # For any packages that are already installed, but have preseed data
-    # we populate the debconf database, but the filesystem configuration
-    # would be preferred on a subsequent dpkg-reconfigure.
-    # so, what we have to do is "know" information about certain packages
-    # to unconfigure them.
-    unhandled = []
-    to_config = []
-    for pkg in need_reconfig:
-        if pkg in CONFIG_CLEANERS:
-            LOG.debug("unconfiguring %s" % pkg)
-            CONFIG_CLEANERS[pkg](target)
-            to_config.append(pkg)
-        else:
-            unhandled.append(pkg)
-
-    if len(unhandled):
-        LOG.warn("The following packages were installed and preseeded, "
-                 "but cannot be unconfigured: %s", unhandled)
-
-    util.subp(['chroot', target, 'dpkg-reconfigure',
-               '--frontend=noninteractive'] +
-              list(to_config), data=None)
-
-
-def get_installed_packages(target=None):
-    cmd = []
-    if target is not None:
-        cmd = ['chroot', target]
-    cmd.extend(['dpkg-query', '--list'])
-
-    (out, _err) = util.subp(cmd, capture=True)
-    if isinstance(out, bytes):
-        out = out.decode()
-
-    pkgs_inst = set()
-    for line in out.splitlines():
-        try:
-            (state, pkg, other) = line.split(None, 2)
-        except ValueError:
-            continue
-        if state.startswith("hi") or state.startswith("ii"):
-            pkgs_inst.add(re.sub(":.*", "", pkg))
-
-    return pkgs_inst
+    if kernel_package:
+        util.install_packages([kernel_package], target=target)
+        return
+
+    # uname[2] is kernel name (ie: 3.16.0-7-generic)
+    # version gets X.Y.Z, flavor gets anything after second '-'.
+    kernel = os.uname()[2]
+    codename, _ = util.subp(['lsb_release', '--codename', '--short'],
+                            capture=True, target=target)
+    codename = codename.strip()
+    version, abi, flavor = kernel.split('-', 2)
+
+    try:
+        map_suffix = mapping[codename][version]
+    except KeyError:
+        LOG.warn("Couldn't detect kernel package to install for %s."
+                 % kernel)
+        if kernel_fallback is not None:
+            util.install_packages([kernel_fallback], target=target)
+        return
+
+    package = "linux-{flavor}{map_suffix}".format(
+        flavor=flavor, map_suffix=map_suffix)
+
+    if util.has_pkg_available(package, target):
+        if util.has_pkg_installed(package, target):
+            LOG.debug("Kernel package '%s' already installed", package)
+        else:
+            LOG.debug("installing kernel package '%s'", package)
+            util.install_packages([package], target=target)
+    else:
+        if kernel_fallback is not None:
+            LOG.info("Kernel package '%s' not available.  "
+                     "Installing fallback package '%s'.",
+                     package, kernel_fallback)
+            util.install_packages([kernel_fallback], target=target)
+        else:
+            LOG.warn("Kernel package '%s' not available and no fallback."
+                     " System may not boot.", package)
 
 
 def setup_grub(cfg, target):
@@ -498,12 +340,11 @@
         util.subp(args + instdevs, env=env)
 
 
-def update_initramfs(target, all_kernels=False):
+def update_initramfs(target=None, all_kernels=False):
     cmd = ['update-initramfs', '-u']
     if all_kernels:
         cmd.extend(['-k', 'all'])
-    with util.RunInChroot(target) as in_chroot:
-        in_chroot(cmd)
+    util.subp(cmd, target=target)
 
 
 def copy_fstab(fstab, target):
@@ -533,7 +374,6 @@
 
 
 def apply_networking(target, state):
-    netstate = state.get('network_state')
     netconf = state.get('network_config')
     interfaces = state.get('interfaces')
 
@@ -544,22 +384,13 @@
                 return True
         return False
 
-    ns = None
-    if is_valid_src(netstate):
-        LOG.debug("applying network_state")
-        ns = net.network_state.from_state_file(netstate)
-    elif is_valid_src(netconf):
-        LOG.debug("applying network_config")
-        ns = net.parse_net_config(netconf)
-
-    if ns is not None:
-        net.render_network_state(target=target, network_state=ns)
+    if is_valid_src(netconf):
+        LOG.info("applying network_config")
+        apply_net.apply_net(target, network_state=None, network_config=netconf)
     else:
         LOG.debug("copying interfaces")
         copy_interfaces(interfaces, target)
 
-    _maybe_remove_legacy_eth0(target)
-
 
 def copy_interfaces(interfaces, target):
     if not interfaces:
@@ -704,8 +535,8 @@
 
         # FIXME: this assumes grub. need more generic way to update root=
         util.ensure_dir(os.path.sep.join([target, os.path.dirname(grub_dev)]))
-        with util.RunInChroot(target) as in_chroot:
-            in_chroot(['update-grub'])
+        with util.ChrootableTarget(target) as in_chroot:
+            in_chroot.subp(['update-grub'])
 
     else:
         LOG.warn("Not sure how this will boot")
@@ -740,7 +571,7 @@
     }
 
     needed_packages = []
-    installed_packages = get_installed_packages(target)
+    installed_packages = util.get_installed_packages(target)
     for cust_cfg, pkg_reqs in custom_configs.items():
         if cust_cfg not in cfg:
             continue
@@ -820,7 +651,7 @@
             name=stack_prefix, reporting_enabled=True, level="INFO",
             description="writing config files and configuring apt"):
         write_files(cfg, target)
-        apt_config(cfg, target)
+        do_apt_config(cfg, target)
         disable_overlayroot(cfg, target)
 
     # packages may be needed prior to installing kernel
@@ -834,8 +665,8 @@
         copy_mdadm_conf(mdadm_location, target)
         # as per https://bugs.launchpad.net/ubuntu/+source/mdadm/+bug/964052
         # reconfigure mdadm
-        util.subp(['chroot', target, 'dpkg-reconfigure',
-                   '--frontend=noninteractive', 'mdadm'], data=None)
+        util.subp(['dpkg-reconfigure', '--frontend=noninteractive', 'mdadm'],
+                  data=None, target=target)
 
     with events.ReportEventStack(
             name=stack_prefix, reporting_enabled=True, level="INFO",
@@ -843,7 +674,6 @@
         setup_zipl(cfg, target)
         install_kernel(cfg, target)
         run_zipl(cfg, target)
-        apply_debconf_selections(cfg, target)
 
         restore_dist_interfaces(cfg, target)
 
@@ -906,8 +736,4 @@
     populate_one_subcmd(parser, CMD_ARGUMENTS, curthooks)
 
 
-CONFIG_CLEANERS = {
-    'cloud-init': clean_cloud_init,
-}
-
 # vi: ts=4 expandtab syntax=python

=== modified file 'curtin/commands/main.py'
--- curtin/commands/main.py	2016-05-10 16:13:29 +0000
+++ curtin/commands/main.py	2016-10-03 18:55:20 +0000
@@ -26,9 +26,10 @@
 from ..deps import install_deps
 
 SUB_COMMAND_MODULES = [
-    'apply_net', 'block-meta', 'block-wipe', 'curthooks', 'extract',
-    'hook', 'in-target', 'install', 'mkfs', 'net-meta',
-    'pack', 'swap', 'system-install', 'system-upgrade']
+    'apply_net', 'block-info', 'block-meta', 'block-wipe', 'curthooks',
+    'clear-holders', 'extract', 'hook', 'in-target', 'install', 'mkfs',
+    'net-meta', 'apt-config', 'pack', 'swap', 'system-install',
+    'system-upgrade']
 
 
 def add_subcmd(subparser, subcmd):

=== modified file 'curtin/config.py'
--- curtin/config.py	2016-03-18 14:16:45 +0000
+++ curtin/config.py	2016-10-03 18:55:20 +0000
@@ -138,6 +138,5 @@
 
 
 def value_as_boolean(value):
-    if value in (False, None, '0', 0, 'False', 'false', ''):
-        return False
-    return True
+    false_values = (False, None, 0, '0', 'False', 'false', 'None', 'none', '')
+    return value not in false_values

=== added file 'curtin/gpg.py'
--- curtin/gpg.py	1970-01-01 00:00:00 +0000
+++ curtin/gpg.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,74 @@
+#   Copyright (C) 2016 Canonical Ltd.
+#
+#   Author: Scott Moser <scott.moser@canonical.com>
+#           Christian Ehrhardt <christian.ehrhardt@canonical.com>
+#
+#   Curtin is free software: you can redistribute it and/or modify it under
+#   the terms of the GNU Affero General Public License as published by the
+#   Free Software Foundation, either version 3 of the License, or (at your
+#   option) any later version.
+#
+#   Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
+#   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+#   FOR A PARTICULAR PURPOSE.  See the GNU Affero General Public License for
+#   more details.
+#
+#   You should have received a copy of the GNU Affero General Public License
+#   along with Curtin.  If not, see <http://www.gnu.org/licenses/>.
+""" gpg.py
+gpg related utilities to get raw key data by key id
+"""
+
+from curtin import util
+
+from .log import LOG
+
+
+def export_armour(key):
+    """Export gpg key, armoured key gets returned"""
+    try:
+        (armour, _) = util.subp(["gpg", "--export", "--armour", key],
+                                capture=True)
+    except util.ProcessExecutionError as error:
+        # debug, since it happens for any key not on the system initially
+        LOG.debug('Failed to export armoured key "%s": %s', key, error)
+        armour = None
+    return armour
+
+
+def recv_key(key, keyserver):
+    """Receive gpg key from the specified keyserver"""
+    LOG.debug('Receive gpg key "%s"', key)
+    try:
+        util.subp(["gpg", "--keyserver", keyserver, "--recv", key],
+                  capture=True)
+    except util.ProcessExecutionError as error:
+        raise ValueError(('Failed to import key "%s" '
+                          'from server "%s" - error %s') %
+                         (key, keyserver, error))
+
+
+def delete_key(key):
+    """Delete the specified key from the local gpg ring"""
+    try:
+        util.subp(["gpg", "--batch", "--yes", "--delete-keys", key],
+                  capture=True)
+    except util.ProcessExecutionError as error:
+        LOG.warn('Failed to delete key "%s": %s', key, error)
+
+
+def getkeybyid(keyid, keyserver='keyserver.ubuntu.com'):
+    """get gpg keyid from keyserver"""
+    armour = export_armour(keyid)
+    if not armour:
+        try:
+            recv_key(keyid, keyserver=keyserver)
+            armour = export_armour(keyid)
+        except ValueError:
+            LOG.exception('Failed to obtain gpg key %s', keyid)
+            raise
+        finally:
+            # delete just imported key to leave environment as it was before
+            delete_key(keyid)
+
+    return armour

=== modified file 'curtin/net/__init__.py'
--- curtin/net/__init__.py	2016-10-03 18:00:41 +0000
+++ curtin/net/__init__.py	2016-10-03 18:55:20 +0000
@@ -299,7 +299,7 @@
             mac = iface.get('mac_address', '')
             # len(macaddr) == 2 * 6 + 5 == 17
             if ifname and mac and len(mac) == 17:
-                content += generate_udev_rule(ifname, mac)
+                content += generate_udev_rule(ifname, mac.lower())
 
     return content
 
@@ -349,7 +349,7 @@
         'subnets',
         'type',
     ]
-    if iface['type'] not in ['bond', 'bridge']:
+    if iface['type'] not in ['bond', 'bridge', 'vlan']:
         ignore_map.append('mac_address')
 
     for key, value in iface.items():
@@ -361,26 +361,52 @@
     return content
 
 
-def render_route(route):
-    content = "up route add"
+def render_route(route, indent=""):
+    """When rendering routes for an iface, in some cases applying a route
+    may result in the route command returning non-zero which produces
+    some confusing output for users manually using ifup/ifdown[1].  To
+    that end, we will optionally include an '|| true' postfix to each
+    route line allowing users to work with ifup/ifdown without using
+    --force option.
+
+    We may at some point not want to emit this additional postfix, and
+    add a 'strict' flag to this function.  When called with strict=True,
+    then we will not append the postfix.
+
+    1. http://askubuntu.com/questions/168033/
+             how-to-set-static-routes-in-ubuntu-server
+    """
+    content = []
+    up = indent + "post-up route add"
+    down = indent + "pre-down route del"
+    or_true = " || true"
     mapping = {
         'network': '-net',
         'netmask': 'netmask',
         'gateway': 'gw',
         'metric': 'metric',
     }
-    for k in ['network', 'netmask', 'gateway', 'metric']:
-        if k in route:
-            content += " %s %s" % (mapping[k], route[k])
-
-    content += '\n'
-    return content
-
-
-def iface_start_entry(iface, index):
+    if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0':
+        default_gw = " default gw %s" % route['gateway']
+        content.append(up + default_gw + or_true)
+        content.append(down + default_gw + or_true)
+    elif route['network'] == '::' and route['netmask'] == 0:
+        # ipv6!
+        default_gw = " -A inet6 default gw %s" % route['gateway']
+        content.append(up + default_gw + or_true)
+        content.append(down + default_gw + or_true)
+    else:
+        route_line = ""
+        for k in ['network', 'netmask', 'gateway', 'metric']:
+            if k in route:
+                route_line += " %s %s" % (mapping[k], route[k])
+        content.append(up + route_line + or_true)
+        content.append(down + route_line + or_true)
+    return "\n".join(content)
+
+
+def iface_start_entry(iface):
     fullname = iface['name']
-    if index != 0:
-        fullname += ":%s" % index
 
     control = iface['control']
     if control == "auto":
@@ -397,6 +423,16 @@
             "iface {fullname} {inet} {mode}\n").format(**subst)
 
 
+def subnet_is_ipv6(subnet):
+    # a type of 'static6' or 'dhcp6', or a static type with an IPv6
+    # address, marks the subnet as IPv6
+    if subnet['type'].endswith('6'):
+        return True
+    elif subnet['type'] == 'static' and ":" in subnet['address']:
+        return True
+    return False
+
+
 def render_interfaces(network_state):
     ''' Given state, emit etc/network/interfaces content '''
 
@@ -424,42 +460,43 @@
             content += "\n"
         subnets = iface.get('subnets', {})
         if subnets:
-            for index, subnet in zip(range(0, len(subnets)), subnets):
+            for index, subnet in enumerate(subnets):
                 if content[-2:] != "\n\n":
                     content += "\n"
                 iface['index'] = index
                 iface['mode'] = subnet['type']
                 iface['control'] = subnet.get('control', 'auto')
                 subnet_inet = 'inet'
-                if iface['mode'].endswith('6'):
-                    # This is a request for DHCPv6.
-                    subnet_inet += '6'
-                elif iface['mode'] == 'static' and ":" in subnet['address']:
-                    # This is a static IPv6 address.
+                if subnet_is_ipv6(subnet):
                     subnet_inet += '6'
                 iface['inet'] = subnet_inet
-                if iface['mode'].startswith('dhcp'):
+                if subnet['type'].startswith('dhcp'):
                     iface['mode'] = 'dhcp'
 
-                content += iface_start_entry(iface, index)
+                # do not emit multiple 'auto $IFACE' lines as older (precise)
+                # ifupdown complains
+                if "auto %s\n" % (iface['name']) in content:
+                    iface['control'] = 'alias'
+
+                content += iface_start_entry(iface)
                 content += iface_add_subnet(iface, subnet)
                 content += iface_add_attrs(iface, index)
-                if len(subnets) > 1 and index == 0:
-                    for i in range(1, len(subnets)):
-                        content += "    post-up ifup %s:%s\n" % (iface['name'],
-                                                                 i)
+
+                for route in subnet.get('routes', []):
+                    content += render_route(route, indent="    ") + '\n'
+
         else:
             # ifenslave docs say to auto the slave devices
-            if 'bond-master' in iface:
+            if 'bond-master' in iface or 'bond-slaves' in iface:
                 content += "auto {name}\n".format(**iface)
             content += "iface {name} {inet} {mode}\n".format(**iface)
-            content += iface_add_attrs(iface, index)
+            content += iface_add_attrs(iface, 0)
 
     for route in network_state.get('routes'):
         content += render_route(route)
 
     # global replacements until v2 format
-    content = content.replace('mac_address', 'hwaddress')
+    content = content.replace('mac_address', 'hwaddress ether')
 
     # Play nice with others and source eni config files
     content += "\nsource /etc/network/interfaces.d/*.cfg\n"

=== modified file 'curtin/net/network_state.py'
--- curtin/net/network_state.py	2015-10-02 16:19:07 +0000
+++ curtin/net/network_state.py	2016-10-03 18:55:20 +0000
@@ -121,6 +121,18 @@
         iface = interfaces.get(command['name'], {})
         for param, val in command.get('params', {}).items():
             iface.update({param: val})
+
+        # convert subnet ipv6 netmask to cidr as needed
+        subnets = command.get('subnets')
+        if subnets:
+            for subnet in subnets:
+                if subnet['type'] == 'static':
+                    if 'netmask' in subnet and ':' in subnet['address']:
+                        subnet['netmask'] = mask2cidr(subnet['netmask'])
+                        for route in subnet.get('routes', []):
+                            if 'netmask' in route:
+                                route['netmask'] = mask2cidr(route['netmask'])
+
         iface.update({
             'name': command.get('name'),
             'type': command.get('type'),
@@ -130,7 +142,7 @@
             'mtu': command.get('mtu'),
             'address': None,
             'gateway': None,
-            'subnets': command.get('subnets'),
+            'subnets': subnets,
         })
         self.network_state['interfaces'].update({command.get('name'): iface})
         self.dump_network_state()
@@ -141,6 +153,7 @@
             iface eth0.222 inet static
                     address 10.10.10.1
                     netmask 255.255.255.0
+                    hwaddress ether BC:76:4E:06:96:B3
                     vlan-raw-device eth0
         '''
         required_keys = [
@@ -332,6 +345,37 @@
     return ".".join([str(x) for x in mask])
 
 
+def ipv4mask2cidr(mask):
+    if '.' not in mask:
+        return mask
+    return sum([bin(int(x)).count('1') for x in mask.split('.')])
+
+
+def ipv6mask2cidr(mask):
+    if ':' not in mask:
+        return mask
+
+    bitCount = [0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00,
+                0xff00, 0xff80, 0xffc0, 0xffe0, 0xfff0, 0xfff8, 0xfffc,
+                0xfffe, 0xffff]
+    cidr = 0
+    for word in mask.split(':'):
+        if not word or int(word, 16) == 0:
+            break
+        cidr += bitCount.index(int(word, 16))
+
+    return cidr
+
+
+def mask2cidr(mask):
+    if ':' in mask:
+        return ipv6mask2cidr(mask)
+    elif '.' in mask:
+        return ipv4mask2cidr(mask)
+    else:
+        return mask
+
+
 if __name__ == '__main__':
     import sys
     import random

=== modified file 'curtin/util.py'
--- curtin/util.py	2016-10-03 18:00:41 +0000
+++ curtin/util.py	2016-10-03 18:55:20 +0000
@@ -16,18 +16,35 @@
 #   along with Curtin.  If not, see <http://www.gnu.org/licenses/>.
 
 import argparse
+import collections
 import errno
 import glob
 import json
 import os
 import platform
+import re
 import shutil
+import socket
 import subprocess
 import stat
 import sys
 import tempfile
 import time
 
+# avoid the dependency on python3-six as used in cloud-init
+try:
+    from urlparse import urlparse
+except ImportError:
+    # python3
+    # avoid triggering pylint, https://github.com/PyCQA/pylint/issues/769
+    # pylint:disable=import-error,no-name-in-module
+    from urllib.parse import urlparse
+
+try:
+    string_types = (basestring,)
+except NameError:
+    string_types = (str,)
+
 from .log import LOG
 
 _INSTALLED_HELPERS_PATH = '/usr/lib/curtin/helpers'
@@ -35,14 +52,22 @@
 
 _LSB_RELEASE = {}
 
+_DNS_REDIRECT_IP = None
+
+# matcher used in template rendering functions
+BASIC_MATCHER = re.compile(r'\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)')
+
 
 def _subp(args, data=None, rcs=None, env=None, capture=False, shell=False,
-          logstring=False, decode="replace"):
+          logstring=False, decode="replace", target=None):
     if rcs is None:
         rcs = [0]
 
     devnull_fp = None
     try:
+        if target_path(target) != "/":
+            args = ['chroot', target] + list(args)
+
         if not logstring:
             LOG.debug(("Running command %s with allowed return codes %s"
                        " (shell=%s, capture=%s)"), args, rcs, shell, capture)
@@ -118,6 +143,8 @@
         a list of times to sleep in between retries.  After each failure
         subp will sleep for N seconds and then try again.  A value of [1, 3]
         means to run, sleep 1, run, sleep 3, run and then return exit code.
+    :param target:
+        run the command as 'chroot target <args>'
     """
     retries = []
     if "retries" in kwargs:
@@ -277,15 +304,29 @@
 
 
 def write_file(filename, content, mode=0o644, omode="w"):
+    """
+    write 'content' to file at 'filename' using python open mode 'omode'.
+    if mode is set (default 0o644), then chmod the file to mode.
+    """
     ensure_dir(os.path.dirname(filename))
     with open(filename, omode) as fp:
         fp.write(content)
-    os.chmod(filename, mode)
-
-
-def load_file(path, mode="r"):
+    if mode:
+        os.chmod(filename, mode)
+
+
+def load_file(path, mode="r", read_len=None, offset=0):
     with open(path, mode) as fp:
-        return fp.read()
+        if offset:
+            fp.seek(offset)
+        return fp.read(read_len) if read_len else fp.read()
+
+
+def file_size(path):
+    """get the size of a file"""
+    with open(path, 'rb') as fp:
+        fp.seek(0, 2)
+        return fp.tell()
 
 
 def del_file(path):
@@ -311,7 +352,7 @@
          'done',
          ''])
 
-    fpath = os.path.join(target, "usr/sbin/policy-rc.d")
+    fpath = target_path(target, "/usr/sbin/policy-rc.d")
 
     if os.path.isfile(fpath):
         return False
@@ -322,7 +363,7 @@
 
 def undisable_daemons_in_root(target):
     try:
-        os.unlink(os.path.join(target, "usr/sbin/policy-rc.d"))
+        os.unlink(target_path(target, "/usr/sbin/policy-rc.d"))
     except OSError as e:
         if e.errno != errno.ENOENT:
             raise
@@ -334,7 +375,7 @@
     def __init__(self, target, allow_daemons=False, sys_resolvconf=True):
         if target is None:
             target = "/"
-        self.target = os.path.abspath(target)
+        self.target = target_path(target)
         self.mounts = ["/dev", "/proc", "/sys"]
         self.umounts = []
         self.disabled_daemons = False
@@ -344,20 +385,21 @@
 
     def __enter__(self):
         for p in self.mounts:
-            tpath = os.path.join(self.target, p[1:])
+            tpath = target_path(self.target, p)
             if do_mount(p, tpath, opts='--bind'):
                 self.umounts.append(tpath)
 
         if not self.allow_daemons:
             self.disabled_daemons = disable_daemons_in_root(self.target)
 
-        target_etc = os.path.join(self.target, "etc")
+        rconf = target_path(self.target, "/etc/resolv.conf")
+        target_etc = os.path.dirname(rconf)
         if self.target != "/" and os.path.isdir(target_etc):
             # never muck with resolv.conf on /
             rconf = os.path.join(target_etc, "resolv.conf")
             rtd = None
             try:
-                rtd = tempfile.mkdtemp(dir=os.path.dirname(rconf))
+                rtd = tempfile.mkdtemp(dir=target_etc)
                 tmp = os.path.join(rtd, "resolv.conf")
                 os.rename(rconf, tmp)
                 self.rconf_d = rtd
@@ -375,25 +417,23 @@
             undisable_daemons_in_root(self.target)
 
         # if /dev is to be unmounted, udevadm settle (LP: #1462139)
-        if os.path.join(self.target, "dev") in self.umounts:
+        if target_path(self.target, "/dev") in self.umounts:
             subp(['udevadm', 'settle'])
 
         for p in reversed(self.umounts):
             do_umount(p)
 
-        rconf = os.path.join(self.target, "etc", "resolv.conf")
+        rconf = target_path(self.target, "/etc/resolv.conf")
         if self.sys_resolvconf and self.rconf_d:
             os.rename(os.path.join(self.rconf_d, "resolv.conf"), rconf)
             shutil.rmtree(self.rconf_d)
 
+    def subp(self, *args, **kwargs):
+        kwargs['target'] = self.target
+        return subp(*args, **kwargs)
 
-class RunInChroot(ChrootableTarget):
-    def __call__(self, args, **kwargs):
-        if self.target != "/":
-            chroot = ["chroot", self.target]
-        else:
-            chroot = []
-        return subp(chroot + args, **kwargs)
+    def path(self, path):
+        return target_path(self.target, path)
 
 
 def is_exe(fpath):
@@ -402,14 +442,13 @@
 
 
 def which(program, search=None, target=None):
-    if target is None or os.path.realpath(target) == "/":
-        target = "/"
+    target = target_path(target)
 
     if os.path.sep in program:
         # if program had a '/' in it, then do not search PATH
         # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls
         # so effectively we set cwd to / (or target)
-        if is_exe(os.path.sep.join((target, program,))):
+        if is_exe(target_path(target, program)):
             return program
 
     if search is None:
@@ -424,8 +463,9 @@
     search = [os.path.abspath(p) for p in search]
 
     for path in search:
-        if is_exe(os.path.sep.join((target, path, program,))):
-            return os.path.sep.join((path, program,))
+        ppath = os.path.sep.join((path, program))
+        if is_exe(target_path(target, ppath)):
+            return ppath
 
     return None
 
@@ -467,33 +507,39 @@
 
 
 def get_architecture(target=None):
-    chroot = []
-    if target is not None:
-        chroot = ['chroot', target]
-    out, _ = subp(chroot + ['dpkg', '--print-architecture'],
-                  capture=True)
+    out, _ = subp(['dpkg', '--print-architecture'], capture=True,
+                  target=target)
     return out.strip()
 
 
 def has_pkg_available(pkg, target=None):
-    chroot = []
-    if target is not None:
-        chroot = ['chroot', target]
-    out, _ = subp(chroot + ['apt-cache', 'pkgnames'], capture=True)
+    out, _ = subp(['apt-cache', 'pkgnames'], capture=True, target=target)
     for item in out.splitlines():
         if pkg == item.strip():
             return True
     return False
 
 
+def get_installed_packages(target=None):
+    (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True)
+
+    pkgs_inst = set()
+    for line in out.splitlines():
+        try:
+            (state, pkg, other) = line.split(None, 2)
+        except ValueError:
+            continue
+        if state.startswith("hi") or state.startswith("ii"):
+            pkgs_inst.add(re.sub(":.*", "", pkg))
+
+    return pkgs_inst
+
+
 def has_pkg_installed(pkg, target=None):
-    chroot = []
-    if target is not None:
-        chroot = ['chroot', target]
     try:
-        out, _ = subp(chroot + ['dpkg-query', '--show', '--showformat',
-                                '${db:Status-Abbrev}', pkg],
-                      capture=True)
+        out, _ = subp(['dpkg-query', '--show', '--showformat',
+                       '${db:Status-Abbrev}', pkg],
+                      capture=True, target=target)
         return out.rstrip() == "ii"
     except ProcessExecutionError:
         return False
@@ -542,13 +588,9 @@
     """Use dpkg-query to extract package pkg's version string
        and parse the version string into a dictionary
     """
-    chroot = []
-    if target is not None:
-        chroot = ['chroot', target]
     try:
-        out, _ = subp(chroot + ['dpkg-query', '--show', '--showformat',
-                                '${Version}', pkg],
-                      capture=True)
+        out, _ = subp(['dpkg-query', '--show', '--showformat',
+                       '${Version}', pkg], capture=True, target=target)
         raw = out.rstrip()
         return parse_dpkg_version(raw, name=pkg, semx=semx)
     except ProcessExecutionError:
@@ -600,11 +642,11 @@
     if comment.endswith("\n"):
         comment = comment[:-1]
 
-    marker = os.path.join(target, marker)
+    marker = target_path(target, marker)
     # if marker exists, check if there are files that would make it obsolete
-    listfiles = [os.path.join(target, "etc/apt/sources.list")]
+    listfiles = [target_path(target, "/etc/apt/sources.list")]
     listfiles += glob.glob(
-        os.path.join(target, "etc/apt/sources.list.d/*.list"))
+        target_path(target, "etc/apt/sources.list.d/*.list"))
 
     if os.path.exists(marker) and not force:
         if len(find_newer(marker, listfiles)) == 0:
@@ -612,7 +654,7 @@
 
     restore_perms = []
 
-    abs_tmpdir = tempfile.mkdtemp(dir=os.path.join(target, 'tmp'))
+    abs_tmpdir = tempfile.mkdtemp(dir=target_path(target, "/tmp"))
     try:
         abs_slist = abs_tmpdir + "/sources.list"
         abs_slistd = abs_tmpdir + "/sources.list.d"
@@ -621,8 +663,8 @@
         ch_slistd = ch_tmpdir + "/sources.list.d"
 
         # this file gets executed on apt-get update sometimes. (LP: #1527710)
-        motd_update = os.path.join(
-            target, "usr/lib/update-notifier/update-motd-updates-available")
+        motd_update = target_path(
+            target, "/usr/lib/update-notifier/update-motd-updates-available")
         pmode = set_unexecutable(motd_update)
         if pmode is not None:
             restore_perms.append((motd_update, pmode),)
@@ -647,8 +689,8 @@
             'update']
 
         # do not using 'run_apt_command' so we can use 'retries' to subp
-        with RunInChroot(target, allow_daemons=True) as inchroot:
-            inchroot(update_cmd, env=env, retries=retries)
+        with ChrootableTarget(target, allow_daemons=True) as inchroot:
+            inchroot.subp(update_cmd, env=env, retries=retries)
     finally:
         for fname, perms in restore_perms:
             os.chmod(fname, perms)
@@ -685,9 +727,8 @@
         return env, cmd
 
     apt_update(target, env=env, comment=' '.join(cmd))
-    ric = RunInChroot(target, allow_daemons=allow_daemons)
-    with ric as inchroot:
-        return inchroot(cmd, env=env)
+    with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot:
+        return inchroot.subp(cmd, env=env)
 
 
 def system_upgrade(aptopts=None, target=None, env=None, allow_daemons=False):
@@ -716,7 +757,7 @@
     """
     Look for "hook" in "target" and run it
     """
-    target_hook = os.path.join(target, 'curtin', hook)
+    target_hook = target_path(target, '/curtin/' + hook)
     if os.path.isfile(target_hook):
         LOG.debug("running %s" % target_hook)
         subp([target_hook])
@@ -828,6 +869,18 @@
     return val
 
 
+def bytes2human(size):
+    """convert size in bytes to human readable"""
+    if not (isinstance(size, (int, float)) and
+            int(size) == size and
+            int(size) >= 0):
+        raise ValueError('size must be an integral value')
+    mpliers = {'B': 1, 'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40}
+    unit_order = sorted(mpliers, key=lambda x: -1 * mpliers[x])
+    unit = next((u for u in unit_order if (size / mpliers[u]) >= 1), 'B')
+    return str(int(size / mpliers[unit])) + unit
+
+
 def import_module(import_str):
     """Import a module."""
     __import__(import_str)
@@ -843,30 +896,42 @@
 
 
 def is_file_not_found_exc(exc):
-    return (isinstance(exc, IOError) and exc.errno == errno.ENOENT)
-
-
-def lsb_release():
+    return (isinstance(exc, (IOError, OSError)) and
+            hasattr(exc, 'errno') and
+            exc.errno in (errno.ENOENT, errno.EIO, errno.ENXIO))
+
+
+def _lsb_release(target=None):
     fmap = {'Codename': 'codename', 'Description': 'description',
             'Distributor ID': 'id', 'Release': 'release'}
+
+    data = {}
+    try:
+        out, _ = subp(['lsb_release', '--all'], capture=True, target=target)
+        for line in out.splitlines():
+            fname, _, val = line.partition(":")
+            if fname in fmap:
+                data[fmap[fname]] = val.strip()
+        missing = [k for k in fmap.values() if k not in data]
+        if len(missing):
+            LOG.warn("Missing fields in lsb_release --all output: %s",
+                     ','.join(missing))
+
+    except ProcessExecutionError as err:
+        LOG.warn("Unable to get lsb_release --all: %s", err)
+        data = {v: "UNAVAILABLE" for v in fmap.values()}
+
+    return data
+
+
+def lsb_release(target=None):
+    if target_path(target) != "/":
+        # do not use or update cache if target is provided
+        return _lsb_release(target)
+
     global _LSB_RELEASE
     if not _LSB_RELEASE:
-        data = {}
-        try:
-            out, err = subp(['lsb_release', '--all'], capture=True)
-            for line in out.splitlines():
-                fname, tok, val = line.partition(":")
-                if fname in fmap:
-                    data[fmap[fname]] = val.strip()
-            missing = [k for k in fmap.values() if k not in data]
-            if len(missing):
-                LOG.warn("Missing fields in lsb_release --all output: %s",
-                         ','.join(missing))
-
-        except ProcessExecutionError as e:
-            LOG.warn("Unable to get lsb_release --all: %s", e)
-            data = {v: "UNAVAILABLE" for v in fmap.values()}
-
+        data = _lsb_release()
         _LSB_RELEASE.update(data)
     return _LSB_RELEASE
 
@@ -881,8 +946,7 @@
 
 
 def json_dumps(data):
-    return json.dumps(data, indent=1, sort_keys=True,
-                      separators=(',', ': ')).encode('utf-8')
+    return json.dumps(data, indent=1, sort_keys=True, separators=(',', ': '))
 
 
 def get_platform_arch():
@@ -895,4 +959,137 @@
     }
     return platform2arch.get(platform.machine(), platform.machine())
 
+
+def basic_template_render(content, params):
+    """This does simple replacement of bash variable like templates.
+
+    It identifies patterns like ${a} or $a and can also identify patterns like
+    ${a.b} or $a.b which will look for a key 'b' in the dictionary rooted
+    by key 'a'.
+    """
+
+    def replacer(match):
+        """ replacer
+            replacer used in regex match to replace content
+        """
+        # Only 1 of the 2 groups will actually have a valid entry.
+        name = match.group(1)
+        if name is None:
+            name = match.group(2)
+        if name is None:
+            raise RuntimeError("Match encountered but no valid group present")
+        path = collections.deque(name.split("."))
+        selected_params = params
+        while len(path) > 1:
+            key = path.popleft()
+            if not isinstance(selected_params, dict):
+                raise TypeError("Can not traverse into"
+                                " non-dictionary '%s' of type %s while"
+                                " looking for subkey '%s'"
+                                % (selected_params,
+                                   selected_params.__class__.__name__,
+                                   key))
+            selected_params = selected_params[key]
+        key = path.popleft()
+        if not isinstance(selected_params, dict):
+            raise TypeError("Can not extract key '%s' from non-dictionary"
+                            " '%s' of type %s"
+                            % (key, selected_params,
+                               selected_params.__class__.__name__))
+        return str(selected_params[key])
+
+    return BASIC_MATCHER.sub(replacer, content)
+
+
+def render_string(content, params):
+    """ render_string
+        render a string following replacement rules as defined in
+        basic_template_render returning the string
+    """
+    if not params:
+        params = {}
+    return basic_template_render(content, params)
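+
+# For illustration (example values, not part of curtin's API):
+#   render_string("host=${net.ip}", {'net': {'ip': '10.0.0.2'}})
+# returns "host=10.0.0.2"; both "${name}" and "$name" forms are matched.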
+
+
+def is_resolvable(name):
+    """determine if a url is resolvable, return a boolean
+    This also attempts to be resilient against dns redirection.
+
+    Note that normal nsswitch resolution is used here.  So in order
+    to avoid any utilization of 'search' entries in /etc/resolv.conf
+    we have to append '.'.
+
+    The top-level 'invalid' domain is invalid per RFC, and example.com
+    should also not exist.  The random entry will be resolved inside
+    the search list.
+    """
+    global _DNS_REDIRECT_IP
+    if _DNS_REDIRECT_IP is None:
+        badips = set()
+        badnames = ("does-not-exist.example.com.", "example.invalid.")
+        badresults = {}
+        for iname in badnames:
+            try:
+                result = socket.getaddrinfo(iname, None, 0, 0,
+                                            socket.SOCK_STREAM,
+                                            socket.AI_CANONNAME)
+                badresults[iname] = []
+                for (_, _, _, cname, sockaddr) in result:
+                    badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
+                    badips.add(sockaddr[0])
+            except (socket.gaierror, socket.error):
+                pass
+        _DNS_REDIRECT_IP = badips
+        if badresults:
+            LOG.debug("detected dns redirection: %s", badresults)
+
+    try:
+        result = socket.getaddrinfo(name, None)
+        # check first result's sockaddr field
+        addr = result[0][4][0]
+        if addr in _DNS_REDIRECT_IP:
+            LOG.debug("dns %s in _DNS_REDIRECT_IP", name)
+            return False
+        LOG.debug("dns %s resolved to '%s'", name, result)
+        return True
+    except (socket.gaierror, socket.error):
+        LOG.debug("dns %s failed to resolve", name)
+        return False
+
+
+def is_resolvable_url(url):
+    """determine if this url is resolvable (existing or ip)."""
+    return is_resolvable(urlparse(url).hostname)
+
+
+def target_path(target, path=None):
+    # return 'path' inside target, accepting target as None
+    if target in (None, ""):
+        target = "/"
+    elif not isinstance(target, string_types):
+        raise ValueError("Unexpected input for target: %s" % target)
+    else:
+        target = os.path.abspath(target)
+        # abspath("//") returns "//" specifically for 2 slashes.
+        if target.startswith("//"):
+            target = target[1:]
+
+    if not path:
+        return target
+
+    # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /.
+    while len(path) and path[0] == "/":
+        path = path[1:]
+
+    return os.path.join(target, path)
+
+
+class RunInChroot(ChrootableTarget):
+    """Backwards compatibility for RunInChroot (LP: #1617375).
+    It needs to work like:
+        with RunInChroot("/target") as in_chroot:
+            in_chroot(["your", "chrooted", "command"])"""
+    __call__ = ChrootableTarget.subp
+
+
 # vi: ts=4 expandtab syntax=python

=== modified file 'debian/changelog'
--- debian/changelog	2016-10-03 17:23:32 +0000
+++ debian/changelog	2016-10-03 18:55:20 +0000
@@ -1,8 +1,38 @@
-curtin (0.1.0~bzr399-0ubuntu1~16.04.1ubuntu1) UNRELEASED; urgency=medium
+curtin (0.1.0~bzr425-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
 
+  [ Scott Moser ]
   * debian/new-upstream-snapshot: add writing of debian changelog entries. 
 
- -- Scott Moser <smoser@ubuntu.com>  Mon, 03 Oct 2016 13:23:11 -0400
+  [ Ryan Harper ]
+  * New upstream snapshot.
+    - unittest,tox.ini: catch and fix issue with trusty-level mock of open 
+    - block/mdadm: add option to ignore mdadm_assemble errors  (LP: #1618429)
+    - curtin/doc: overhaul curtin documentation for readthedocs.org  (LP: #1351085)
+    - curtin.util: re-add support for RunInChroot  (LP: #1617375)
+    - curtin/net: overhaul of eni rendering to handle mixed ipv4/ipv6 configs 
+    - curtin.block: refactor clear_holders logic into block.clear_holders and cli cmd 
+    - curtin.apply_net should exit non-zero upon exception.  (LP: #1615780)
+    - apt: fix bug in disable_suites if sources.list line is blank. 
+    - vmtests: disable Wily in vmtests 
+    - Fix the unittests for test_apt_source. 
+    - get CURTIN_VMTEST_PARALLEL shown correctly in jenkins-runner output 
+    - fix vmtest check_file_strippedline to strip lines before comparing 
+    - fix whitespace damage in tests/vmtests/__init__.py 
+    - fix dpkg-reconfigure when debconf_selections was provided.  (LP: #1609614)
+    - fix apt tests on non-intel arch 
+    - Add apt features to curtin.  (LP: #1574113)
+    - vmtest: easier use of parallel and controlling timeouts 
+    - mkfs.vfat: add force flag for formatting whole disks  (LP: #1597923)
+    - block.mkfs: fix sectorsize flag  (LP: #1597522)
+    - block_meta: cleanup use of sys_block_path and handle cciss knames  (LP: #1562249)
+    - block.get_blockdev_sector_size: handle _lsblock multi result return  (LP: #1598310)
+    - util: add target (chroot) support to subp, add target_path helper. 
+    - block_meta: fallback to parted if blkid does not produce output  (LP: #1524031)
+    - commands.block_wipe:  correct default wipe mode to 'superblock' 
+    - tox.ini: run coverage normally rather than separately 
+    - move uefi boot knowledge from launch and vmtest to xkvm 
+
+ -- Ryan Harper <ryan.harper@canonical.com>  Mon, 03 Oct 2016 13:43:54 -0500
 
 curtin (0.1.0~bzr399-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
 

=== modified file 'doc/conf.py'
--- doc/conf.py	2015-10-02 16:19:07 +0000
+++ doc/conf.py	2016-10-03 18:55:20 +0000
@@ -13,6 +13,11 @@
 
 import sys, os
 
+# Fix path so we can import curtin.__version__
+sys.path.insert(1, os.path.realpath(os.path.join(
+                                    os.path.dirname(__file__), '..')))
+import curtin
+
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
@@ -41,16 +46,16 @@
 
 # General information about the project.
 project = u'curtin'
-copyright = u'2013, Scott Moser'
+copyright = u'2016, Scott Moser, Ryan Harper'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-version = '0.3'
+version = curtin.__version__
 # The full version, including alpha/beta/rc tags.
-release = '0.3'
+release = version
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -93,6 +98,18 @@
 # a list of builtin themes.
 html_theme = 'classic'
 
+# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
+# docs.readthedocs.org
+on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
+
+if not on_rtd:  # only import and set the theme if we're building docs locally
+    import sphinx_rtd_theme
+    html_theme = 'sphinx_rtd_theme'
+    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
+
+# otherwise, readthedocs.org uses their theme by default, so no need to specify
+# it
+
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
@@ -120,7 +137,7 @@
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['static']
+#html_static_path = ['static']
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.

=== removed file 'doc/devel/README-vmtest.txt'
--- doc/devel/README-vmtest.txt	2016-02-12 21:54:46 +0000
+++ doc/devel/README-vmtest.txt	1970-01-01 00:00:00 +0000
@@ -1,152 +0,0 @@
-== Background ==
-Curtin includes a mechanism called 'vmtest' that allows it to actually
-do installs and validate a number of configurations.
-
-The general flow of the vmtests is:
- 1. each test has an associated yaml config file for curtin in examples/tests
- 2. uses curtin-pack to create the user-data for cloud-init to trigger install
- 3. create and install a system using 'tools/launch'.
-    3.1 The install environment is booted from a maas ephemeral image.
-    3.2 kernel & initrd used are from maas images (not part of the image)
-    3.3 network by default is handled via user networking
-    3.4 It creates all empty disks required
-    3.5 cloud-init datasource is provided by launch
-      a) like: ds=nocloud-net;seedfrom=http://10.7.0.41:41518/
-         provided by python webserver start_http
-      b) via -drive file=/tmp/launch.8VOiOn/seed.img,if=virtio,media=cdrom
-         as a seed disk (if booted without external kernel)
-    3.6 dependencies and other preparations are installed at the beginning by
-        curtin inside the ephemeral image prior to configuring the target
- 4. power off the system.
- 5. configure a 'NoCloud' datasource seed image that provides scripts that
-    will run on first boot.
-    5.1 this will contain all our code to gather health data on the install
-    5.2 by cloud-init design this runs only once per instance, if you start
-        the system again this won't be called again
- 6. boot the installed system with 'tools/xkvm'.
-    6.1 reuses the disks that were installed/configured in the former steps
-    6.2 also adds an output disk
-    6.3 additionally the seed image for the data gathering is added
-    6.4 On this boot it will run the provided scripts, write their output to a
-        "data" disk and then shut itself down.
- 7. extract the data from the output disk
- 8. vmtest python code now verifies if the output is as expected.
-
-== Debugging ==
-At 3.1
-  - one can pull data out of the maas image with
-    sudo mount-image-callback your.img -- sh -c 'COMMAND'
-    e.g. sudo mount-image-callback your.img -- sh -c 'cp $MOUNTPOINT/boot/* .'
-At step 3.6 -> 4.
-  - tools/launch can be called in a way to give you console access
-    to do so just call tools/launch but drop the -serial=x parameter.
-    One might want to change "'power_state': {'mode': 'poweroff'}" to avoid
-    the auto reboot before getting control
-    Replace the directory usually seen in the launch calls with a clean fresh
-    directory
-  - In /curtin curtin and its config can be found
-  - if the system gets that far cloud-init will create a user ubuntu/passw0rd
-  - otherwise one can use a cloud-image from  https://cloud-images.ubuntu.com/
-    and add a backdoor user via
-    bzr branch lp:~maas-maintainers/maas/backdoor-image backdoor-image
-    sudo ./backdoor-image -v --user=<USER> --password-auth --password=<PW> IMG
-At step 6 -> 7
-  - You might want to keep all the temporary images around.
-    To do so you can set CURTIN_VMTEST_KEEP_DATA_PASS=all:
-    export CURTIN_VMTEST_KEEP_DATA_PASS=all CURTIN_VMTEST_KEEP_DATA_FAIL=all
-    That will keep the /tmp/tmpXXXXX directories and all files in there for
-    further execution.
-At step 7
-  - You might want to take a look at the output disk yourself.
-    It is a normal qcow image, so one can use mount-image-callback as described
-    above
-  - to invoke xkvm on your own take the command you see in the output and
-    remove the "-serial ..." but add -nographic instead
-    For graphical console one can add --vnc 127.0.0.1:1
-
-== Setup ==
-In order to run vmtest you'll need some dependencies.  To get them, you 
-can run:
-  make vmtest-deps
-
-That will install all necessary dependencies.
-
-== Running ==
-Running tests is done most simply by:
-
-  make vmtest
-
-If you wish to all tests in test_network.py, do so with:
-  sudo PATH=$PWD/tools:$PATH nosetests3 tests/vmtests/test_network.py
-
-Or run a single test with:
-  sudo PATH=$PWD/tools:$PATH nosetests3 tests/vmtests/test_network.py:WilyTestBasic
-
-Note:
-  * currently, the tests have to run as root.  The reason for this is that
-    the kernel and initramfs to boot are extracted from the maas ephemeral
-    image.  This should be fixed at some point, and then 'make vmtest'
-
-    The tests themselves don't actually have to run as root, but the
-    test setup does.
-  * the 'tools' directory must be in your path.
-  * test will set apt_proxy in the guests to the value of
-    'apt_proxy' environment variable.  If that is not set it will 
-    look at the host's apt config and read 'Acquire::HTTP::Proxy'
-
-== Environment Variables ==
-Some environment variables affect the running of vmtest
-  * apt_proxy: 
-    test will set apt_proxy in the guests to the value of 'apt_proxy'.
-    If that is not set it will look at the host's apt config and read
-    'Acquire::HTTP::Proxy'
-
-  * CURTIN_VMTEST_KEEP_DATA_PASS CURTIN_VMTEST_KEEP_DATA_FAIL:
-    default:
-      CURTIN_VMTEST_KEEP_DATA_PASS=none
-      CURTIN_VMTEST_KEEP_DATA_FAIL=all
-    These 2 variables determine what portions of the temporary
-    test data are kept.
-
-    The variables contain a comma ',' delimited list of directories
-    that should be kept in the case of pass or fail.  Additionally,
-    the values 'all' and 'none' are accepted.
-
-    Each vmtest that runs has its own sub-directory under the top level
-    CURTIN_VMTEST_TOPDIR.  In that directory are directories:
-      boot: inputs to the system boot (after install)
-      install: install phase related files
-      disks: the disks used for installation and boot
-      logs: install and boot logs
-      collect: data collected by the boot phase
-
-  * CURTIN_VMTEST_TOPDIR: default $TMPDIR/vmtest-<timestamp>
-    vmtest puts all test data under this value.  By default, it creates
-    a directory in TMPDIR (/tmp) named with as "vmtest-<timestamp>"
-
-    If you set this value, you must ensure that the directory is either
-    non-existant or clean.
-
-  * CURTIN_VMTEST_LOG: default $TMPDIR/vmtest-<timestamp>.log
-    vmtest writes extended log information to this file.
-    The default puts the log along side the TOPDIR.
-
-  * CURTIN_VMTEST_IMAGE_SYNC: default false (boolean)
-    if set to true, each run will attempt a sync of images.
-    If you want to make sure images are always up to date, then set to true.
-
-  * CURTIN_VMTEST_BRIDGE: default 'user'
-    the network devices will be attached to this bridge.  The default is
-    'user', which means to use qemu user mode networking.  Set it to
-    'virbr0' or 'lxcbr0' to use those bridges and then be able to ssh
-    in directly.
-
-  * IMAGE_DIR: default /srv/images
-    vmtest keeps a mirror of maas ephemeral images in this directory.
-
-  * IMAGES_TO_KEEP: default 1
-    keep this number of images of each release in the IMAGE_DIR.
-
-Environment 'boolean' values:
-   For boolean environment variables the value is considered True
-   if it is any value other than case insensitive 'false', '' or "0"

=== removed file 'doc/devel/README.txt'
--- doc/devel/README.txt	2015-03-11 13:19:43 +0000
+++ doc/devel/README.txt	1970-01-01 00:00:00 +0000
@@ -1,55 +0,0 @@
-## curtin development ##
-
-This document describes how to use kvm and ubuntu cloud images
-to develop curtin or test install configurations inside kvm.
-
-## get some dependencies ##
-sudo apt-get -qy install kvm libvirt-bin cloud-utils bzr
-
-## get cloud image to boot (-disk1.img) and one to install (-root.tar.gz)
-mkdir -p ~/download
-DLDIR=$( cd ~/download && pwd )
-rel="trusty"
-arch=amd64
-burl="http://cloud-images.ubuntu.com/$rel/current/"
-for f in $rel-server-cloudimg-${arch}-root.tar.gz $rel-server-cloudimg-${arch}-disk1.img; do
-  wget "$burl/$f" -O $DLDIR/$f; done
-( cd $DLDIR && qemu-img convert -O qcow $rel-server-cloudimg-${arch}-disk1.img $rel-server-cloudimg-${arch}-disk1.qcow2)
-
-BOOTIMG="$DLDIR/$rel-server-cloudimg-${arch}-disk1.qcow2"
-ROOTTGZ="$DLDIR/$rel-server-cloudimg-${arch}-root.tar.gz"
-
-## get curtin
-mkdir -p ~/src
-bzr init-repo ~/src/curtin
-( cd ~/src/curtin && bzr  branch lp:curtin trunk.dist )
-( cd ~/src/curtin && bzr branch trunk.dist trunk )
-
-## work with curtin
-cd ~/src/curtin/trunk
-# use 'launch' to launch a kvm instance with user data to pack
-# up local curtin and run it inside instance.
-./tools/launch $BOOTIMG --publish $ROOTTGZ -- curtin install "PUBURL/${ROOTTGZ##*/}"
-
-## notes about 'launch' ##
- * launch has --help so you can see that for some info.
- * '--publish' adds a web server at ${HTTP_PORT:-9923}
-   and puts the files you want available there.  You can reference
-   this url in config or cmdline with 'PUBURL'.  For example
-   '--publish foo.img' will put 'foo.img' at PUBURL/foo.img.
- * launch sets 'ubuntu' user password to 'passw0rd'
- * launch runs 'kvm -curses'
-   kvm -curses keyboard info:
-     'alt-2' to go to qemu console
- * launch puts serial console to 'serial.log' (look there for stuff)
- * when logged in
-   * you can look at /var/log/cloud-init-output.log
-   * archive should be extracted in /curtin
-   * shell archive should be in /var/lib/cloud/instance/scripts/part-002
- * when logged in, and archive available at
-
-
-## other notes ##
- * need to add '--install-deps' or something for curtin
-   cloud-image in 12.04 has no 'python3'
-   ideally 'curtin --install-deps install' would get the things it needs

=== added file 'doc/devel/clear_holders_doc.txt'
--- doc/devel/clear_holders_doc.txt	1970-01-01 00:00:00 +0000
+++ doc/devel/clear_holders_doc.txt	2016-10-03 18:55:20 +0000
@@ -0,0 +1,85 @@
+The new version of clear_holders is based around a data structure called a
+holders_tree which represents the current storage hierarchy above a specified
+starting device. Each node in a holders tree contains data about the node and a
+key 'holders' which contains a list of all nodes that depend on it. The keys in
+a holders_tree node are:
+  - device: the path to the device in /sys/class/block
+  - dev_type: what type of storage layer the device is. possible values:
+      - disk
+      - lvm
+      - crypt
+      - raid
+      - bcache
+  - name: the kname of the device (used for display)
+  - holders: holders_trees for devices depending on the current device
+
+A holders tree can be generated for a device using the function
+clear_holders.gen_holders_tree. The device can be specified either as a path in
+/sys/class/block or as a path in /dev.
+
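+For example (the device path is an assumption):
+
+  from curtin.block import clear_holders
+
+  tree = clear_holders.gen_holders_tree('/dev/sda')
+  print(clear_holders.format_holders_tree(tree))
+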
+The new implementation of block.clear_holders shuts down storage devices in a
+holders tree starting from the leaves of the tree and ascending towards the
+root. The old implementation of clear_holders ascended up each path of the tree
+separately, in a pattern similar to depth first search. The problem with the
+old implementation is that in some cases either an attempt would be made to
+remove one storage device while other devices depended on it or clear_holders
+would attempt to shut down the same storage device several times. In order to
+cope with this the old version of clear_holders had logic to handle expected
+failures and hope for the best moving forward. The new version of clear_holders
+is able to run without many anticipated failures.
+
+The logic to plan what order to shut down storage layers in is in
+clear_holders.plan_shutdown_holders_trees. This function accepts either a
+single holders tree or a list of holders trees. When run with a list of holders
+trees, it assumes that all of these trees start at basically the same layer in
+the overall storage hierarchy for the system (i.e. a list of holders trees
+starting from all of the target installation disks). This function returns a
+list of dictionaries, with each dictionary containing the keys:
+  - device: the path to the device in /sys/class/block
+  - dev_type: what type of storage layer the device is. possible values:
+      - disk
+      - lvm
+      - crypt
+      - raid
+      - bcache
+  - level: the level of the device in the current storage hierarchy
+           (starting from 0)
+
+The items in the list returned by clear_holders.plan_shutdown_holders_trees
+should be processed in order to make sure the holders trees are shut down fully.
+
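+A sketch of consuming that plan, assuming 'trees' holds one or more holders
+trees built with clear_holders.gen_holders_tree:
+
+  plan = clear_holders.plan_shutdown_holders_trees(trees)
+  for entry in plan:
+      print(entry['level'], entry['dev_type'], entry['device'])
+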
+The main interface for clear_holders is the function
+clear_holders.clear_holders. If the system has just been booted it could be
+beneficial to run the function clear_holders.start_clear_holders_deps before
+using clear_holders.clear_holders. This ensures clear_holders will be able to
+properly storage devices. The function clear_holders.clear_holders can be
+passed either a single device or a list of devices and will shut down all
+storage devices above the device(s). The devices can be specified either by
+path in /dev or by path in /sys/class/block.
+
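+For example (device paths are assumptions):
+
+  clear_holders.start_clear_holders_deps()
+  clear_holders.clear_holders(['/dev/sda', '/dev/sdb'])
+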
+In order to test if a device or devices are free to be partitioned/formatted,
+the function clear_holders.assert_clear can be passed either a single device or
+a list of devices, with devices specified either by path in /dev or by path in
+/sys/class/block. If there are any storage devices that depend on one of the
+devices passed to clear_holders.assert_clear, then an OSError will be raised.
+If clear_holders.assert_clear does not raise any errors, then the devices
+specified should be ready for partitioning.
+
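+For example (device paths are assumptions):
+
+  clear_holders.assert_clear(['/dev/sda', '/dev/sdb'])  # OSError if not clear
+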
+It is possible to query further information about storage devices using
+clear_holders.
+
+Holders for an individual device can be queried using clear_holders.get_holders.
+Results are returned as a list of knames for holding devices.
+
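+For example (the return value shown is illustrative):
+
+  clear_holders.get_holders('/dev/sda')  # may return e.g. ['dm-0']
+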
+A holders tree can be printed in a human readable format using
+clear_holders.format_holders_tree(). Example output:
+sda
+|-- sda1
+|-- sda2
+`-- sda5
+    `-- dm-0
+        |-- dm-1
+        `-- dm-2
+            `-- dm-3

=== modified file 'doc/index.rst'
--- doc/index.rst	2015-10-02 16:19:07 +0000
+++ doc/index.rst	2016-10-03 18:55:20 +0000
@@ -13,7 +13,13 @@
    :maxdepth: 2
 
    topics/overview
+   topics/config
+   topics/apt_source
+   topics/networking
+   topics/storage
    topics/reporting
+   topics/development
+   topics/integration-testing
 
 
 

=== added file 'doc/topics/apt_source.rst'
--- doc/topics/apt_source.rst	1970-01-01 00:00:00 +0000
+++ doc/topics/apt_source.rst	2016-10-03 18:55:20 +0000
@@ -0,0 +1,164 @@
+==========
+APT Source
+==========
+
+This part of curtin is meant to allow influencing the apt behaviour and configuration.
+
+By default - if no apt config is provided - it does nothing. That keeps behavior compatible on upgrades.
+
+The feature has an optional target argument which - by default - is used to modify the environment that curtin currently installs (@TARGET_MOUNT_POINT).
+
+Features
+~~~~~~~~
+
+* Add PGP keys to the APT trusted keyring
+
+  - add via short keyid
+
+  - add via long key fingerprint
+
+  - specify a custom keyserver to pull from
+
+  - add raw keys (which makes you independent of keyservers)
+
+* Influence global apt configuration
+
+  - adding PPAs
+
+  - replacing mirror, security mirror and release in sources.list
+
+  - able to provide a fully custom template for sources.list
+
+  - add arbitrary apt.conf settings
+
+  - provide debconf configurations
+
+  - disabling suites (=pockets)
+
+  - per architecture mirror definition
+
+
+Configuration
+~~~~~~~~~~~~~
+
+The general configuration of the apt feature is under an element called ``apt``.
+
+This can have various "global" subelements as listed in the examples below.
+The file ``apt-source.yaml`` holds more examples.
+
+These global configurations are valid throughout the whole apt feature.
+So for example a global specification of a ``primary`` mirror will apply to all rendered sources entries.
+
+Then there is a section ``sources`` which can hold any number of source subelements itself.
+The key is the filename and will be prepended by /etc/apt/sources.list.d/ if it doesn't start with a ``/``.
+There are certain cases - where no content is written into a sources.list file - in which the filename will be ignored, yet it can still be used as an index for merging.
+
+The values inside the entries consist of the following optional entries
+
+* ``source``: a sources.list entry (some variable replacements apply)
+
+* ``keyid``: providing a key to import via shortid or fingerprint
+
+* ``key``: providing a raw PGP key
+
+* ``keyserver``: specify an alternate keyserver to pull keys from that were specified by keyid
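+
+A single source entry using these keys might look like this (the PPA URL and
+keyid are illustrative) ::
+
+  apt:
+    sources:
+      curtin-dev-ppa.list:
+        source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main"
+        keyid: F430BBA5
+        keyserver: keyserver.ubuntu.com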
+
+The section "sources" is is a dictionary (unlike most block/net configs which are lists). This format allows merging between multiple input files than a list like ::
+
+  sources:
+     s1: {'key': 'key1', 'source': 'source1'}
+
+  sources:
+     s2: {'key': 'key2'}
+     s1: {'keyserver': 'foo'}
+
+  This would be merged into
+     s1: {'key': 'key1', 'source': 'source1', keyserver: 'foo'}
+     s2: {'key': 'key2'}
+
+Here is just one of the most common examples for this feature: install with curtin in an isolated environment (derived repository):
+
+For that we need to:
+
+* insert the PGP key of the local repository to be trusted
+
+  - since you are locked down you can't pull from keyserver.ubuntu.com
+
+  - if you have an internal keyserver you could pull from there, but let us assume you don't even have that; so you have to provide the raw key
+
+  - in the example I'll use the key of the "Ubuntu CD Image Automatic Signing Key" which makes no sense as it is in the trusted keyring anyway, but it is a good example. (Also the key is shortened to stay readable)
+
+::
+
+      -----BEGIN PGP PUBLIC KEY BLOCK-----
+      Version: GnuPG v1
+      mQGiBEFEnz8RBAC7LstGsKD7McXZgd58oN68KquARLBl6rjA2vdhwl77KkPPOr3O
+      RwIbDAAKCRBAl26vQ30FtdxYAJsFjU+xbex7gevyGQ2/mhqidES4MwCggqQyo+w1
+      Twx6DKLF+3rF5nf1F3Q=
+      =PBAe
+      -----END PGP PUBLIC KEY BLOCK-----
+
+* replace the mirrors used with mirrors available inside the isolated environment for apt to pull repository data from.
+
+  - let's assume we have a local mirror at ``mymirror.local``, but otherwise following the usual paths
+
+  - make an example with a partial mirror that doesn't mirror the backports suite, so backports have to be disabled
+
+That would be specified as ::
+
+  apt:
+    primary:
+      - arches: [default]
+        uri: http://mymirror.local/ubuntu/
+    disable_suites: [backports]
+    sources:
+      localrepokey:
+        key: | # full key as block
+          -----BEGIN PGP PUBLIC KEY BLOCK-----
+          Version: GnuPG v1
+
+          mQGiBEFEnz8RBAC7LstGsKD7McXZgd58oN68KquARLBl6rjA2vdhwl77KkPPOr3O
+          RwIbDAAKCRBAl26vQ30FtdxYAJsFjU+xbex7gevyGQ2/mhqidES4MwCggqQyo+w1
+          Twx6DKLF+3rF5nf1F3Q=
+          =PBAe
+          -----END PGP PUBLIC KEY BLOCK-----
+
+The file examples/apt-source.yaml holds various further examples that can be configured with this feature.
+
+
+Common snippets
+~~~~~~~~~~~~~~~
+This is a collection of additional ideas showing how people can use this feature to customize their to-be-installed system.
+
+* enable proposed on installing
+
+::
+
+ apt:
+   sources:
+     proposed.list: deb $MIRROR $RELEASE-proposed main restricted universe multiverse
+
+* Make debug symbols available
+
+::
+
+ apt:
+   sources:
+     ddebs.list: |
+       deb http://ddebs.ubuntu.com $RELEASE main restricted universe multiverse
+       deb http://ddebs.ubuntu.com $RELEASE-updates main restricted universe multiverse
+       deb http://ddebs.ubuntu.com $RELEASE-security main restricted universe multiverse
+       deb http://ddebs.ubuntu.com $RELEASE-proposed main restricted universe multiverse
+
+Timing
+~~~~~~
+The feature is implemented at the stage of curthooks_commands, which runs just after curtin has extracted the image to the target.
+Additionally it can be run as a standalone command "curtin -v --config <yourconfigfile> apt-config".
+
+This will pick up the target from the environment variable that is set by curtin. If you want to use it against a different target or outside of the usual curtin handling, you can add ``--target <path>`` to override the target path.
+This target should have at least a minimal system with apt, apt-add-repository and dpkg installed for the functionality to work.
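+
+For example (the config file name and target path are illustrative)::
+
+  curtin -v --config myapt.yaml apt-config --target /mnt/target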
+
+
+Dependencies
+~~~~~~~~~~~~
+Cloud-init might need to resolve dependencies and install packages in the ephemeral environment to run curtin.
+Therefore it is recommended to not only provide an apt configuration to curtin for the target, but also one to the install environment via cloud-init.
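+
+For example (a sketch, not a complete user-data file; the mirror URL is
+illustrative), the same ``apt`` configuration given to curtin can also be
+provided to the ephemeral install environment as cloud-init user-data::
+
+  #cloud-config
+  apt:
+    primary:
+      - arches: [default]
+        uri: http://mymirror.local/ubuntu/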

=== added file 'doc/topics/config.rst'
--- doc/topics/config.rst	1970-01-01 00:00:00 +0000
+++ doc/topics/config.rst	2016-10-03 18:55:20 +0000
@@ -0,0 +1,551 @@
+====================
+Curtin Configuration
+====================
+
+Curtin exposes a number of configuration options for controlling Curtin
+behavior during installation.
+
+
+Configuration options
+---------------------
+Curtin's top level config keys are as follows:
+
+
+- apt_mirrors (``apt_mirrors``)
+- apt_proxy (``apt_proxy``)
+- block-meta (``block``)
+- debconf_selections (``debconf_selections``)
+- disable_overlayroot (``disable_overlayroot``)
+- grub (``grub``)
+- http_proxy (``http_proxy``)
+- install (``install``)
+- kernel (``kernel``)
+- kexec (``kexec``)
+- multipath (``multipath``)
+- network (``network``)
+- power_state (``power_state``)
+- reporting (``reporting``)
+- restore_dist_interfaces: (``restore_dist_interfaces``)
+- sources (``sources``)
+- stages (``stages``)
+- storage (``storage``)
+- swap (``swap``)
+- system_upgrade (``system_upgrade``)
+- write_files (``write_files``)
+
+
+apt_mirrors
+~~~~~~~~~~~
+Configure APT mirrors for ``ubuntu_archive`` and ``ubuntu_security``
+
+**ubuntu_archive**: *<http://local.archive/ubuntu>*
+
+**ubuntu_security**: *<http://local.archive/ubuntu>*
+
+If the target OS includes /etc/apt/sources.list, Curtin will replace
+the default values for each key set with the supplied mirror URL.
+
+**Example**::
+
+  apt_mirrors:
+    ubuntu_archive: http://local.archive/ubuntu
+    ubuntu_security: http://local.archive/ubuntu
+
+
+apt_proxy
+~~~~~~~~~
+Curtin will configure an APT HTTP proxy in the target OS
+
+**apt_proxy**: *<URL to APT proxy>*
+
+**Example**::
+
+  apt_proxy: http://squid.mirror:3267/
+
+
+block-meta
+~~~~~~~~~~
+Configure how Curtin selects and configures disks on the target
+system without providing a custom configuration (mode=simple).
+
+**devices**: *<List of block devices for use>*
+
+The ``devices`` parameter is a list of block device paths that Curtin may
+select from when choosing where to install the OS.
+
+**boot-partition**: *<dictionary of configuration>*
+
+The ``boot-partition`` parameter controls how to configure the boot partition
+with the following parameters:
+
+**enabled**: *<boolean>*
+
+Enabled will forcibly setup a partition on the target device for booting.
+
+**format**: *<['uefi', 'gpt', 'prep', 'mbr']>*
+
+Specify the partition format.  Some formats, like ``uefi`` and ``prep``
+are restricted by platform characteristics.
+
+**fstype**: *<filesystem type: one of ['ext3', 'ext4'], defaults to 'ext4'>*
+
+Specify the filesystem format on the boot partition.
+
+**label**: *<filesystem label: defaults to 'boot'>*
+
+Specify the filesystem label on the boot partition.
+
+**Example**::
+
+  block-meta:
+      devices:
+        - /dev/sda
+        - /dev/sdb
+      boot-partition:
+        enabled: True
+        format: gpt
+        fstype: ext4
+        label: my-boot-partition
+
+
+debconf_selections
+~~~~~~~~~~~~~~~~~~
+Curtin will update the target with debconf set-selection values.  Users will
+need to be familiar with the package debconf options.  Users can probe a
+package's debconf settings by using ``debconf-get-selections``.
+
+**selection_name**: *<debconf-set-selections input>*
+
+``debconf-set-selections`` is in the form::
+
+  <packagename> <packagename/option-name> <type> <value>
+
+**Example**::
+
+  debconf_selections:
+    set1: |
+      cloud-init cloud-init/datasources multiselect MAAS
+      lxd lxd/bridge-name string lxdbr0
+    set2: lxd lxd/setup-bridge boolean true
+
+
+
+disable_overlayroot
+~~~~~~~~~~~~~~~~~~~
+Curtin disables overlayroot in the target by default.
+
+**disable_overlayroot**: *<boolean: default True>*
+
+**Example**::
+
+  disable_overlayroot: False
+
+
+grub
+~~~~
+Curtin configures grub as the target machine's boot loader.  Users
+can control a few options to tailor how the system will boot after
+installation.
+
+**install_devices**: *<list of block device names to install grub>*
+
+Specify a list of devices onto which grub will attempt to install.
+
+**replace_linux_default**: *<boolean: default True>*
+
+Controls whether grub-install will update the Linux Default target
+value during installation.
+
+**update_nvram**: *<boolean: default False>*
+
+Certain platforms, like ``uefi`` and ``prep`` systems utilize
+NVRAM to hold boot configuration settings which control the order in
+which devices are booted.  Curtin by default will not attempt to
+update the NVRAM settings to preserve the system configuration.
+Users may want to force NVRAM to be updated such that the next boot
+of the system will boot from the installed device.
+
+**Example**::
+
+  grub:
+     install_devices:
+       - /dev/sda1
+     replace_linux_default: False
+     update_nvram: True
+
+
+http_proxy
+~~~~~~~~~~
+Curtin will export ``http_proxy`` value into the installer environment.
+
+**http_proxy**: *<HTTP Proxy URL>*
+
+**Example**::
+
+  http_proxy: http://squid.proxy:3728/
+
+
+
+install
+~~~~~~~
+Configure Curtin's install options.
+
+**log_file**: *<path to write Curtin's install.log data>*
+
+Curtin logs install progress by default to /var/log/curtin/install.log
+
+**post_files**: *<List of files to read from host to include in reporting data>*
+
+Curtin by default will post the ``log_file`` value to any configured reporter.
+
+**save_install_config**: *<Path to save merged curtin configuration file>*
+
+Curtin will save the merged configuration data into the target OS at 
+the path of ``save_install_config``.  This defaults to /root/curtin-install-cfg.yaml
+
+**Example**::
+
+  install:
+     log_file: /tmp/install.log
+     post_files:
+       - /tmp/install.log
+       - /var/log/syslog
+     save_install_config: /root/myconf.yaml
+
+
+kernel
+~~~~~~
+Configure how Curtin selects which kernel to install into the target image.
+If ``kernel`` is not configured, Curtin will use the default mapping below
+and determine which ``package`` value to use by looking up the current release
+and the currently running kernel version.
+
+
+**fallback-package**: *<kernel package-name to be used as fallback>*
+
+Specify a kernel package name to be used if the default package is not
+available.
+
+**mapping**: *<Dictionary mapping Ubuntu release to HWE kernel names>*
+
+Default mapping for Releases to package names is as follows::
+
+  precise:
+    3.2.0:
+    3.5.0: -lts-quantal
+    3.8.0: -lts-raring
+    3.11.0: -lts-saucy
+    3.13.0: -lts-trusty
+  trusty:
+    3.13.0:
+    3.16.0: -lts-utopic
+    3.19.0: -lts-vivid
+    4.2.0: -lts-wily
+    4.4.0: -lts-xenial
+  xenial:
+    4.3.0:
+    4.4.0:
+
+
+**package**: *<Linux kernel package name>*
+
+Specify the exact package to install in the target OS.
+
+**Example**::
+
+  kernel:
+    fallback-package: linux-image-generic
+    package: linux-image-generic-lts-xenial
+    mapping:
+      xenial:
+        4.4.0: -my-custom-kernel
+
+
+kexec
+~~~~~
+Curtin can use kexec to "reboot" into the target OS.
+
+**mode**: *<on>*
+
+Enable rebooting with kexec.
+
+**Example**::
+
+  kexec: on
+
+
+multipath
+~~~~~~~~~
+Curtin will detect and autoconfigure multipath by default to enable
+boot for systems with multipath.  Curtin does not apply any advanced
+configuration or tuning, rather it uses distro defaults and provides
+enough configuration to enable booting.
+
+**mode**: *<['auto', 'disabled']>*
+
+Defaults to auto which will configure enough to enable booting on multipath
+devices.  Disabled will prevent curtin from installing or configuring
+multipath.
+
+**overwrite_bindings**: *<boolean>*
+
+If ``overwrite_bindings`` is True then Curtin will generate new bindings
+file for multipath, overriding any existing binding in the target image.
+
+**Example**::
+
+  multipath:
+      mode: auto
+      overwrite_bindings: True
+
+
+network
+~~~~~~~
+Configure networking (see Networking section for details).
+
+**network_option_1**: *<option value>*
+
+**Example**::
+
+  network:
+     version: 1
+     config:
+       - type: physical
+         name: eth0
+         mac_address: "c0:d6:9f:2c:e8:80"
+         subnets:
+           - type: dhcp4
+
+
+power_state
+~~~~~~~~~~~
+Curtin can configure the target machine into a specific power state after
+completing an installation.  Default is to do nothing.
+
+**delay**: *<Integer seconds to delay change in state>*
+
+Curtin will wait ``delay`` seconds before changing the power state.
+
+**mode**: *<New power state is one of: [halt, poweroff, reboot]>*
+
+Curtin will transition the node into one of the new states listed.
+
+``halt`` will stop a machine, but may not cut the power to the system.
+``poweroff`` will stop a machine and request it shut off the power.
+``reboot`` will perform a platform reset.
+
+**message**:  *<message string>*
+
+The ``message`` string will be broadcast to system consoles prior to
+power state change.
+
+
+**Example**::
+
+  power_state:
+    mode: poweroff
+    delay: 5
+    message: Bye Bye
+
+
+reporting
+~~~~~~~~~
+Configure installation reporting (see Reporting section for details).
+
+**Example**::
+
+  reporting:
+    maas:
+      level: DEBUG
+      type: webhook
+      endpoint: http://localhost:8000/
+
+
+restore_dist_interfaces
+~~~~~~~~~~~~~~~~~~~~~~~
+Curtin can restore a copy of /etc/network/interfaces built into cloud images.
+
+**restore_dist_interfaces**: *<boolean>*
+
+If True, then Curtin will restore the interfaces file into the target.
+
+
+**Example**::
+
+  restore_dist_interfaces: True
+
+
+sources
+~~~~~~~
+Specify the root image to install onto the target system.  The URI also
+configures the method used to copy the image to the target system.
+
+**sources**: *<List of source URIs>*
+
+``source URI`` may be one of:
+
+- **dd-**:  Use ``dd`` command to write image to target.
+- **cp://**: Use ``rsync`` command to copy source directory to target.
+- **file://**: Use ``tar`` command to extract source to target.
+- **http[s]://**: Use ``wget | tar`` commands to extract source to target.
+
+**Example Cloud-image**::
+
+  sources: 
+    - https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-root.tar.gz
+
+**Example Custom DD image**::
+
+  sources: 
+    - dd-img: https://localhost/raw_images/centos-6-3.img
+
+**Example Copy from booted environment**::
+
+  sources: 
+    - cp:///
+
+
+**Example Copy from local tarball**::
+
+  sources: 
+    - file:///tmp/root.tar.gz
+
+
+stages
+~~~~~~
+Curtin installation executes in stages.  At each stage, Curtin will look for
+commands to run by reading the Curtin config key *<stage_name>_commands*,
+which is a dictionary where each key maps to a command to run.  Users may
+override the ``stages`` value to control which curtin stages execute.  During
+each stage, the commands are executed in C locale sort order.  Users should
+name keys in a NN-XXX format, where NN is a two-digit number, to exercise
+control over execution order.
+
+The following stages are defined in Curtin and 
+run by default.
+
+- **early**: *Preparing for Installation*
+
+This stage runs before any actions are taken for installation.  By default
+this stage does nothing.
+
+- **partitioning**: *Select and partition disks for installation*
+
+This stage runs ``curtin block-meta simple`` by default.
+
+- **network**: *Probe and configure networking*
+
+This stage runs ``curtin net-meta auto`` by default.
+
+- **extract**: *Writing install sources to disk*
+
+This stage runs ``curtin extract`` by default.
+
+- **curthooks**: *Configuring installed system*
+
+This stage runs ``curtin curthooks`` by default.
+
+- **hooks**: *Finalizing installation*
+
+This stage runs ``curtin hook`` by default.
+
+- **late**: *Executing late commands*
+
+This stage runs after Curtin has completed the installation.  By default
+this stage does nothing.
+
+**Example Custom Stages**::
+
+  # Skip the whole install and just run `mystage`
+  stages: ['early', 'late', 'mystage']
+  mystage_commands:
+     00-cmd: ['/usr/bin/foo']
+
+**Example Early and Late commands**::
+
+  early_commands:
+      99-cmd:  ['echo', 'I ran last']
+      00-cmd:  ['echo', 'I ran first']
+  late_commands:
+      50-cmd: ['curtin', 'in-target', '--', 'touch', '/etc/disable_overlayroot']
+    
+
+swap
+~~~~
+Curtin can configure a swapfile on the filesystem in the target system.
+Size settings can be integer or string values with suffix.  Curtin
+supports the following suffixes which multiply the value.
+
+- **B**: *1*
+- **K[B]**: *1 << 10*
+- **M[B]**: *1 << 20*
+- **G[B]**: *1 << 30*
+- **T[B]**: *1 << 40*
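+
+For example, a value of ``8G`` resolves to 8 * (1 << 30) = 8589934592 bytes.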
+
+Curtin will use a heuristic to configure the swapfile size if the ``size``
+parameter is not set to a specific value.  The ``maxsize`` sets the upper
+bound of the heuristic calculation.
+
+**filename**: *<path to swap file>* 
+
+Configure the filename of the swap file. Defaults to /swap.img
+
+**maxsize**: *<Size string>*
+
+Configure the max size of the swapfile, defaults to 8GB
+
+**size**: *<Size string>*
+
+Configure the exact size of the swapfile.  Setting ``size`` to 0 will
+disable swap.
+
+**Example**::
+
+  swap:
+    filename: swap.img
+    size: None
+    maxsize: 4GB
+
+
+system_upgrade
+~~~~~~~~~~~~~~
+Control if Curtin runs `dist-upgrade` in target after install.  Defaults to
+False.
+
+**enabled**: *<boolean>*
+
+**Example**::
+
+  system_upgrade:
+    enabled: False
+
+
+write_files
+~~~~~~~~~~~
+Curtin supports writing out arbitrary data to a file.
+``write_files`` accepts a dictionary of entries formatted as follows:
+
+**path**: *<path and filename to save content>*
+
+Specify the name and location of where to write the content.
+
+**permissions**: *<Unix permission string>*
+
+Specify the permissions mode as an integer or string of numbers.
+
+**content**: *<data>*
+
+Specify the content.
+
+**Example**::
+
+  write_files:
+    f1:
+      path: /file1
+      content: !!binary |
+        f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAA
+    f2: {path: /file2, content: "foobar", permissions: '0666'}

=== added file 'doc/topics/development.rst'
--- doc/topics/development.rst	1970-01-01 00:00:00 +0000
+++ doc/topics/development.rst	2016-10-03 18:55:20 +0000
@@ -0,0 +1,68 @@
+=================
+Developing Curtin 
+=================
+
+Curtin developers make use of cloud-images and kvm to help develop and test new
+curtin features.
+
+Install dependencies
+====================
+
+Install some virtualization and cloud packages to get started::
+
+  sudo apt-get -qy install kvm libvirt-bin cloud-utils bzr
+
+
+Download cloud images
+=====================
+Curtin will use two cloud images: the ``-disk1.img`` image is used for
+booting, and the ``-root.tar.gz`` image is used for installing::
+
+  mkdir -p ~/download
+  DLDIR=$( cd ~/download && pwd )
+  rel="trusty"
+  arch=amd64
+  burl="http://cloud-images.ubuntu.com/$rel/current/"
+  for f in $rel-server-cloudimg-${arch}-root.tar.gz $rel-server-cloudimg-${arch}-disk1.img; do
+    wget "$burl/$f" -O $DLDIR/$f; done
+  ( cd $DLDIR && qemu-img convert -O qcow $rel-server-cloudimg-${arch}-disk1.img $rel-server-cloudimg-${arch}-disk1.qcow2)
+
+  export BOOTIMG="$DLDIR/$rel-server-cloudimg-${arch}-disk1.qcow2"
+  export ROOTTGZ="$DLDIR/$rel-server-cloudimg-${arch}-root.tar.gz"
+
+
+Getting the source
+==================
+Download the curtin source from Launchpad via the `bzr` command::
+
+  mkdir -p ~/src
+  bzr init-repo ~/src/curtin
+  ( cd ~/src/curtin && bzr branch lp:curtin trunk.dist )
+  ( cd ~/src/curtin && bzr branch trunk.dist trunk )
+
+Using curtin
+============
+Use `launch` to boot a kvm instance with user data that packs up the local
+curtin tree and runs it inside the instance::
+
+  cd ~/src/curtin/trunk
+  ./tools/launch $BOOTIMG --publish $ROOTTGZ -- curtin install "PUBURL/${ROOTTGZ##*/}"
+
+
+Notes about 'launch'
+====================
+
+- launch has a --help option; see that for more info.
+- `--publish` adds a web server at ${HTTP_PORT:-$RANDOM_PORT}
+  and puts the files you want available there.  You can reference
+  this url in config or cmdline with 'PUBURL'.  For example
+  `--publish foo.img` will put `foo.img` at PUBURL/foo.img.
+- launch sets 'ubuntu' user password to 'passw0rd'
+- launch runs 'kvm -curses'.
+  kvm -curses keyboard info: 'alt-2' switches to the qemu console.
+- launch writes the serial console to 'serial.log' (look there for output)
+- when logged in
+  - you can look at /var/log/cloud-init-output.log
+  - archive should be extracted in /curtin
+  - shell archive should be in /var/lib/cloud/instance/scripts/part-002

=== added file 'doc/topics/integration-testing.rst'
--- doc/topics/integration-testing.rst	1970-01-01 00:00:00 +0000
+++ doc/topics/integration-testing.rst	2016-10-03 18:55:20 +0000
@@ -0,0 +1,245 @@
+===================
+Integration Testing
+===================
+
+Curtin includes an in-tree integration suite that runs hundreds of tests
+validating various forms of custom storage and network configurations across
+all of the supported Ubuntu LTS releases as well as some of the currently 
+supported interim releases.
+
+Background
+==========
+
+Curtin includes a mechanism called 'vmtest' that allows it to actually
+do installs and validate a number of configurations.
+
+The general flow of the vmtests is:
+
+#. each test has an associated yaml config file for curtin in examples/tests
+#. uses curtin-pack to create the user-data for cloud-init to trigger install
+#. create and install a system using 'tools/launch'.
+
+   #. The install environment is booted from a maas ephemeral image.
+   #. kernel & initrd used are from maas images (not part of the image)
+   #. network by default is handled via user networking
+   #. It creates all empty disks required
+   #. cloud-init datasource is provided by launch
+
+      #. like: ds=nocloud-net;seedfrom=http://10.7.0.41:41518/
+         provided by python webserver start_http
+      #. via -drive file=/tmp/launch.8VOiOn/seed.img,if=virtio,media=cdrom
+         as a seed disk (if booted without external kernel)
+
+   #. dependencies and other preparations are installed at the beginning by
+      curtin inside the ephemeral image prior to configuring the target
+
+#. power off the system.
+#. configure a 'NoCloud' datasource seed image that provides scripts that
+   will run on first boot.
+
+   #. this will contain all our code to gather health data on the install
+   #. by cloud-init design this runs only once per instance, if you start
+      the system again this won't be called again
+
+#. boot the installed system with 'tools/xkvm'.
+
+   #. reuses the disks that were installed/configured in the former steps
+   #. also adds an output disk
+   #. additionally the seed image for the data gathering is added
+   #. On this boot it will run the provided scripts, write their output to a
+      "data" disk and then shut itself down.
+
+#. extract the data from the output disk
+#. vmtest python code now verifies if the output is as expected.
+
+Debugging
+=========
+
+At step 3.1 one can pull data out of the maas image with the command
+``mount-image-callback``.  For example::
+
+  sudo mount-image-callback your.img -- sh -c 'cp $MOUNTPOINT/boot/* .'
+
+At steps 3.6 through 4 ``tools/launch`` can be called in a way to give you
+console access.  To do so just call tools/launch but drop the -serial=x
+parameter.  One might want to change "'power_state': {'mode': 'poweroff'}" to
+avoid the auto reboot before getting control.  Replace the directory usually
+seen in the launch calls with a clean fresh directory.
+
+Curtin and its config can be found in /curtin. If the system gets that far,
+cloud-init will create user credentials ubuntu/passw0rd; otherwise one can use
+a cloud image from https://cloud-images.ubuntu.com/ and add a backdoor user
+via::
+
+  bzr branch lp:~maas-maintainers/maas/backdoor-image backdoor-image
+  sudo ./backdoor-image -v --user=<USER> --password-auth --password=<PW> IMG
+
+At step 6 -> 7 you might want to keep all the temporary images around.  To do
+so you can set ``CURTIN_VMTEST_KEEP_DATA_PASS=all`` in your environment. ::
+
+  export CURTIN_VMTEST_KEEP_DATA_PASS=all CURTIN_VMTEST_KEEP_DATA_FAIL=all
+
+That will keep the /tmp/tmpXXXXX directories and all files in there for further
+execution.
+
+At step 7 you might want to take a look at the output disk yourself.  It is a
+normal qcow image, so one can use ``mount-image-callback`` as described above.
+
+To invoke xkvm on your own, take the command you see in the output and remove
+the "-serial ..." part, adding ``-nographic`` instead.  For a graphical
+console one can add ``--vnc 127.0.0.1:1``.
+
+Setup
+=====
+
+In order to run vmtest you'll need some dependencies.  To get them, you 
+can run::
+
+  make vmtest-deps
+
+Running
+=======
+
+Running tests is done most simply by::
+
+  make vmtest
+
+If you wish to run all tests in test_network.py, do so with::
+
+  nosetests3 tests/vmtests/test_network.py
+
+Or run a single test with::
+
+  nosetests3 tests/vmtests/test_network.py:WilyTestBasic
+
+
+Environment Variables
+=====================
+
+Some environment variables affect the running of vmtest
+
+- ``apt_proxy``:
+
+    tests will set apt: { proxy } in the guests to the value of the ``apt_proxy``
+    environment variable.  If that is not set it will look at the host's apt
+    config and read ``Acquire::HTTP::Proxy``
+
+- ``CURTIN_VMTEST_KEEP_DATA_PASS``: Defaults to none.
+- ``CURTIN_VMTEST_KEEP_DATA_FAIL``: Defaults to all.
+
+  These 2 variables determine what portions of the temporary
+  test data are kept.
+
+  The variables contain a comma ',' delimited list of directories
+  that should be kept in the case of pass or fail.  Additionally,
+  the values 'all' and 'none' are accepted.
+
+  Each vmtest that runs has its own sub-directory under the top level
+  ``CURTIN_VMTEST_TOPDIR``.  In that directory are directories:
+
+    - ``boot``: inputs to the system boot (after install)
+    - ``install``: install phase related files
+    - ``disks``: the disks used for installation and boot
+    - ``logs``: install and boot logs
+    - ``collect``: data collected by the boot phase
+
+- ``CURTIN_VMTEST_TOPDIR``: default $TMPDIR/vmtest-<timestamp>
+
+  Vmtest puts all test data under this value.  By default, it creates
+  a directory in TMPDIR (/tmp) named ``vmtest-<timestamp>``
+
+  If you set this value, you must ensure that the directory is either
+  non-existent or clean.
+
+- ``CURTIN_VMTEST_LOG``: default $TMPDIR/vmtest-<timestamp>.log
+
+  Vmtest writes extended log information to this file.
+  The default puts the log alongside the TOPDIR.
+
+- ``CURTIN_VMTEST_IMAGE_SYNC``: default false (boolean)
+
+  If set to true, each run will attempt a sync of images.
+  If you want to make sure images are always up to date, then set to true.
+
+- ``CURTIN_VMTEST_BRIDGE``: default ``user``
+
+  The network devices will be attached to this bridge.  The default is
+  ``user``, which means to use qemu user mode networking.  Set it to
+  ``virbr0`` or ``lxdbr0`` to use those bridges and then be able to ssh
+  in directly.
+
+- ``CURTIN_VMTEST_BOOT_TIMEOUT``: default 300
+
+    timeout before giving up on the boot of the installed system.
+
+- ``CURTIN_VMTEST_INSTALL_TIMEOUT``: default 3000
+
+    timeout before giving up on installation.
+
+- ``CURTIN_VMTEST_PARALLEL``: default ''
+
+    only supported through ./tools/jenkins-runner .
+
+    - ``-1``: run one process per core.
+    - ``0`` or ``''``: run without parallelism
+    - ``>0``: run with N processes
+
+    This modifies the invocation of nosetests to add '--processes' and other
+    necessary nose arguments (--process-timeout)
+
+- ``IMAGE_DIR``: default /srv/images
+
+  Vmtest keeps a mirror of maas ephemeral images in this directory.
+
+- ``IMAGES_TO_KEEP``: default 1
+
+  Controls the number of images of each release retained in the IMAGE_DIR.
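+
+For example, a single run combining several of these variables (the values
+are illustrative)::
+
+  CURTIN_VMTEST_KEEP_DATA_PASS=logs,collect \
+  CURTIN_VMTEST_BRIDGE=lxdbr0 \
+  CURTIN_VMTEST_IMAGE_SYNC=1 \
+    nosetests3 tests/vmtests/test_network.py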
+
+Environment 'boolean' values
+============================
+
+For boolean environment variables the value is considered True
+if it is any value other than case insensitive 'false', '' or "0".
+
+Test Class Variables
+====================
+
+The base VMBaseClass defines several variables that make creating a new test
+easy. The common ones are listed below; a rough sketch of a test subclass
+follows the list.
+
+Generic:
+
+- ``arch_skip``: 
+
+  If a test is not supported on an architecture it can list the arch in this
+  variable to auto-skip the test if executed on that arch.
+
+- ``conf_file``:
+
+  The configuration that will be processed by this vmtest.
+
+- ``extra_kern_args``:
+
+  Extra arguments to the guest kernel on boot.
+
+Data Collection:
+
+- ``collect_scripts``:
+
+  The commands run when booting into the installed environment to collect the
+  data for the test to verify a proper execution.
+
+- ``boot_cloudconf``:
+
+  Extra cloud-init config content for the install phase.  This allows
+  gathering content from the install phase if needed for test verification.
+
+Disk Setup:
+
+- ``disk_block_size``:
+
+  Default block size ``512`` bytes.
+
+- ``disk_driver``:
+
+  Default block device driver is ``virtio-blk``.
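+
+A rough sketch of a test subclass using these variables (the class name,
+config path and collected files are hypothetical, not an actual in-tree
+test)::
+
+  import textwrap
+
+  from tests.vmtests import VMBaseClass
+
+
+  class MyBasicTest(VMBaseClass):
+      arch_skip = ["s390x"]
+      conf_file = "examples/tests/basic.yaml"
+      extra_kern_args = "console=ttyS0"
+      disk_block_size = 512
+      disk_driver = "virtio-blk"
+      # commands run in the installed system to collect data for verification
+      collect_scripts = [textwrap.dedent("""
+          cd OUTPUT_COLLECT_D
+          cat /etc/fstab > fstab
+          """)]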

=== added file 'doc/topics/networking.rst'
--- doc/topics/networking.rst	1970-01-01 00:00:00 +0000
+++ doc/topics/networking.rst	2016-10-03 18:55:20 +0000
@@ -0,0 +1,522 @@
+==========
+Networking
+==========
+
+Curtin supports a user-configurable networking configuration format.
+This format lets users (including via MAAS) customize their machines'
+networking interfaces by assigning subnet configuration, virtual device
+creation (bonds, bridges, vlans), routes and DNS configuration.
+
+Curtin accepts a YAML input under the top-level ``network`` key
+to indicate that a user would like to specify a custom networking
+configuration.  Required elements of a network configuration are
+``config`` and ``version``.  Currently curtin only supports 
+network config version=1. ::
+
+  network:
+    version: 1
+    config: []
+       
+Configuration Types
+-------------------
+Within the network ``config`` portion, users include a list of configuration
+types.  The current list of supported ``type`` values is as follows:
+  
+- Physical (``physical``)
+- Bond (``bond``)
+- Bridge (``bridge``)
+- VLAN (``vlan``)
+- Nameserver (``nameserver``)
+- Route (``route``)
+
+Physical, Bond, Bridge and VLAN types may also include IP configuration under
+the key ``subnets``.
+
+- Subnet/IP (``subnets``)
+
+
+Physical
+~~~~~~~~
+The ``physical`` type configuration represents a "physical" network device,
+typically Ethernet-based.  At least one of these entries is required for
+external network connectivity.  Type ``physical`` requires only one key:
+``name``.  A ``physical`` device may contain some or all of the following keys:
+
+**name**: *<desired device name>*
+
+A device's name must be less than 15 characters.  Names exceeding the maximum
+will be truncated. This is a limitation of the Linux kernel network-device
+structure.
+
+**mac_address**: *<MAC Address>*
+
+The MAC Address is a device unique identifier that most Ethernet-based network
+devices possess.  Specifying a MAC Address is optional.
+
+
+.. note::
+
+  Curtin will emit a udev rule to provide a persistent mapping between a
+  device's ``name`` and the ``mac_address``.
+
+**mtu**: *<MTU SizeBytes>* 
+
+The MTU key represents a device's Maximum Transmission Unit, the largest size
+packet or frame, specified in octets (eight-bit bytes), that can be sent in a
+packet- or frame-based network.  Specifying ``mtu`` is optional.
+
+.. note::
+
+  The set of supported MTU values for a device is not available at
+  configuration time.  It is possible to specify a value too large or too
+  small for a device, and it may be ignored by the device.
+
+
+**Physical Example**::
+  
+  network:
+    version: 1
+    config:
+      # Simple network adapter
+      - type: physical
+        name: interface0
+        mac_address: 00:11:22:33:44:55
+      # Second nic with Jumbo frames
+      - type: physical
+        name: jumbo0
+        mac_address: aa:11:22:33:44:55
+        mtu: 9000
+      # 10G pair
+      - type: physical
+        name: gbe0
+        mac_address: cd:11:22:33:44:00
+      - type: physical
+        name: gbe1
+        mac_address: cd:11:22:33:44:02
+
+Bond
+~~~~
+A ``bond`` type will configure a Linux software Bond with one or more network
+devices.  A ``bond`` type requires the following keys:
+
+**name**: *<desired device name>*
+
+A device's name must be less than 15 characters.  Names exceeding the maximum
+will be truncated. This is a limitation of the Linux kernel network-device
+structure.
+
+**mac_address**: *<MAC Address>*
+
+When specifying MAC Address on a bond this value will be assigned to the bond
+device and may be different from the MAC address of any of the underlying
+bond interfaces.  Specifying a MAC Address is optional.  If ``mac_address`` is
+not present, then the bond will use one of the MAC Address values from one of
+the bond interfaces.
+
+
+**bond_interfaces**: *<List of network device names>*
+
+The ``bond_interfaces`` key accepts a list of network device ``name`` values
+from the configuration.  This list may be empty.
+
+**params**:  *<Dictionary of key: value bonding parameter pairs>* 
+
+The ``params`` key in a bond holds a dictionary of bonding parameters.
+This dictionary may be empty. For more details on what the various bonding
+parameters mean please read the Linux Kernel Bonding.txt.
+
+Valid ``params`` keys are:
+
+  - ``active_slave``: Set bond attribute
+  - ``ad_actor_key``: Set bond attribute
+  - ``ad_actor_sys_prio``: Set bond attribute
+  - ``ad_actor_system``: Set bond attribute
+  - ``ad_aggregator``: Set bond attribute
+  - ``ad_num_ports``: Set bond attribute
+  - ``ad_partner_key``: Set bond attribute
+  - ``ad_partner_mac``: Set bond attribute
+  - ``ad_select``: Set bond attribute
+  - ``ad_user_port_key``: Set bond attribute
+  - ``all_slaves_active``: Set bond attribute
+  - ``arp_all_targets``: Set bond attribute
+  - ``arp_interval``: Set bond attribute
+  - ``arp_ip_target``: Set bond attribute
+  - ``arp_validate``: Set bond attribute
+  - ``downdelay``: Set bond attribute
+  - ``fail_over_mac``: Set bond attribute
+  - ``lacp_rate``: Set bond attribute
+  - ``lp_interval``: Set bond attribute
+  - ``miimon``: Set bond attribute
+  - ``mii_status``: Set bond attribute
+  - ``min_links``: Set bond attribute
+  - ``mode``: Set bond attribute
+  - ``num_grat_arp``: Set bond attribute
+  - ``num_unsol_na``: Set bond attribute
+  - ``packets_per_slave``: Set bond attribute
+  - ``primary``: Set bond attribute
+  - ``primary_reselect``: Set bond attribute
+  - ``queue_id``: Set bond attribute
+  - ``resend_igmp``: Set bond attribute
+  - ``slaves``: Set bond attribute
+  - ``tlb_dynamic_lb``: Set bond attribute
+  - ``updelay``: Set bond attribute
+  - ``use_carrier``: Set bond attribute
+  - ``xmit_hash_policy``: Set bond attribute
+ 
+**Bond Example**::
+
+   network:
+    version: 1
+    config:
+      # Simple network adapter
+      - type: physical
+        name: interface0
+        mac_address: 00:11:22:33:44:55
+      # 10G pair
+      - type: physical
+        name: gbe0
+        mac_address: cd:11:22:33:44:00
+      - type: physical
+        name: gbe1
+        mac_address: cd:11:22:33:44:02
+      - type: bond
+        name: bond0
+        bond_interfaces:
+          - gbe0
+          - gbe1
+        params:
+          bond-mode: active-backup
+ 
+Bridge
+~~~~~~
+Type ``bridge`` requires the following keys:
+
+- ``name``: Set the name of the bridge.
+- ``bridge_interfaces``: Specify the ports of a bridge via their ``name``.  This list may be empty.
+- ``params``:  A list of bridge params.  For more details, please read the bridge-utils-interfaces manpage.
+
+Valid keys are:
+
+  - ``bridge_ageing``: Set the bridge's ageing value.
+  - ``bridge_bridgeprio``: Set the bridge device network priority.
+  - ``bridge_fd``: Set the bridge's forward delay.
+  - ``bridge_hello``: Set the bridge's hello value.
+  - ``bridge_hw``: Set the bridge's MAC address.
+  - ``bridge_maxage``: Set the bridge's maxage value.
+  - ``bridge_maxwait``:  Set how long network scripts should wait for the bridge to be up.
+  - ``bridge_pathcost``:  Set the cost of a specific port on the bridge.
+  - ``bridge_portprio``:  Set the priority of a specific port on the bridge.
+  - ``bridge_ports``:  List of devices that are part of the bridge.
+  - ``bridge_stp``:  Set spanning tree protocol on or off.
+  - ``bridge_waitport``: Set amount of time in seconds to wait on specific ports to become available.
+
+
+**Bridge Example**::
+
+   network:
+    version: 1
+    config:
+      # Simple network adapter
+      - type: physical
+        name: interface0
+        mac_address: 00:11:22:33:44:55
+      # Second nic with Jumbo frames
+      - type: physical
+        name: jumbo0
+        mac_address: aa:11:22:33:44:55
+        mtu: 9000
+      - type: bridge
+        name: br0
+        bridge_interfaces:
+          - jumbo0
+        params:
+          bridge_ageing: 250
+          bridge_bridgeprio: 22
+          bridge_fd: 1
+          bridge_hello: 1
+          bridge_maxage: 10
+          bridge_maxwait: 0
+          bridge_pathcost:
+            - jumbo0 75
+          bridge_portprio:
+            - jumbo0 28
+          bridge_stp: 'off'
+          bridge_waitport:
+            - jumbo0 0
+
+  
+VLAN
+~~~~
+Type ``vlan`` requires the following keys:
+
+- ``name``: Set the name of the VLAN
+- ``vlan_link``: Specify the underlying link via its ``name``.
+- ``vlan_id``: Specify the VLAN numeric id.
+
+**VLAN Example**::
+
+   network:
+     version: 1
+     config:
+       # Physical interfaces.
+       - type: physical
+         name: eth0
+         mac_address: "c0:d6:9f:2c:e8:80"
+       # VLAN interface.
+       - type: vlan
+         name: eth0.101
+         vlan_link: eth0
+         vlan_id: 101
+         mtu: 1500
+
+Nameserver
+~~~~~~~~~~
+
+Users can specify a ``nameserver`` type.  Nameserver dictionaries include
+the following keys:
+
+- ``address``: List of IPv4 or IPv6 addresses of nameservers.
+- ``search``: List of hostnames to include in the resolv.conf search path.
+
+**Nameserver Example**::
+
+  network:
+    version: 1
+    config:
+      - type: physical
+        name: interface0
+        mac_address: 00:11:22:33:44:55
+        subnets:
+           - type: static
+             address: 192.168.23.14/27
+             gateway: 192.168.23.1
+      - type: nameserver
+        address:
+          - 192.168.23.2
+          - 8.8.8.8
+        search:
+          - exemplary
+
+     
+
+Route
+~~~~~
+
+Users can include static routing information as well.  A ``route`` dictionary
+has the following keys:
+
+- ``destination``: IPv4 network address with CIDR netmask notation.
+- ``gateway``: IPv4 gateway address with CIDR netmask notation.
+- ``metric``: Integer which sets the network metric value for this route.
+
+**Route Example**::
+
+  network:
+    version: 1
+    config:
+      - type: physical
+        name: interface0
+        mac_address: 00:11:22:33:44:55
+        subnets:
+           - type: static
+             address: 192.168.23.14/24
+             gateway: 192.168.23.1
+      - type: route
+        destination: 192.168.24.0/24
+        gateway: 192.168.24.1
+        metric: 3
+
+Subnet/IP
+~~~~~~~~~
+
+For any network device (one of the Config Types) users can define a list of
+``subnets`` which contain ip configuration dictionaries.  Multiple subnet
+entries will create interface aliases, allowing a single interface to use
+different ip configurations.
+
+Valid keys for ``subnets`` include the following:
+
+- ``type``: Specify the subnet type.
+- ``control``: Specify manual, auto or hotplug.  Indicates how the interface will be handled during boot.
+- ``address``: IPv4 or IPv6 address.  It may include CIDR netmask notation.
+- ``netmask``: IPv4 subnet mask in dotted format or CIDR notation.
+- ``gateway``: IPv4 address of the default gateway for this subnet.
+- ``dns_nameservers``: Specify a list of IPv4 dns server IPs to end up in resolv.conf.
+- ``dns_search``: Specify a list of search paths to be included in resolv.conf.
+
+
+Subnet types are one of the following:
+
+- ``dhcp4``: Configure this interface with IPv4 dhcp.
+- ``dhcp``: Alias for ``dhcp4``
+- ``dhcp6``: Configure this interface with IPv6 dhcp.
+- ``static``: Configure this interface with a static IPv4.
+- ``static6``: Configure this interface with a static IPv6 address.
+
+When making use of ``dhcp`` types, no additional configuration is needed in the
+subnet dictionary.
+
+
+**Subnet DHCP Example**::
+
+   network:
+     version: 1
+     config:
+       - type: physical
+         name: interface0
+         mac_address: 00:11:22:33:44:55
+         subnets:
+           - type: dhcp
+
+
+**Subnet Static Example**::
+
+   network:
+     version: 1
+     config:
+       - type: physical
+         name: interface0
+         mac_address: 00:11:22:33:44:55
+         subnets:
+           - type: static
+             address: 192.168.23.14/27
+             gateway: 192.168.23.1
+             dns_nameservers:
+               - 192.168.23.2
+               - 8.8.8.8
+             dns_search:
+               - exemplary.maas
+
+The following will result in an ``interface0`` using DHCP and ``interface0:1``
+using the static subnet configuration.
+
+**Multiple subnet Example**::
+
+   network:
+     version: 1
+     config:
+       - type: physical
+         name: interface0
+         mac_address: 00:11:22:33:44:55
+         subnets:
+           - type: dhcp
+           - type: static
+             address: 192.168.23.14/27
+             gateway: 192.168.23.1
+             dns_nameservers:
+               - 192.168.23.2
+               - 8.8.8.8
+             dns_search:
+               - exemplary
+
+
+Multi-layered configurations
+----------------------------
+
+Complex networking sometimes uses layers of configuration.  The syntax allows
+users to build those layers one at a time.  All of the virtual network devices
+supported allow specifying an underlying device by their ``name`` value.
+
+**Bonded VLAN Example**::
+
+  network:
+    version: 1
+    config:
+      # 10G pair
+      - type: physical
+        name: gbe0
+        mac_address: cd:11:22:33:44:00
+      - type: physical
+        name: gbe1
+        mac_address: cd:11:22:33:44:02
+      # Bond.
+      - type: bond
+        name: bond0
+        bond_interfaces:
+          - gbe0
+          - gbe1
+        params:
+          bond-mode: 802.3ad
+          bond-lacp-rate: fast
+      # A Bond VLAN.
+      - type: vlan
+        name: bond0.200
+        vlan_link: bond0
+        vlan_id: 200
+        subnets:
+          - type: dhcp4
+
+More Examples
+-------------
+Some more examples to explore the various options available.
+
+**Multiple VLAN example**::
+
+  network:
+    version: 1
+    config:
+    - id: eth0
+      mac_address: d4:be:d9:a8:49:13
+      mtu: 1500
+      name: eth0
+      subnets:
+      - address: 10.245.168.16/21
+        dns_nameservers:
+        - 10.245.168.2
+        gateway: 10.245.168.1
+        type: static
+      type: physical
+    - id: eth1
+      mac_address: d4:be:d9:a8:49:15
+      mtu: 1500
+      name: eth1
+      subnets:
+      - address: 10.245.188.2/24
+        dns_nameservers: []
+        type: static
+      type: physical
+    - id: eth1.2667
+      mtu: 1500
+      name: eth1.2667
+      subnets:
+      - address: 10.245.184.2/24
+        dns_nameservers: []
+        type: static
+      type: vlan
+      vlan_id: 2667
+      vlan_link: eth1
+    - id: eth1.2668
+      mtu: 1500
+      name: eth1.2668
+      subnets:
+      - address: 10.245.185.1/24
+        dns_nameservers: []
+        type: static
+      type: vlan
+      vlan_id: 2668
+      vlan_link: eth1
+    - id: eth1.2669
+      mtu: 1500
+      name: eth1.2669
+      subnets:
+      - address: 10.245.186.1/24
+        dns_nameservers: []
+        type: static
+      type: vlan
+      vlan_id: 2669
+      vlan_link: eth1
+    - id: eth1.2670
+      mtu: 1500
+      name: eth1.2670
+      subnets:
+      - address: 10.245.187.2/24
+        dns_nameservers: []
+        type: static
+      type: vlan
+      vlan_id: 2670
+      vlan_link: eth1
+    - address: 10.245.168.2
+      search:
+      - dellstack
+      type: nameserver
+

=== modified file 'doc/topics/overview.rst'
--- doc/topics/overview.rst	2013-07-29 16:12:09 +0000
+++ doc/topics/overview.rst	2016-10-03 18:55:20 +0000
@@ -20,7 +20,7 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~
 At the moment, curtin doesn't address how the system that it is running on is booted.  It could be booted from a live-cd or from a pxe boot environment.  It could even be booted off a disk in the system (although installation to that disk would probably break things).
 
-Curtin's assumption is that a fairly rich linux (Ubuntu) environment is booted.
+Curtin's assumption is that a fairly rich Linux (Ubuntu) environment is booted.
 
 Early Commands
 ~~~~~~~~~~~~~~
@@ -38,13 +38,13 @@
 
 Partitioning
 ~~~~~~~~~~~~
-Partitioning covers setting up filesystems on the system.  A series of commands are run serially in order.  At the end, a fstab formated file must be populated in ``OUTPUT_FSTAB`` that contains mount information, and the filesystems are expected to be mounted at the ``TARGET_MOUNT_POINT``.
+Partitioning covers setting up filesystems on the system.  A series of commands are run serially in order.  At the end, a fstab formatted file must be populated in ``OUTPUT_FSTAB`` that contains mount information, and the filesystems are expected to be mounted at the ``TARGET_MOUNT_POINT``.
 
 Any commands can be used to create this filesystem, but curtin contains some tools to facilitate with this process.
 
 **Config Example**::
 
- paritioning_commands:
+ partitioning_commands:
   10_wipe_filesystems: curtin wipe --quick --all-unused-disks
   50_setup_raid: curtin disk-setup --all-disks raid0 /
 
@@ -53,7 +53,7 @@
 Partitioning commands have the following environment variables available to them:
 
 - ``WORKING_DIR``: This is simply for some sort of inter-command state.  It will be the same directory for each command run and will only be deleted at the end of all partitioning_commands.
-- ``OUTPUT_FSTAB``: This is the target path for a fstab file.  After all partitioning commands have been run, a file should exist, formated per fstab(5) that describes how the filesystems should be mounted.
+- ``OUTPUT_FSTAB``: This is the target path for a fstab file.  After all partitioning commands have been run, a file should exist, formatted per fstab(5) that describes how the filesystems should be mounted.
 - ``TARGET_MOUNT_POINT``:
 
 
@@ -61,7 +61,7 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Networking is done in a similar fashion to partitioning.  A series of commands, specified in the config are run.  At the end of these commands, a interfaces(5) style file is expected to be written to ``OUTPUT_INTERFACES``.
 
-Note, that as with fstab, this file is not copied verbatum to the target filesystem, but rather made availble to the OS customization stage.  That stage may just copy the file verbatum, but may also parse it, and use that as input.
+Note, that as with fstab, this file is not copied verbatim to the target filesystem, but rather made available to the OS customization stage.  That stage may just copy the file verbatim, but may also parse it, and use that as input.
 
 **Config Example**::
 
@@ -73,7 +73,7 @@
 Networking commands have the following environment variables available to them:
 
 - ``WORKING_DIR``: This is simply for some sort of inter-command state.  It will be the same directory for each command run and will only be deleted at the end of all network_commands.
-- ``OUTPUT_INTERFACES``: This is the target path for an interfaces style file. After all commands have been run, a file should exist, formated per interfaces(5) that describes the systems network setup.
+- ``OUTPUT_INTERFACES``: This is the target path for an interfaces style file. After all commands have been run, a file should exist, formatted per interfaces(5) that describes the systems network setup.
 
 Extraction of sources
 ~~~~~~~~~~~~~~~~~~~~~
@@ -84,7 +84,7 @@
  sources:
   05_primary: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64-root.tar.gz
 
-Given the source above, curtin will essentiall do a::
+Given the source above, curtin will essentially do a::
 
  wget $URL | tar -Sxvzf 
 

=== modified file 'doc/topics/reporting.rst'
--- doc/topics/reporting.rst	2016-05-10 16:13:29 +0000
+++ doc/topics/reporting.rst	2016-10-03 18:55:20 +0000
@@ -7,7 +7,7 @@
 
 Events
 ------
-Reporting consists of notifcation of a series of 'events.  Each event has:
+Reporting consists of notification of a series of 'events'.  Each event has:
  - **event_type**: 'start' or 'finish'
  - **description**: human readable text
  - **name**: and id for this event
@@ -48,7 +48,7 @@
 Each entry must have a 'type'.  The currently supported values are:
  - **log**: logs via python logger
  - **print**: prints messages to stdout (for debugging)
- - **webhook**: posts json formated data to a remote url.  Supports Oauth.
+ - **webhook**: posts json formatted data to a remote url.  Supports Oauth.
 
 
 Additionally, the webhook reporter will post files on finish of curtin.  The user can declare which files should be posted in the ``install`` item via ``post_files`` as shown above.  If post_files is not present, it will default to the value of log_file.
@@ -154,7 +154,7 @@
 Legacy Reporter
 ---------------
 The legacy 'reporter' config entry is still supported.  This was utilized by
-MAAS for start/end and posting of the install log at the end of isntallation.
+MAAS for start/end and posting of the install log at the end of installation.
 
 Its configuration looks like this:
 

=== added file 'doc/topics/storage.rst'
--- doc/topics/storage.rst	1970-01-01 00:00:00 +0000
+++ doc/topics/storage.rst	2016-10-03 18:55:20 +0000
@@ -0,0 +1,894 @@
+=======
+Storage
+=======
+
+Curtin supports a user-configurable storage layout.  This format lets users
+(including via MAAS) customize their machines' storage configuration by
+creating partitions, RAIDs, LVMs, formatting with file systems and setting
+mount points.
+
+Custom storage configuration is handled by the ``block-meta custom`` command
+in curtin. Partitioning layout is read as a list of in-order modifications to
+make to achieve the desired configuration. The top level configuration key
+containing this configuration is ``storage``. This key should contain a
+dictionary with at least a version number and the configuration list. The
+current config specification is ``version: 1``.
+
+**Config Example**::
+
+ storage:
+   version: 1
+   config:
+     - id: sda
+       type: disk
+       ptable: gpt
+       serial: QM00002
+       model: QEMU_HARDDISK
+
+Configuration Types
+-------------------
+Each entry in the config list is a dictionary with several keys which vary
+between commands. The two dictionary keys that every entry in the list needs
+to have are ``id: <id>`` and ``type: <type>``.
+
+An entry's ``id`` allows other entries in the config to refer to a specific
+entry. It can be any string other than one with a special meaning in yaml, such
+as ``true`` or ``none``.
+
+An entry's ``type`` tells curtin how to handle a particular entry. Available
+commands include:
+
+- Disk Command (``disk``)
+- Partition Command (``partition``)
+- Format Command (``format``)
+- Mount Command  (``mount``)
+- LVM_VolGroup Command (``lvm_volgroup``)
+- LVM_Partition Command (``lvm_partition``)
+- DM_Crypt Command (``dm_crypt``)
+- RAID Command (``raid``)
+- Bcache Command (``bcache``)
+
+Disk Command
+~~~~~~~~~~~~
+The disk command sets up disks for use by curtin. It can wipe the disks, create
+partition tables, or just verify that the disks exist with an existing partition
+table. A disk command may contain all or some of the following keys:
+
+**ptable**: *msdos, gpt*
+
+If the ``ptable`` key is present and a valid type of partition table, curtin
+will create an empty partition table of that type on the disk.  At the moment,
+msdos and gpt partition tables are supported.
+
+**serial**: *<serial number>*
+
+In order to uniquely identify a disk on the system its serial number should be
+specified. This ensures that even if additional storage devices
+are added to the system during installation, or udev rules cause the path to a
+disk to change, curtin will still be able to correctly identify the disk it
+should be operating on using ``/dev/disk/by-id``.
+
+This is the preferred way to identify a disk and should be used in all
+production environments as it is less likely to point to an incorrect device.
+
+**path**: *<path to device with leading /dev>*
+
+The ``path`` key can be used to identify the disk.  If both ``serial`` and
+``path`` are specified, curtin will use the serial number and ignore the path
+that was specified.
+
+**model**: *<disk model>*
+
+This can specify the manufacturer or model of the disk. It is not currently
+used by curtin, but can be useful for a human reading a config file. Future
+versions of curtin may make use of this information.
+
+**wipe**: *superblock, superblock-recursive, zero, random*
+
+If wipe is specified, **the disk contents will be destroyed**.  In the case
+that a disk is part of a virtual block device, like bcache, a RAID array, or
+LVM, then curtin will attempt to tear down the virtual device to allow access
+to the disk for resetting the disk.
+
+The most common option for clearing a disk is ``wipe: superblock``.  In some
+cases use of ``wipe: superblock-recursive`` is useful to ensure that embedded
+superblocks on a disk aren't rediscovered during probing.  For example, LVM,
+bcache and RAID on a partition would have metadata outside of the range of a
+superblock wipe of the start and end sections of the disk.
+
+The ``wipe: zero`` option will write zeros to each sector of the disk.
+Depending on the size and speed of the disk, it may take a long time to
+complete.
+
+The ``wipe: random`` option will write pseudo-random data from /dev/urandom.
+Depending on the size and speed of the disk, it may take a long time to
+complete.
+
+**preserve**: *true, false*
+
+When the preserve key is present and set to ``true`` curtin will attempt
+to use the disk without damaging data present on it. If ``preserve`` is set and
+``ptable`` is also set, then curtin will validate that the partition table
+specified by ``ptable`` exists on the disk and will raise an error if it does
+not. If ``preserve`` is set and ``ptable`` is not, then curtin will be able to
+use the disk in later commands, but will not check if the disk has a valid
+partition table, and will only verify that the disk exists.
+
+It can be dangerous to try to move or re-size filesystems and partitions
+containing data that needs to be preserved. Therefore curtin does not support
+preserving a disk without also preserving the partitions on it. If a disk is
+set to be preserved and curtin is told to move a partition on that disk,
+installation will stop. It is still possible to reformat partitions that do
+not need to be preserved.
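+
+For instance, a minimal sketch of reusing an existing gpt disk without
+modifying its data (the serial shown is illustrative)::
+
+ - id: disk0
+   type: disk
+   serial: QM00002
+   ptable: gpt
+   preserve: true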
+
+**name**: *<name>*
+
+If the ``name`` key is present, curtin will create a udev rule that makes a
+symbolic link to the disk with the given name value. This makes it easy to find
+disks on an installed system. The links are created in
+``/dev/disk/by-dname/<name>``.
+A link to each partition on the disk will also be created at
+``/dev/disk/by-dname/<name>-part<number>``, so if ``name: maindisk`` is set,
+the disk will be at ``/dev/disk/by-dname/maindisk`` and the first partition on
+it will be at ``/dev/disk/by-dname/maindisk-part1``.
+
+**grub_device**: *true, false*
+
+If the ``grub_device`` key is present and set to true, grub will be installed
+onto this disk when post-installation hooks are run. In most
+situations it is not necessary to specify this value as curtin will detect
+and determine which device to use as a boot disk.  In cases where the boot
+device is on a special volume, such as a RAID array or an LVM Logical Volume,
+it may be necessary to specify the device that will hold the grub bootloader.
+
+**Config Example**::
+
+ - id: disk0
+   type: disk
+   ptable: gpt
+   serial: QM00002
+   model: QEMU_HARDDISK
+   name: maindisk
+   wipe: superblock
+
+Partition Command
+~~~~~~~~~~~~~~~~~
+The partition command creates a single partition on a disk. Curtin only needs
+to be told which disk to use and the size of the partition.  Additional options
+are available.
+
+**number**: *<number>*
+
+The partition number can be specified using ``number``. However, numbers must
+be in order, and some situations, such as extended/logical partitions on msdos
+partition tables, require special numbering, so it may be better to omit the
+partition number. If the ``number`` key is not present, curtin will attempt to
+determine the right number to use.
+
+**size**: *<size>*
+
+The partition size can be specified with the ``size`` key. Sizes must be
+given with an appropriate SI unit, such as *B, kB, MB, GB, TB*, or using just
+the appropriate SI prefix, e.g. *B, k, M, G, T...*
+
+.. note::
+
+  Curtin does not adjust size values.  If you specify a size that exceeds the
+  capacity of a device then installation will fail.
+
+**device**: *<device id>*
+
+The ``device`` key refers to the ``id`` of a disk in the storage configuration.
+The disk entry must already be defined in the list of commands to ensure that
+it has already been processed.
+
+**wipe**: *superblock, pvremove, zero, random*
+
+After the partition is added to the disk's partition table, curtin can run a
+wipe command on the partition. The wipe command values are the same as for
+disks.
+
+**flag**: *logical, extended, boot, bios_grub, swap, lvm, raid, home, prep*
+
+If the ``flag`` key is present, curtin will set the specified flag on the
+partition. Note that some flags only apply to msdos partition tables, and some
+only apply to gpt partition tables.
+
+The *logical/extended* partition flags can be used to create logical partitions
+on a msdos table. An extended partition should be created containing all of the
+empty space on the drive, and logical partitions can be created within it. An
+extended partition must already be present to create logical partitions. If the
+``number`` key is set for an extended partition, it must be set to 4, and
+each logical partition should be numbered starting from 5.
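+
+A sketch of such a layout (ids and sizes are illustrative)::
+
+ - id: disk0-part4
+   type: partition
+   number: 4
+   size: 5GB
+   flag: extended
+   device: disk0
+ - id: disk0-part5
+   type: partition
+   number: 5
+   size: 2GB
+   flag: logical
+   device: disk0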
+
+On msdos partition tables, the *boot* flag marks that partition as bootable.
+On gpt partition tables, the boot flag sets the esp flag on the
+partition.
+
+If the host system for curtin has been booted using UEFI, then curtin will
+install grub to the esp partition. If the system installation media
+has been booted using an MBR, grub will be installed onto the disk's MBR.
+However, on a disk with a gpt partition table, there is not enough space after
+the MBR for grub to store its second-stage core.img, so a small unformatted
+partition with the *bios_grub* flag is needed. This partition should be placed
+at the beginning of the disk and should be 1MB in size. It should not contain a
+filesystem or be mounted anywhere on the system.
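+
+For example, a sketch of such a partition (the id is illustrative)::
+
+ - id: bios_boot
+   type: partition
+   size: 1MB
+   flag: bios_grub
+   device: disk0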
+
+**preserve**: *true, false*
+
+If the preserve flag is set to true, curtin will verify that the partition
+exists and will not modify the partition.
+
+**Config Example**::
+
+ - id: disk0-part1
+   type: partition
+   number: 1
+   size: 8GB
+   device: disk0
+   flag: boot
+
+Format Command
+~~~~~~~~~~~~~~
+The format command makes filesystems on a volume. The filesystem type and
+target volume can be specified, as well as a few other options.
+
+**fstype**: ext4, ext3, fat32, fat16, swap, xfs
+
+The ``fstype`` key specifies what type of filesystem format curtin should use
+for this volume. Curtin knows about common Linux filesystems such as ext4/3 and
+fat filesystems and makes use of additional parameters and flags to optimize the
+filesystem.  If the ``fstype`` value is not known to curtin, that is not fatal.
+Curtin will check if ``mkfs.<fstype>`` exists and, if so, will use that tool to
+format the target volume.
+
+For fat filesystems, the size of the fat table can be specified by entering
+*fat64*, *fat32*, *fat16*, or *fat12* instead of just entering *fat*.
+If *fat* is used, then ``mkfs.fat`` will automatically determine the best
+size fat table to use, typically *fat32*.
+
+If ``fstype: swap`` is set, curtin will create a swap partition on the target
+volume.
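+
+For instance, a sketch of formatting a partition as swap (ids are
+illustrative)::
+
+ - id: disk0-part2-swap
+   type: format
+   fstype: swap
+   volume: disk0-part2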
+
+**volume**: *<volume id>*
+
+The ``volume`` key refers to the ``id`` of the target volume in the storage
+config.  The target volume must already exist and be accessible. Any type
+of target volume can be used as long as it has a block device that curtin
+can locate.
+
+**label**: *<volume name>*
+
+The ``label`` key tells curtin to create a filesystem LABEL when formatting a
+volume. Note that not all filesystem types support labels and that there are
+length limits for labels. For fat filesystems, labels are limited to 11
+characters. For ext4/3 filesystems, labels are limited to 16 characters.
+
+If curtin does not know about the filesystem type it is using, then the
+``label`` key will be ignored, because curtin will not know the correct flags
+to set the label value in the filesystem metadata.
+
+**uuid**: *<uuid>*
+
+If the ``uuid`` key is set and ``fstype`` is set to *ext4* or *ext3*, then
+curtin will set the uuid of the new filesystem to the specified value.
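+
+For example, a sketch pinning the filesystem UUID (the UUID shown is
+illustrative)::
+
+ - id: disk0-part1-fs1
+   type: format
+   fstype: ext4
+   uuid: 2e1b63b3-5b4d-4e70-8f2c-9a1d3e2f1c10
+   volume: disk0-part1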
+
+**preserve**: *true, false*
+
+If the ``preserve`` key is set to true, curtin will not format the partition.
+
+**Config Example**::
+
+ - id: disk0-part1-fs1
+   type: format
+   fstype: ext4
+   label: cloud-image
+   volume: disk0-part1
+
+Mount Command
+~~~~~~~~~~~~~
+The mount command mounts the target filesystem and creates an entry for it in
+the newly installed system's ``/etc/fstab``. The path to the target mountpoint
+must be specified as well as the target filesystem.
+
+**path**: *<path>*
+
+The ``path`` key tells curtin where the filesystem should be mounted on the
+target system. An entry in the target system's ``/etc/fstab`` will be created
+for the target device which will mount it in the correct place once the
+installed system boots.
+
+If the device specified is formatted as swap space, then an entry will be added
+to the target system's ``/etc/fstab`` to make use of this swap space.
+
+When entries are created in ``/etc/fstab``, curtin will use the most reliable
+method available to identify each device. For regular partitions, curtin will
+use the UUID of the filesystem present on the partition. For special devices,
+such as RAID arrays, or LVM logical volumes, curtin will use their normal path
+in ``/dev``.
+
+**device**: *<device id>*
+
+The ``device`` key refers to the ``id`` of the target device in the storage
+config. The target device must already contain a valid filesystem and be
+accessible.
+
+**Config Example**::
+
+ - id: disk0-part1-fs1-mount0
+   type: mount
+   path: /home
+   device: disk0-part1-fs1
+
+Lvm Volgroup Command
+~~~~~~~~~~~~~~~~~~~~
+The lvm_volgroup command creates LVM Physical Volumes (PVs) and connects them
+into an LVM Volume Group (VG). The command requires a name for the volgroup and
+a list of the devices that should be used as physical volumes.
+
+**name**: *<name>*
+
+The ``name`` key specifies the name of the volume group.  Almost any name can
+be used, except words with special meanings in YAML, such as *true* or *none*.
+
+**devices**: *[]*
+
+The ``devices`` key gives a list of devices to use as physical volumes. Each
+device is specified using the ``id`` of an existing device in the storage
+config. Almost anything can be used as a device, such as partitions, whole
+disks, or RAID arrays.
+
+**Config Example**::
+
+ - id: volgroup1
+   type: lvm_volgroup
+   name: vg1
+   devices:
+     - disk0-part2
+     - disk1
+
+Lvm Partition Command
+~~~~~~~~~~~~~~~~~~~~~
+The lvm_partition command creates an LVM logical volume on the specified
+volgroup with the specified size, and assigns it the specified name.
+
+**name**: *<name>*
+
+The ``name`` key specifies the name of the Logical Volume (LV) to be created.
+
+Curtin creates udev rules for Logical Volumes to give them consistently named 
+symbolic links in the target system under ``/dev/disk/by-dname/``. The naming
+scheme for Logical Volumes follows the pattern
+``<volgroup name>-<logical volume name>``.  For example a ``lvm_partition``
+with ``name`` *lv1* on a ``lvm_volgroup`` named *vg1* would have the path
+``/dev/disk/by-dname/vg1-lv1``.
+
+**volgroup**: *<volgroup id>*
+
+The ``volgroup`` key specifies the ``id`` of the Volume Group in which to
+create the logical volume. The volgroup must already have been created and must
+have enough free space on it to create the logical volume.  The volgroup should
+be specified using the ``id`` key of the volgroup in the storage config, not the
+name of the volgroup.
+
+**size**: *<size>*
+
+The ``size`` key tells curtin what size to make the logical volume. The size
+can be entered in any format that can be processed by the lvm2 tools, so a
+number followed by an SI unit should work, e.g. *B, kB, MB, GB, TB*.
+
+If the ``size`` key is omitted then all remaining space on the volgroup will be
+used for the logical volume.
+
+.. note::
+
+  Curtin does not adjust size values.  If you specify a size that exceeds the
+  capacity of a device then installation will fail.
+
+
+**Config Example**::
+
+ - id: lvm_partition_1
+   type: lvm_partition
+   name: lv1
+   volgroup: volgroup1
+   size: 10G
+
+
+**Combined Example**::
+
+ - id: volgroup1
+   type: lvm_volgroup
+   name: vg1
+   devices:
+     - disk0-part2
+     - disk1
+ - id: lvm_partition_1
+   type: lvm_partition
+   name: lv1
+   volgroup: volgroup1
+   size: 10G
+
+
+
+Dm-Crypt Command
+~~~~~~~~~~~~~~~~
+The dm_crypt command creates encrypted volumes using ``cryptsetup``. It
+requires a name for the encrypted volume, the volume to be encrypted and a key.
+Note that this should not be used for systems where security is a requirement:
+the key is stored in plain text in the storage configuration, and the
+configuration could be intercepted between the utility that generates it and
+curtin.
+
+**volume**: *<volume id>*
+
+The ``volume`` key gives the volume that is to be encrypted.
+
+**dm_name**: *<name>*
+
+The ``dm_name`` key specifies the name of the encrypted volume.
+
+**key**: *<key>*
+
+The ``key`` key specifies the passphrase used for encryption.  The target
+system will prompt for this passphrase in order to mount the disk.
+
+.. note::
+
+  Encrypted disks and partitions are tracked in ``/etc/crypttab`` and will be
+  mounted at boot time.
+
+**Config Example**::
+
+ - id: dmcrypt0
+   type: dm_crypt
+   dm_name: crypto
+   volume: sdb1
+   key: testkey
+
+RAID Command
+~~~~~~~~~~~~
+The RAID command configures Linux Software RAID using mdadm. It needs to be
+given a name for the md device, a list of volumes to compose the md device, an
+optional list of devices to be used as spare volumes, and a RAID level.
+
+**name**: *<name>*
+
+The ``name`` key specifies the name of the md device.
+
+.. note::
+
+  Curtin creates a udev rule to create a link to the md device in
+  ``/dev/disk/by-dname/<name>`` using the specified name.
+
+**raidlevel**: *0, 1, 5, 6, 10*
+
+The ``raidlevel`` key specifies the raid level of the array.
+
+**devices**: *[]*
+
+The ``devices`` key specifies a list of the devices that will be used for the
+raid array. Each device must be referenced by ``id`` and the device must be
+previously defined in the storage configuration.  Must not be empty.
+
+Devices can be either full disks or partitions.
+
+
+**spare_devices**: *[]*
+
+The ``spare_devices`` key specifies a list of the devices that will be used for
+spares in the raid array. Each device must be referenced by ``id`` and the
+device must be previously defined in the storage configuration.  May be empty.
+
+
+**Config Example**::
+
+ - id: raid_array
+   type: raid
+   name: md0
+   raidlevel: 1
+   devices:
+     - sdb
+     - sdc
+   spare_devices:
+     - sdd
+
+Bcache Command
+~~~~~~~~~~~~~~
+The bcache command will configure a block-cache device using the Linux kernel
+bcache module.  Bcache allows users to use a typically small but fast SSD or
+NVMe device as a cache for larger, slower spinning disks.
+
+The bcache command needs to be told which device to use to hold the data and
+which device to use as its cache device.  A cache device may be reused with
+multiple backing devices.
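+
+For example, a sketch of one cache device shared by two backing devices (ids
+are illustrative)::
+
+ - id: bcache0
+   type: bcache
+   backing_device: disk0-part2
+   cache_device: ssd0
+ - id: bcache1
+   type: bcache
+   backing_device: disk1-part1
+   cache_device: ssd0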
+
+
+**backing_device**: *<device id>*
+
+The ``backing_device`` key specifies the item in storage configuration to use
+as the backing device. This can be any device that would normally be used with
+a filesystem on it, such as a partition or a raid array.
+
+**cache_device**: *<device id>*
+
+The ``cache_device`` key specifies the item in the storage configuration to use
+as the cache device. This can be a partition or a whole disk. It should be on
+an SSD in most cases, as bcache is designed around the performance
+characteristics of an SSD.
+
+**cache_mode**: *writethrough, writeback, writearound, none*
+
+The ``cache_mode`` key specifies the mode in which bcache operates.  The
+default mode is *writethrough*, which ensures data hits the backing device
+before the operation completes.  *writeback* mode gives higher performance,
+but risks data loss if the cache device fails.  *writearound* avoids using
+the cache for large sequential writes, which is useful to keep smaller
+reads/writes from being evicted from the cache.  *none* effectively disables
+bcache.
+
+**name**: *<name>*
+
+If the ``name`` key is present, curtin will create a link to the device at
+``/dev/disk/by-dname/<name>``.
+
+**Config Example**::
+
+ - id: bcache0
+   type: bcache
+   name: cached_raid
+   backing_device: raid_array
+   cache_device: sdb
+
+
+Additional Examples
+-------------------
+
+Learn by example.
+
+- Basic
+- LVM
+- Bcache
+- RAID Boot
+- RAID5 + Bcache
+
+Basic Layout
+~~~~~~~~~~~~
+
+::
+
+  storage:
+    version: 1
+    config:
+      - id: disk0
+        type: disk
+        ptable: msdos
+        model: QEMU HARDDISK
+        path: /dev/vdb
+        name: main_disk
+        wipe: superblock
+        grub_device: true
+      - id: disk0-part1
+        type: partition
+        number: 1
+        size: 3GB
+        device: disk0
+        flag: boot
+      - id: disk0-part2
+        type: partition
+        number: 2
+        size: 1GB
+        device: disk0
+      - id: disk0-part1-format-root
+        type: format
+        fstype: ext4
+        volume: disk0-part1
+      - id: disk0-part2-format-home
+        type: format
+        fstype: ext4
+        volume: disk0-part2
+      - id: disk0-part1-mount-root
+        type: mount
+        path: /
+        device: disk0-part1-format-root
+      - id: disk0-part2-mount-home
+        type: mount
+        path: /home
+        device: disk0-part2-format-home
+
+LVM
+~~~
+
+::
+
+  storage:
+    version: 1
+    config:
+      - id: sda
+        type: disk
+        ptable: msdos
+        model: QEMU HARDDISK
+        path: /dev/vdb
+        name: main_disk
+      - id: sda1
+        type: partition
+        size: 3GB
+        device: sda
+        flag: boot
+      - id: sda_extended
+        type: partition
+        size: 5G
+        flag: extended
+        device: sda
+      - id: sda2
+        type: partition
+        size: 2G
+        flag: logical
+        device: sda
+      - id: sda3
+        type: partition
+        size: 3G
+        flag: logical
+        device: sda
+      - id: volgroup1
+        name: vg1
+        type: lvm_volgroup
+        devices:
+            - sda2
+            - sda3
+      - id: lvmpart1
+        name: lv1
+        size: 1G
+        type: lvm_partition
+        volgroup: volgroup1
+      - id: lvmpart2
+        name: lv2
+        type: lvm_partition
+        volgroup: volgroup1
+      - id: sda1_root
+        type: format
+        fstype: ext4
+        volume: sda1
+      - id: lv1_fs
+        name: storage
+        type: format
+        fstype: fat32
+        volume: lvmpart1
+      - id: lv2_fs
+        name: storage
+        type: format
+        fstype: ext3
+        volume: lvmpart2
+      - id: sda1_mount
+        type: mount
+        path: /
+        device: sda1_root
+      - id: lv1_mount
+        type: mount
+        path: /srv/data
+        device: lv1_fs
+      - id: lv2_mount
+        type: mount
+        path: /srv/backup
+        device: lv2_fs
+
+Bcache
+~~~~~~
+
+::
+
+  storage:
+    version: 1
+    config:
+      - id: id_rotary0
+        type: disk
+        name: rotary0
+        path: /dev/vdb
+        ptable: msdos
+        wipe: superblock
+        grub_device: true
+      - id: id_ssd0
+        type: disk
+        name: ssd0
+        path: /dev/vdc
+        wipe: superblock
+      - id: id_rotary0_part1
+        type: partition
+        name: rotary0-part1
+        device: id_rotary0
+        number: 1
+        size: 999M
+        wipe: superblock
+      - id: id_rotary0_part2
+        type: partition
+        name: rotary0-part2
+        device: id_rotary0
+        number: 2
+        size: 9G
+        wipe: superblock
+      - id: id_bcache0
+        type: bcache
+        name: bcache0
+        backing_device: id_rotary0_part2
+        cache_device: id_ssd0
+        cache_mode: writeback
+      - id: bootfs
+        type: format
+        label: boot-fs
+        volume: id_rotary0_part1
+        fstype: ext4
+      - id: rootfs
+        type: format
+        label: root-fs
+        volume: id_bcache0
+        fstype: ext4
+      - id: rootfs_mount
+        type: mount
+        path: /
+        device: rootfs
+      - id: bootfs_mount
+        type: mount
+        path: /boot
+        device: bootfs
+
+RAID Boot
+~~~~~~~~~
+
+::
+
+  storage:
+    version: 1
+    config:
+       - id: sda
+         type: disk
+         ptable: gpt
+         model: QEMU HARDDISK
+         path: /dev/vdb
+         name: main_disk
+         grub_device: 1
+       - id: bios_boot_partition
+         type: partition
+         size: 1MB
+         device: sda
+         flag: bios_grub
+       - id: sda1
+         type: partition
+         size: 3GB
+         device: sda
+       - id: sdb
+         type: disk
+         ptable: gpt
+         model: QEMU HARDDISK
+         path: /dev/vdc
+         name: second_disk
+       - id: sdb1
+         type: partition
+         size: 3GB
+         device: sdb
+       - id: sdc
+         type: disk
+         ptable: gpt
+         model: QEMU HARDDISK
+         path: /dev/vdd
+         name: third_disk
+       - id: sdc1
+         type: partition
+         size: 3GB
+         device: sdc
+       - id: mddevice
+         name: md0
+         type: raid
+         raidlevel: 5
+         devices:
+           - sda1
+           - sdb1
+           - sdc1
+       - id: md_root
+         type: format
+         fstype: ext4
+         volume: mddevice
+       - id: md_mount
+         type: mount
+         path: /
+         device: md_root
+
+
+RAID5 + Bcache
+~~~~~~~~~~~~~~
+
+::
+
+  storage:
+    config:
+    - grub_device: true
+      id: sda
+      model: QEMU HARDDISK
+      name: sda
+      ptable: msdos
+      path: /dev/vdb
+      type: disk
+      wipe: superblock
+    - id: sdb
+      model: QEMU HARDDISK
+      name: sdb
+      path: /dev/vdc
+      type: disk
+      wipe: superblock
+    - id: sdc
+      model: QEMU HARDDISK
+      name: sdc
+      path: /dev/vdd
+      type: disk
+      wipe: superblock
+    - id: sdd
+      model: QEMU HARDDISK
+      name: sdd
+      path: /dev/vde
+      type: disk
+      wipe: superblock
+    - id: sde
+      model: QEMU HARDDISK
+      name: sde
+      path: /dev/vdf
+      type: disk
+      wipe: superblock
+    - devices:
+      - sdc
+      - sdd
+      - sde
+      id: md0
+      name: md0
+      raidlevel: 5
+      spare_devices: []
+      type: raid
+    - device: sda
+      id: sda-part1
+      name: sda-part1
+      number: 1
+      size: 1000001536B
+      type: partition
+      uuid: 3a38820c-d675-4069-b060-509a3d9d13cc
+      wipe: superblock
+    - device: sda
+      id: sda-part2
+      name: sda-part2
+      number: 2
+      size: 7586787328B
+      type: partition
+      uuid: 17747faa-4b9e-4411-97e5-12fd3d199fb8
+      wipe: superblock
+    - backing_device: sda-part2
+      cache_device: sdb
+      cache_mode: writeback
+      id: bcache0
+      name: bcache0
+      type: bcache
+    - fstype: ext4
+      id: sda-part1_format
+      label: ''
+      type: format
+      uuid: 71b1ef6f-5cab-4a77-b4c8-5a209ec11d7c
+      volume: sda-part1
+    - fstype: ext4
+      id: md0_format
+      label: ''
+      type: format
+      uuid: b031f0a0-adb3-43be-bb43-ce0fc8a224a4
+      volume: md0
+    - fstype: ext4
+      id: bcache0_format
+      label: ''
+      type: format
+      uuid: ce45bbaf-5a44-4487-b89e-035c2dd40657
+      volume: bcache0
+    - device: bcache0_format
+      id: bcache0_mount
+      path: /
+      type: mount
+    - device: sda-part1_format
+      id: sda-part1_mount
+      path: /boot
+      type: mount
+    - device: md0_format
+      id: md0_mount
+      path: /srv/data
+      type: mount
+    version: 1

=== added file 'examples/apt-source.yaml'
--- examples/apt-source.yaml	1970-01-01 00:00:00 +0000
+++ examples/apt-source.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,267 @@
+# YAML example of an apt config.
+apt:
+  # The apt config consists of two major "areas".
+  #
+  # On one hand there is the global configuration for the apt feature.
+  #
+  # On the other hand (down in this file) there is the source dictionary which
+  # allows defining various entries to be considered by apt.
+
+  ##############################################################################
+  # Section 1: global apt configuration
+  #
+  # The following examples number the top keys to ease identification in
+  # discussions.
+
+  # 1.1 preserve_sources_list
+  #
+  # Preserves the existing /etc/apt/sources.list
+  # Default: True - do not overwrite sources_list. If left at true,
+  # any "mirrors" configuration will have no effect.
+  # Set to False to apply the configuration to sources.list. Without setting
+  # this to false, only "extra" source specifications will be written into
+  # /etc/apt/sources.list.d/*
+  preserve_sources_list: false
+
+  # 1.2 disable_suites
+  #
+  # This is an empty list by default, so nothing is disabled.
+  #
+  # If given, those suites are removed from sources.list after all other
+  # modifications have been made.
+  # Suites are disabled even if no other modification was made,
+  # but not if preserve_sources_list is active.
+  # There is a special alias “$RELEASE”, as in the sources, that will be
+  # replaced by the matching release.
+  #
+  # To ease configuration and improve readability the following common ubuntu
+  # suites will be automatically mapped to their full definition.
+  # updates   => $RELEASE-updates
+  # backports => $RELEASE-backports
+  # security  => $RELEASE-security
+  # proposed  => $RELEASE-proposed
+  # release   => $RELEASE
+  #
+  # There is no harm in specifying a suite to be disabled that is not found in
+  # the sources.list file (it is just a no-op then)
+  #
+  # Note: Lines don’t get deleted, but disabled by being converted to a comment.
+  # The following example disables all usual defaults except $RELEASE-security.
+  # On top it disables a custom suite called "mysuite"
+  disable_suites: [$RELEASE-updates, backports, $RELEASE, mysuite]
+
+  # 1.3 primary/security
+  #
+  # define a custom (e.g. localized) mirror that will be used in sources.list
+  # and any custom sources entries for deb / deb-src lines.
+  #
+  # One can set primary and security mirrors to different URIs;
+  # the child elements of the keys primary and security are equivalent
+  primary:
+    # arches is list of architectures the following config applies to
+    # the special keyword "default" applies to any architecture not explicitly
+    # listed.
+    - arches: [amd64, i386, default]
+      # uri is just defining the target as-is
+      uri: http://us.archive.ubuntu.com/ubuntu
+      #
+      # via search one can define lists that are tried one by one.
+      # The first with a working DNS resolution (or an IP address) will be
+      # picked. That way one can keep one configuration for multiple
+      # subenvironments, each selecting the mirror that works there.
+      search:
+        - http://cool.but-sometimes-unreachable.com/ubuntu
+        - http://us.archive.ubuntu.com/ubuntu
+      #
+      # If multiple of a category are given
+      #   1. uri
+      #   2. search
+      # the first defining a valid mirror wins (in the order as defined here,
+      # not the order as listed in the config).
+      #
+    - arches: [s390x, arm64]
+      # as above, allowing one config to define different mirrors per arch
+  # security is optional, if not defined it is set to the same value as primary
+  security:
+    uri: http://security.ubuntu.com/ubuntu
+    [...]
+
+  # if no mirrors are specified at all, or all lookups fail, it will use:
+  # primary: http://archive.ubuntu.com/ubuntu
+  # security: http://security.ubuntu.com/ubuntu
+
+  # 1.4 sources_list
+  #
+  # Provide a custom template for rendering sources.list;
+  # if none is provided, curtin will try to modify the sources.list it finds
+  # in the target at /etc/apt/sources.list.
+  # Within these sources.list templates you can use the following replacement
+  # variables (all have sane Ubuntu defaults, but mirrors can be overwritten
+  # as needed (see above)):
+  # => $RELEASE, $MIRROR, $PRIMARY, $SECURITY
+  sources_list: | # written by curtin custom template
+    deb $MIRROR $RELEASE main restricted
+    deb-src $MIRROR $RELEASE main restricted
+    deb $PRIMARY $RELEASE universe restricted
+    deb $SECURITY $RELEASE-security multiverse
+
+  # 1.5 conf
+  #
+  # Any apt config string that will be made available to apt
+  # see the apt.conf(5) man page for details of what can be specified
+  conf: | # APT config
+    APT {
+      Get {
+        Assume-Yes "true";
+        Fix-Broken "true";
+      };
+    };
+
+  # 1.6 (http_|ftp_|https_)proxy
+  #
+  # Proxies are the most common apt.conf option, so that for simplified use
+  # there is a shortcut for those. Those get automatically translated into the
+  # correct Acquire::*::Proxy statements.
+  #
+  # note: 'proxy' is actually a short synonym for http_proxy
+  proxy: http://[[user][:pass]@]host[:port]/
+  http_proxy: http://[[user][:pass]@]host[:port]/
+  ftp_proxy: ftp://[[user][:pass]@]host[:port]/
+  https_proxy: https://[[user][:pass]@]host[:port]/
+
+  # 1.7 add_apt_repo_match
+  #
+  # 'source' entries in apt-sources that match this Python regular expression
+  # will be passed to add-apt-repository
+  # The following example is also the builtin default if nothing is specified
+  add_apt_repo_match: '^[\w-]+:\w'
+
+
+  ##############################################################################
+  # Section 2: source list entries
+  #
+  # This is a dictionary (unlike most block/net which are lists)
+  #
+  # The key of each source entry is the filename and will be prepended by
+  # /etc/apt/sources.list.d/ if it doesn't start with a '/'.
+  # If it doesn't end with .list, it will be appended so that apt picks up its
+  # configuration.
+  #
+  # Whenever there is no content to be written into such a file, the key is
+  # not used as filename - yet it can still be used as index for merging
+  # configuration.
+  #
+  # The values inside the entries consist of the following optional entries:
+  #   'source': a sources.list entry (some variable replacements apply)
+  #   'keyid': providing a key to import via shortid or fingerprint
+  #   'key': providing a raw PGP key
+  #   'keyserver': specify an alternate keyserver to pull keys from that
+  #                were specified by keyid
+
+  # This allows merging between multiple input files, unlike a list. E.g.:
+  # cloud-config1
+  # sources:
+  #    s1: {'key': 'key1', 'source': 'source1'}
+  # cloud-config2
+  # sources:
+  #    s2: {'key': 'key2'}
+  #    s1: {'keyserver': 'foo'}
+  # This would be merged to
+  # sources:
+  #    s1:
+  #        keyserver: foo
+  #        key: key1
+  #        source: source1
+  #    s2:
+  #        key: key2
+  #
+  # The following examples number the subfeatures per sources entry to ease
+  # identification in discussions.
+
+
+  sources:
+    curtin-dev-ppa.list:
+      # 2.1 source
+      #
+      # Creates a file in /etc/apt/sources.list.d/ for the sources list entry
+      # based on the key: "/etc/apt/sources.list.d/curtin-dev-ppa.list"
+      source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main"
+
+      # 2.2 keyid
+      #
+      # Importing a gpg key for a given key id. Used keyserver defaults to
+      # keyserver.ubuntu.com
+      keyid: F430BBA5 # GPG key ID published on a key server
+
+    ignored1:
+      # 2.3 PPA shortcut
+      #
+      # Setup correct apt sources.list line and Auto-Import the signing key
+      # from LP
+      #
+      # See https://help.launchpad.net/Packaging/PPA for more information
+      # this requires 'add-apt-repository'. This will create a file in
+      # /etc/apt/sources.list.d automatically, therefore the key here is
+      # ignored as filename in those cases.
+      source: "ppa:curtin-dev/test-archive"    # Quote the string
+
+    my-repo2.list:
+      # 2.4 replacement variables
+      #
+      # sources can use $MIRROR, $PRIMARY, $SECURITY and $RELEASE replacement
+      # variables.
+      # They will be replaced with the default or specified mirrors and the
+      # running release.
+      # The entry below would be possibly turned into:
+      #   source: deb http://archive.ubuntu.com/ubuntu xenial multiverse
+      source: deb $MIRROR $RELEASE multiverse
+
+    my-repo3.list:
+      # this would have the same end effect as 'ppa:curtin-dev/test-archive'
+      source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main"
+      keyid: F430BBA5 # GPG key ID published on the key server
+      filename: curtin-dev-ppa.list
+
+    ignored2:
+      # 2.5 key only
+      #
+      # this would only import the key without adding a ppa or other source spec
+      # since this doesn't generate a source.list file the filename key is ignored
+      keyid: F430BBA5 # GPG key ID published on a key server
+
+    ignored3:
+      # 2.6 key id alternatives
+      #
+      # Keyid's can also be specified via their long fingerprints
+      keyid: B59D 5F15 97A5 04B7 E230  6DCA 0620 BBCF 0368 3F77
+
+    ignored4:
+      # 2.7 alternative keyservers
+      #
+      # One can also specify alternative keyservers to fetch keys from.
+      keyid: B59D 5F15 97A5 04B7 E230  6DCA 0620 BBCF 0368 3F77
+      keyserver: pgp.mit.edu
+
+
+    my-repo4.list:
+      # 2.8 raw key
+      #
+      # The apt signing key can also be specified by providing a pgp public key
+      # block. Providing the PGP key this way is the most robust method for
+      # specifying a key, as it removes dependency on a remote key server.
+      #
+      # As with keyid's this can be specified with or without some actual source
+      # content.
+      key: | # The value needs to start with -----BEGIN PGP PUBLIC KEY BLOCK-----
+         -----BEGIN PGP PUBLIC KEY BLOCK-----
+         Version: SKS 1.0.10
+
+         mI0ESpA3UQEEALdZKVIMq0j6qWAXAyxSlF63SvPVIgxHPb9Nk0DZUixn+akqytxG4zKCONz6
+         qLjoBBfHnynyVLfT4ihg9an1PqxRnTO+JKQxl8NgKGz6Pon569GtAOdWNKw15XKinJTDLjnj
+         9y96ljJqRcpV9t/WsIcdJPcKFR5voHTEoABE2aEXABEBAAG0GUxhdW5jaHBhZCBQUEEgZm9y
+         IEFsZXN0aWOItgQTAQIAIAUCSpA3UQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEA7H
+         5Qi+CcVxWZ8D/1MyYvfj3FJPZUm2Yo1zZsQ657vHI9+pPouqflWOayRR9jbiyUFIn0VdQBrP
+         t0FwvnOFArUovUWoKAEdqR8hPy3M3APUZjl5K4cMZR/xaMQeQRZ5CHpS4DBKURKAHC0ltS5o
+         uBJKQOZm5iltJp15cgyIkBkGe8Mx18VFyVglAZey
+         =Y2oI
+         -----END PGP PUBLIC KEY BLOCK-----

=== added file 'examples/network-ipv6-bond-vlan.yaml'
--- examples/network-ipv6-bond-vlan.yaml	1970-01-01 00:00:00 +0000
+++ examples/network-ipv6-bond-vlan.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,56 @@
+network:
+  version: 1
+  config:
+    - name: interface0
+      type: physical
+      mac_address: BC:76:4E:06:96:B3
+    - name: interface1
+      type: physical
+      mac_address: BC:76:4E:04:88:41
+    - type: bond
+      bond_interfaces:
+        - interface0
+        - interface1
+      name: bond0
+      params:
+        bond_miimon: 100
+        bond_mode: 802.3ad
+        bond_xmit_hash_policy: layer3+4
+    - type: vlan
+      name: bond0.108
+      vlan_id: '108'
+      vlan_link: bond0
+      subnets:
+        - type: static
+          address: 65.61.151.38
+          netmask: 255.255.255.252
+          routes:
+            - gateway: 65.61.151.37
+              netmask: 0.0.0.0
+              network: 0.0.0.0
+        - type: static
+          address: 2001:4800:78ff:1b:be76:4eff:fe06:96b3
+          netmask: 'ffff:ffff:ffff:ffff::'
+          routes:
+            - gateway: 2001:4800:78ff:1b::1
+              netmask: '::'
+              network: '::'
+    - type: vlan
+      name: bond0.208
+      vlan_id: '208'
+      vlan_link: bond0
+      subnets:
+        - address: 10.184.225.122
+          netmask: 255.255.255.252
+          type: static
+          routes:
+            - gateway: 10.184.225.121
+              netmask: 255.240.0.0
+              network: 10.176.0.0
+            - gateway: 10.184.225.121
+              netmask: 255.240.0.0
+              network: 10.208.0.0
+    - type: nameserver
+      address: 72.3.128.240
+    - type: nameserver
+      address: 72.3.128.241

=== added file 'examples/tests/apt_config_command.yaml'
--- examples/tests/apt_config_command.yaml	1970-01-01 00:00:00 +0000
+++ examples/tests/apt_config_command.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,85 @@
+# This pushes curtin through an automatic installation
+# where no storage configuration is necessary.
+# It exercises the standalone curtin apt-config command.
+-placeholder_simple_install: unused
+bucket:
+  - &run_with_stdin |
+    #!/bin/sh
+    input="$1"
+    shift
+    printf "%s\n" "$input" | "$@"
+
+  - &run_apt_config_file |
+    #!/bin/sh
+    # take the first argument, write it to a tmp file and execute
+    # curtin apt --config=<tmpfile> "$@"
+    set -e
+    config="$1"
+    shift
+    TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX")
+    trap cleanup EXIT
+    cleanup() { [ -z "${TEMP_D}" ] || rm -Rf "${TEMP_D}"; }
+    cfg_file="${TEMP_D}/curtin-apt.conf"
+    printf "%s\n" "$config" > "${cfg_file}"
+    curtin apt-config "--config=${cfg_file}" "$@"
+
+  - &apt_config_ppa |
+    # this is just a large string
+    apt:
+      sources:
+        ignored:
+           source: "ppa:curtin-dev/test-archive"
+        curtin-test1.list:
+           source: "deb $MIRROR $RELEASE-proposed main"
+
+  - &apt_config_source |
+    apt:
+      preserve_sources_list: false
+      sources:
+        ignored:
+           source: "ppa:curtin-dev/test-archive"
+           # apt-add-repository adds the key anyway, but let's exercise that
+           # code path by adding it once more in this scope
+           key: |
+             -----BEGIN PGP PUBLIC KEY BLOCK-----
+             Version: GnuPG v1
+
+             mQINBFazYtEBEADXrW53tDOvwcnHwchLapTKK89+wBWR2qQKXx5Mymtjkrb688Fs
+             ciXcCsvClnNGJ9bEhrJTucyb7WF0KcDVQcvOd0C4HOSEAc0DANBu1Mdp/tmCWuiW
+             1TbbhomyHAcHNdbuSZeMDh5xi9M3DYPVq72PwYwjrE4lotVxHeX5nYEH304U+5nJ
+             tBNpVon91k3ItymQ6Jii+9gVoQ7ujiH1/Gw4/J/1/5zQ3C1mOjq68vLunz5iw1Kn
+             7TMVyID6qwq2UFEgudpseLfFZcb/p7KgI0m3S/OViwzSc44m63ggTPMmbeHW51xA
+             1rpUChSU+cm0cJ4tNtAcYHRYRltWAo/3J1OzB6Ut5P7vIC5r+QcCyyMbku9NjYaw
+             dWX4DDKqW3is3qJ/7EeOKPL4N8wuKwuWUC7s2wqsIZL8EmsvR+ZOnTJ3bHZFvsLg
+             p/OKqmhxMGYXiXOWDOEJ+vwboPxrvhD90JZl8weNGPnpla+EkxRDBSpEb31Vgt5X
+             AIoxE7XxwfuXS3MGMA7fSqkGPGHfSLYQFFk+CAIeTUV+ypKW94hIxXKgqRxa7dxz
+             Ymqs+wgIGaWJCnx7z1Kpd3HD9iTAYjyWyhlQ/Tjt43kwUBdALhTL0vYUTGQyTgKt
+             tAriVf5bqHb6Hj5PS5YZQ/+YoCUI2OTrAWWNyH9rIEZGsFc30oJFPHj3fQARAQAB
+             tCNMYXVuY2hwYWQgUFBBIGZvciBjdXJ0aW4gZGV2ZWxvcGVyc4kCOAQTAQIAIgUC
+             VrNi0QIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQVf58jAFlAT4TGg//
+             SV7vWmkJqr5TSlT9JqCBfmtFjgudxTGG8XM2zwnta+m/3YVOMo0ZjyGL4fUKjCmN
+             eh6eYihwpRtfdawziaEOydDxNfdjwscV4Qcy7FjHX+DQnNzQyzK+WgWRJwNWloCw
+             skg2tF+EDRajalTRjHJAn+5zAilXVn71T/hhOCxkF0PBiH9s/e7pW/KcgBEC1MYV
+             Fs0fLST8SYhsIxttVRWuRkJDrtEY1zeVhkvk+PN6UuCY6/gyRSQ1rhhBF3ePqiba
+             CmLiUjnJMEm1OJOkuD33IMNPKQi99TZhr8y3AGCcrmAQtJsYLvVDPcsOsjGQHXP4
+             2qQXK+jE/AAUycCQ6tgrAqCcUNQiClP8xUPkZOiDNvVMiPvIj/s79ShkoRaWLMb7
+             n9jyDOhs3L7dtmKQwHWq9qJ56fzx1L0/jxSanzm+ZJ/Q7t6E/GFxY1RsAk7xtI1C
+             SzSmrGKmtlbWlOyqqQb6zhULIJpaXvh/GaYyo0xI3rA+QvPDt/fgUJEBiSidwabW
+             Q8JU9iI5HXQxbVq1gSdy/z31fue5JuZSqjnjCjgho/UrXa4i1RPtqsY3FoTk7Hmo
+             C1z2cJc8HQI8JnEX/4qJXvPMRM2JsMD9DqvgsUJG5M9Qchy8cymYY+xeiBVYzJI+
+             WHCq6LHqnVxYZ+RM858lSsD6wetN44vguIjL3qJJ+wU=
+        curtin-test1.list:
+           source: "deb $MIRROR $RELEASE-proposed main"
+
+# into ephemeral environment
+early_commands:
+ 00_add_archive: [sh, -c, *run_with_stdin, "curtin-apt",
+                  *apt_config_ppa, curtin, apt-config, --config=-, --target=/]
+ # tests itself by installing a package only available in that ppa
+ 00_install_package: [apt-get, install, --assume-yes, smello]
+
+# into target environment
+late_commands:
+ 00_add_archive: [sh, -c, *run_apt_config_file, "curtin-apt-file",
+                  *apt_config_source]
+ 00_install_package: [curtin, in-target, --, apt-get, install, --assume-yes, smello]

=== added file 'examples/tests/apt_source_custom.yaml'
--- examples/tests/apt_source_custom.yaml	1970-01-01 00:00:00 +0000
+++ examples/tests/apt_source_custom.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,97 @@
+showtrace: true
+apt:
+  preserve_sources_list: false
+  primary:
+    - arches: [default]
+      uri: http://us.archive.ubuntu.com/ubuntu
+  security:
+    - arches: [default]
+      uri: http://security.ubuntu.com/ubuntu
+  sources_list: | # written by curtin custom template
+    deb $MIRROR $RELEASE main restricted
+    deb-src $MIRROR $RELEASE main restricted
+    deb $PRIMARY $RELEASE universe restricted
+    deb $SECURITY $RELEASE-security multiverse
+    # nice line to check in test
+  conf: | # APT config
+    ACQUIRE {
+      Retries "3";
+    };
+  sources:
+    curtin-dev-ppa.list:
+      source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main"
+      keyid: F430BBA5
+    ignored1:
+      source: "ppa:curtin-dev/test-archive"
+    my-repo2.list:
+      source: deb $MIRROR $RELEASE multiverse
+    ignored3:
+      keyid: 0E72 9061 0D2F 6DC4 D65E  A921 9A31 4EC5 F470 A0AC
+    my-repo4.list:
+      source: deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main
+      key: |
+         -----BEGIN PGP PUBLIC KEY BLOCK-----
+         Version: GnuPG v1
+
+         mQINBFXJ3NcBEAC85PMdaKdItkdjCT1vRJrdwNqj4lN5mu6z4dDVfeZlmozRDBGb
+         ENSOWCiYz3meANO7bKthQQCqAETSBV72rrDCqFZUpXeyG3zCN98Z/UdJ8zpQD9uw
+         mq2CaAqWMk6ty+PkHQ4gtIc390lGfRbHNoZ5HaWJNVOK7FCB2hBmnTZW7AViYiYa
+         YswOjYxaCkwQ/DsMOPD7S5OjwbLucs2YGjkBm7YF1nnXNzyt+BwieKQW/sQ2+ga1
+         mkgLW1BTQN3+JreBpeHy/yrRdK4dOZZUar4WPZitZzOW2eNpaaf6hKNA14LB/96a
+         tEguK8VazoqSQGvNV/R3PjIYmurVP3/Z9bEVgOKhMCflgwKCYgx+tBUypN3zFWv9
+         pgVq3iHx1MFCvoP9FsNB7I6jzOxlQP4z25BzR3ympx/QexkFw5CBFXhdrU+qNVBl
+         SSnz69aLEjCRXqBOnQEr0irs/e/35+yLJdEuw89vSwWwrzbV5r1Y7uxinEGWSydT
+         qddj97uKOWeMmnp20Be4+nhDDW/BMiTFI4Y3bYeDTrftbWMaSEmtSTw5HHxtAFtg
+         X9Hyx0Q3eN1w3gRZgIdm0xYTe7bNTofFRdfXzB/9wtNIcaW10+IlODShFHPCnh+d
+         i56a8LCdZcXiiLfCIhEcnqmM37BVvhjIQKSyOU1eMEgX148aVEz36OVuMwARAQAB
+         tCdDaHJpc3RpYW4gRWhyaGFyZHQgPGNwYWVsemVyQGdtYWlsLmNvbT6JAjgEEwEC
+         ACIFAlXJ3NcCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJELo+KTOCgLJC
+         BugP/ir0ES3wCzvHMnkz2UlXt9FR4KqY0L9uFmwu9VYpmfAploEVIOi2HcuxpcRp
+         hgoQlUtkz3lRhUeZzCxuB1ljM2JKTJiezP1tFTTGCbVYhPyA0LmUiHDWylG7FzPb
+         TX96HY/G0jf+m4CfR8q3HNHjeDi4VeA2ppBxdHcVE5I7HihDgRPJd+CvCa3nYdAb
+         nXDKlQZz5aZc7AgrRVamr4mshkzWuwNNCwOt3AIgHDkU/HzA5xlXfwHxOoP6scWH
+         /T7vFsd/vOikBphGseWPgKm6w1zyQ5Dk/wjRL8UeSJZW+Rh4PuBMbxg01lAZpPTq
+         tu/bePeNty3g5bhwO6oHMpWhprn3dO37R680qo6UnBPzICeuBUnSYgpPnsQC9maz
+         FEjiBtMsXSanU5vww7TpxY1JHjk5KFcmKx4sBeablznsm+GuVaDFN8R4eDjrM14r
+         SOzA9cV0bSQr4dMqA9fZFSx6qLTacIeMfptybW3zaDX/pJOeBBWRAtoAfZIFbBnu
+         /ZxDDgiQtZzpVK4UkYk5rjjtV/CPVXx64AnTHi35YfUn14KkE+k3odHdvPfBiv9+
+         NxfkTuV/koOgpD3+lTIYXyVHS9gwvhfRD/YfdrnVGl7bRZe68j7bfWDuQuSqIhSA
+         jpeJslJCawnqv6fVB6buj6jjcgHIxqCVn99chaPFSblEIPfXtDVDaHJpc3RpYW4g
+         RWhyaGFyZHQgPGNocmlzdGlhbi5laHJoYXJkdEBjYW5vbmljYWwuY29tPokCOAQT
+         AQIAIgUCVsbUOgIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQuj4pM4KA
+         skJNPg/7BF/iXHHdSBigWKXCCvQz58uInoc/R4beIegxRCMq7wkYEey4B7Fd35zY
+         zv9CBOTV3hZePMCg9jxl4ki2kSsrZSCIEJw4L/aXDtJtx3HT18uTW0QKoU3nK/ro
+         OtthVqBqmiSEi40UUU+5MGrUjwLSm+PjaaSapjK/lddf0KbXBB78/BtR/XT0gxWM
+         +o68Oei9Nj1S3h6UndJwNAQ1xaDWmU2T7CRJet3F+cXZd3aDuS2axOTSTZbraSq7
+         zdl1xUiKtzXZIp8X1ewne+dzkewZuWj7DOwOBEFK26UhxCjKd5mUr7jpWQ4ampFX
+         6xfd/MK8SJFY+iHOBKyzq9po40tE23dqWuaHB+T3MxOgQ9JHCo9x22XNvEuKZW/V
+         4WaoGHVkR+jtWNC8Qv/xCMHL3CEvAklKJR68WDhozwUYTgNt5vCoJOviMlbhDSwf
+         0zVXpQwMR//4c0QSA0+BPpIEPDnx5vTIHBVXHy4bBBHU2Vi87QIDS0AtiBpNcspN
+         6AG0ktuldkE/pqfSTJ2A9HpHZyU+8boagRS5/z102Pjtmf/mzUkcHmfRb9o0DE15
+         X5fqpA3lYyx9eHIAgH4eaB1+G20Ez/EY5hr8IMS2nNBSem491UW6DXDYRu6eBLrR
+         sRmtrJ6DlTZFRFlqVZ47bce/SbeM/xljvRkBxWG6RtDRsTyNVI65Ag0EVcnc1wEQ
+         ANzk9W058tSHqf05UEtJGrN0K8DLriCvPd7QdFA8yVIZM3WD+m0AMBGXjd8BT5c2
+         lt0GmhB8klonHZvPiVLTRTLcSsc3NBopr1HL1bWsgOczwWiXSrc62oGAHUOQT/bv
+         vS6KIkZgez+qtCo/DCOGJrADaoJBiBCLSsZgowpzazZZDPUF7rAsfcryVCFvftK0
+         wAe1OdvUG77NHrMrE1oX3zh82hTqR5azBre6Y81lNwxxug/Xl/RHjNhEOYohcsLS
+         /xl0m2X831fHzcGGpoISRgrfel+M4RoC7KsLrwVhrF8koCD/ZQlevfLpuRl5LNpO
+         s1ZtEi8ZvLliih+H+BOlBD0zUc3zZrrks/NCpm1eZba0Z6L48r4TIHW08SGlHx7o
+         SrXgkq3mtoM8C4uDiLwjav5KxiF7n68s/9LF82aAr7YjNXd+xYZNjsmmFlYj9CGI
+         lL4jVt4v4EtTONa6pbtCNv5ezOLDZ6BBcQ36xdkrWzdpjQjL2mnh3sqIAGIPu7tH
+         N8euQ5L1zIvIjVqYlR1eJssp96QYPWYxF7TosfML4BUhCP631IWfuD9X/K2LzDmv
+         B2gVZo9fbhSC+P7GYVG+tV4VLAMbspAxRXXL69+j98aeV5g59f8OFQPbGpKE/SAY
+         eIXtq8DD+PYUXXq3VUI2brVLv42LBVdSJpKNKG3decIBABEBAAGJAh8EGAECAAkF
+         AlXJ3NcCGwwACgkQuj4pM4KAskKzeg/9FxXJLV3eWwY4nn2VhwYTHnHtSUpi8usk
+         RzIa3Mcj6OEVjU2LZaT3UQF8h6dLM9y+CemcwyjMqm1RQ5+ogfrItby1AaBXwCvm
+         XCUGw2zFOAnyzSHHoDFj27sllFxDmfSiBY5KP8M+/ywHKZDkRb6EjzMPx5oKFeGW
+         Hmqaj5FDmTeWChSIHd1ZxobashFauOZDbS/ijRRMsVGFulU2Nb/4QJK73g3orfhY
+         5mq1TMkQ5Kcbqh4OmYYYayLtJQcpa6ZVopaRhAJFe30P83zW9pM5LQDpP9JIyY+S
+         DjasEY4ekYtw6oCKAjpqlwaaNDjl27OkJ7R7laFKy4grZ2TSB/2KTjn/Ea3CH/pA
+         SrpVis1LvC90XytbBnsEKYXU55H943wmBc6oj+itQhx4WyIiv+UgtHI/DbnYbUru
+         71wpfapqGBXYfu/zAra8PITngOFuizeYu+idemu55ANO3keJPKr3ZBUSBBpNFauT
+         VUUCSnrLt+kpSLopYESiNdsPW/aQTFgFvA4BkBJTIMQsQZXicuXUePYlg5xFzXOv
+         XgiqkjRA9xBI5JAIUgLRk3ulVFt2bIsTG9XgtGyphEs86Q0MOIMo0WbZGtAYDrZO
+         DITbm2KzVLGVLn/ZJiW11RSHPNiwgg66/puKdFWrSogYYDJdDEUJtLIhypZ+ORxe
+         7oh88hTkC1w=
+         =UNSw
+         -----END PGP PUBLIC KEY BLOCK-----

=== added file 'examples/tests/apt_source_modify.yaml'
--- examples/tests/apt_source_modify.yaml	1970-01-01 00:00:00 +0000
+++ examples/tests/apt_source_modify.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,92 @@
+showtrace: true
+apt:
+  preserve_sources_list: false
+  primary:
+    - arches: [default]
+      uri: http://us.archive.ubuntu.com/ubuntu
+  security:
+    - arches: [default]
+      uri: http://security.ubuntu.com/ubuntu
+  conf: | # APT config
+    ACQUIRE {
+      Retries "3";
+    };
+  sources:
+    curtin-dev-ppa.list:
+      source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main"
+      keyid: F430BBA5
+    ignored1:
+      source: "ppa:curtin-dev/test-archive"
+    # intentionally dropped the .list here, has to be added by the code
+    my-repo2:
+      source: deb $MIRROR $RELEASE multiverse
+    ignored3:
+      keyid: 0E72 9061 0D2F 6DC4 D65E  A921 9A31 4EC5 F470 A0AC
+    my-repo4.list:
+      source: deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main
+      key: |
+         -----BEGIN PGP PUBLIC KEY BLOCK-----
+         Version: GnuPG v1
+
+         mQINBFXJ3NcBEAC85PMdaKdItkdjCT1vRJrdwNqj4lN5mu6z4dDVfeZlmozRDBGb
+         ENSOWCiYz3meANO7bKthQQCqAETSBV72rrDCqFZUpXeyG3zCN98Z/UdJ8zpQD9uw
+         mq2CaAqWMk6ty+PkHQ4gtIc390lGfRbHNoZ5HaWJNVOK7FCB2hBmnTZW7AViYiYa
+         YswOjYxaCkwQ/DsMOPD7S5OjwbLucs2YGjkBm7YF1nnXNzyt+BwieKQW/sQ2+ga1
+         mkgLW1BTQN3+JreBpeHy/yrRdK4dOZZUar4WPZitZzOW2eNpaaf6hKNA14LB/96a
+         tEguK8VazoqSQGvNV/R3PjIYmurVP3/Z9bEVgOKhMCflgwKCYgx+tBUypN3zFWv9
+         pgVq3iHx1MFCvoP9FsNB7I6jzOxlQP4z25BzR3ympx/QexkFw5CBFXhdrU+qNVBl
+         SSnz69aLEjCRXqBOnQEr0irs/e/35+yLJdEuw89vSwWwrzbV5r1Y7uxinEGWSydT
+         qddj97uKOWeMmnp20Be4+nhDDW/BMiTFI4Y3bYeDTrftbWMaSEmtSTw5HHxtAFtg
+         X9Hyx0Q3eN1w3gRZgIdm0xYTe7bNTofFRdfXzB/9wtNIcaW10+IlODShFHPCnh+d
+         i56a8LCdZcXiiLfCIhEcnqmM37BVvhjIQKSyOU1eMEgX148aVEz36OVuMwARAQAB
+         tCdDaHJpc3RpYW4gRWhyaGFyZHQgPGNwYWVsemVyQGdtYWlsLmNvbT6JAjgEEwEC
+         ACIFAlXJ3NcCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJELo+KTOCgLJC
+         BugP/ir0ES3wCzvHMnkz2UlXt9FR4KqY0L9uFmwu9VYpmfAploEVIOi2HcuxpcRp
+         hgoQlUtkz3lRhUeZzCxuB1ljM2JKTJiezP1tFTTGCbVYhPyA0LmUiHDWylG7FzPb
+         TX96HY/G0jf+m4CfR8q3HNHjeDi4VeA2ppBxdHcVE5I7HihDgRPJd+CvCa3nYdAb
+         nXDKlQZz5aZc7AgrRVamr4mshkzWuwNNCwOt3AIgHDkU/HzA5xlXfwHxOoP6scWH
+         /T7vFsd/vOikBphGseWPgKm6w1zyQ5Dk/wjRL8UeSJZW+Rh4PuBMbxg01lAZpPTq
+         tu/bePeNty3g5bhwO6oHMpWhprn3dO37R680qo6UnBPzICeuBUnSYgpPnsQC9maz
+         FEjiBtMsXSanU5vww7TpxY1JHjk5KFcmKx4sBeablznsm+GuVaDFN8R4eDjrM14r
+         SOzA9cV0bSQr4dMqA9fZFSx6qLTacIeMfptybW3zaDX/pJOeBBWRAtoAfZIFbBnu
+         /ZxDDgiQtZzpVK4UkYk5rjjtV/CPVXx64AnTHi35YfUn14KkE+k3odHdvPfBiv9+
+         NxfkTuV/koOgpD3+lTIYXyVHS9gwvhfRD/YfdrnVGl7bRZe68j7bfWDuQuSqIhSA
+         jpeJslJCawnqv6fVB6buj6jjcgHIxqCVn99chaPFSblEIPfXtDVDaHJpc3RpYW4g
+         RWhyaGFyZHQgPGNocmlzdGlhbi5laHJoYXJkdEBjYW5vbmljYWwuY29tPokCOAQT
+         AQIAIgUCVsbUOgIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQuj4pM4KA
+         skJNPg/7BF/iXHHdSBigWKXCCvQz58uInoc/R4beIegxRCMq7wkYEey4B7Fd35zY
+         zv9CBOTV3hZePMCg9jxl4ki2kSsrZSCIEJw4L/aXDtJtx3HT18uTW0QKoU3nK/ro
+         OtthVqBqmiSEi40UUU+5MGrUjwLSm+PjaaSapjK/lddf0KbXBB78/BtR/XT0gxWM
+         +o68Oei9Nj1S3h6UndJwNAQ1xaDWmU2T7CRJet3F+cXZd3aDuS2axOTSTZbraSq7
+         zdl1xUiKtzXZIp8X1ewne+dzkewZuWj7DOwOBEFK26UhxCjKd5mUr7jpWQ4ampFX
+         6xfd/MK8SJFY+iHOBKyzq9po40tE23dqWuaHB+T3MxOgQ9JHCo9x22XNvEuKZW/V
+         4WaoGHVkR+jtWNC8Qv/xCMHL3CEvAklKJR68WDhozwUYTgNt5vCoJOviMlbhDSwf
+         0zVXpQwMR//4c0QSA0+BPpIEPDnx5vTIHBVXHy4bBBHU2Vi87QIDS0AtiBpNcspN
+         6AG0ktuldkE/pqfSTJ2A9HpHZyU+8boagRS5/z102Pjtmf/mzUkcHmfRb9o0DE15
+         X5fqpA3lYyx9eHIAgH4eaB1+G20Ez/EY5hr8IMS2nNBSem491UW6DXDYRu6eBLrR
+         sRmtrJ6DlTZFRFlqVZ47bce/SbeM/xljvRkBxWG6RtDRsTyNVI65Ag0EVcnc1wEQ
+         ANzk9W058tSHqf05UEtJGrN0K8DLriCvPd7QdFA8yVIZM3WD+m0AMBGXjd8BT5c2
+         lt0GmhB8klonHZvPiVLTRTLcSsc3NBopr1HL1bWsgOczwWiXSrc62oGAHUOQT/bv
+         vS6KIkZgez+qtCo/DCOGJrADaoJBiBCLSsZgowpzazZZDPUF7rAsfcryVCFvftK0
+         wAe1OdvUG77NHrMrE1oX3zh82hTqR5azBre6Y81lNwxxug/Xl/RHjNhEOYohcsLS
+         /xl0m2X831fHzcGGpoISRgrfel+M4RoC7KsLrwVhrF8koCD/ZQlevfLpuRl5LNpO
+         s1ZtEi8ZvLliih+H+BOlBD0zUc3zZrrks/NCpm1eZba0Z6L48r4TIHW08SGlHx7o
+         SrXgkq3mtoM8C4uDiLwjav5KxiF7n68s/9LF82aAr7YjNXd+xYZNjsmmFlYj9CGI
+         lL4jVt4v4EtTONa6pbtCNv5ezOLDZ6BBcQ36xdkrWzdpjQjL2mnh3sqIAGIPu7tH
+         N8euQ5L1zIvIjVqYlR1eJssp96QYPWYxF7TosfML4BUhCP631IWfuD9X/K2LzDmv
+         B2gVZo9fbhSC+P7GYVG+tV4VLAMbspAxRXXL69+j98aeV5g59f8OFQPbGpKE/SAY
+         eIXtq8DD+PYUXXq3VUI2brVLv42LBVdSJpKNKG3decIBABEBAAGJAh8EGAECAAkF
+         AlXJ3NcCGwwACgkQuj4pM4KAskKzeg/9FxXJLV3eWwY4nn2VhwYTHnHtSUpi8usk
+         RzIa3Mcj6OEVjU2LZaT3UQF8h6dLM9y+CemcwyjMqm1RQ5+ogfrItby1AaBXwCvm
+         XCUGw2zFOAnyzSHHoDFj27sllFxDmfSiBY5KP8M+/ywHKZDkRb6EjzMPx5oKFeGW
+         Hmqaj5FDmTeWChSIHd1ZxobashFauOZDbS/ijRRMsVGFulU2Nb/4QJK73g3orfhY
+         5mq1TMkQ5Kcbqh4OmYYYayLtJQcpa6ZVopaRhAJFe30P83zW9pM5LQDpP9JIyY+S
+         DjasEY4ekYtw6oCKAjpqlwaaNDjl27OkJ7R7laFKy4grZ2TSB/2KTjn/Ea3CH/pA
+         SrpVis1LvC90XytbBnsEKYXU55H943wmBc6oj+itQhx4WyIiv+UgtHI/DbnYbUru
+         71wpfapqGBXYfu/zAra8PITngOFuizeYu+idemu55ANO3keJPKr3ZBUSBBpNFauT
+         VUUCSnrLt+kpSLopYESiNdsPW/aQTFgFvA4BkBJTIMQsQZXicuXUePYlg5xFzXOv
+         XgiqkjRA9xBI5JAIUgLRk3ulVFt2bIsTG9XgtGyphEs86Q0MOIMo0WbZGtAYDrZO
+         DITbm2KzVLGVLn/ZJiW11RSHPNiwgg66/puKdFWrSogYYDJdDEUJtLIhypZ+ORxe
+         7oh88hTkC1w=
+         =UNSw
+         -----END PGP PUBLIC KEY BLOCK-----

=== added file 'examples/tests/apt_source_modify_arches.yaml'
--- examples/tests/apt_source_modify_arches.yaml	1970-01-01 00:00:00 +0000
+++ examples/tests/apt_source_modify_arches.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,102 @@
+showtrace: true
+apt:
+  preserve_sources_list: false
+  primary:
+    # we don't know on which arch this will run, so we can't put the "right"
+    # config in an arch, but we can provide various confusing alternatives
+    # and orders and it has to pick default out of them
+    - arches: [x86_2048, x86_4096, x86_8192, amd18.5, "foobar"]
+      uri: http://notthis.com/ubuntu
+    - arches: ["*"]
+      uri: http://notthis.com/ubuntu
+    - arches: [default]
+      uri: http://us.archive.ubuntu.com/ubuntu
+    - arches: []
+      uri: http://notthis.com/ubuntu
+  security:
+    - arches: [default]
+      uri: http://security.ubuntu.com/ubuntu
+    - arches: ["supersecurearchthatdoesnexist"]
+      uri: http://notthat.com/ubuntu
+  conf: | # APT config
+    ACQUIRE {
+      Retries "3";
+    };
+  sources:
+    curtin-dev-ppa.list:
+      source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main"
+      keyid: F430BBA5
+    ignored1:
+      source: "ppa:curtin-dev/test-archive"
+    my-repo2.list:
+      source: deb $MIRROR $RELEASE multiverse
+    ignored3:
+      keyid: 0E72 9061 0D2F 6DC4 D65E  A921 9A31 4EC5 F470 A0AC
+    my-repo4.list:
+      source: deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main
+      key: |
+         -----BEGIN PGP PUBLIC KEY BLOCK-----
+         Version: GnuPG v1
+
+         mQINBFXJ3NcBEAC85PMdaKdItkdjCT1vRJrdwNqj4lN5mu6z4dDVfeZlmozRDBGb
+         ENSOWCiYz3meANO7bKthQQCqAETSBV72rrDCqFZUpXeyG3zCN98Z/UdJ8zpQD9uw
+         mq2CaAqWMk6ty+PkHQ4gtIc390lGfRbHNoZ5HaWJNVOK7FCB2hBmnTZW7AViYiYa
+         YswOjYxaCkwQ/DsMOPD7S5OjwbLucs2YGjkBm7YF1nnXNzyt+BwieKQW/sQ2+ga1
+         mkgLW1BTQN3+JreBpeHy/yrRdK4dOZZUar4WPZitZzOW2eNpaaf6hKNA14LB/96a
+         tEguK8VazoqSQGvNV/R3PjIYmurVP3/Z9bEVgOKhMCflgwKCYgx+tBUypN3zFWv9
+         pgVq3iHx1MFCvoP9FsNB7I6jzOxlQP4z25BzR3ympx/QexkFw5CBFXhdrU+qNVBl
+         SSnz69aLEjCRXqBOnQEr0irs/e/35+yLJdEuw89vSwWwrzbV5r1Y7uxinEGWSydT
+         qddj97uKOWeMmnp20Be4+nhDDW/BMiTFI4Y3bYeDTrftbWMaSEmtSTw5HHxtAFtg
+         X9Hyx0Q3eN1w3gRZgIdm0xYTe7bNTofFRdfXzB/9wtNIcaW10+IlODShFHPCnh+d
+         i56a8LCdZcXiiLfCIhEcnqmM37BVvhjIQKSyOU1eMEgX148aVEz36OVuMwARAQAB
+         tCdDaHJpc3RpYW4gRWhyaGFyZHQgPGNwYWVsemVyQGdtYWlsLmNvbT6JAjgEEwEC
+         ACIFAlXJ3NcCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJELo+KTOCgLJC
+         BugP/ir0ES3wCzvHMnkz2UlXt9FR4KqY0L9uFmwu9VYpmfAploEVIOi2HcuxpcRp
+         hgoQlUtkz3lRhUeZzCxuB1ljM2JKTJiezP1tFTTGCbVYhPyA0LmUiHDWylG7FzPb
+         TX96HY/G0jf+m4CfR8q3HNHjeDi4VeA2ppBxdHcVE5I7HihDgRPJd+CvCa3nYdAb
+         nXDKlQZz5aZc7AgrRVamr4mshkzWuwNNCwOt3AIgHDkU/HzA5xlXfwHxOoP6scWH
+         /T7vFsd/vOikBphGseWPgKm6w1zyQ5Dk/wjRL8UeSJZW+Rh4PuBMbxg01lAZpPTq
+         tu/bePeNty3g5bhwO6oHMpWhprn3dO37R680qo6UnBPzICeuBUnSYgpPnsQC9maz
+         FEjiBtMsXSanU5vww7TpxY1JHjk5KFcmKx4sBeablznsm+GuVaDFN8R4eDjrM14r
+         SOzA9cV0bSQr4dMqA9fZFSx6qLTacIeMfptybW3zaDX/pJOeBBWRAtoAfZIFbBnu
+         /ZxDDgiQtZzpVK4UkYk5rjjtV/CPVXx64AnTHi35YfUn14KkE+k3odHdvPfBiv9+
+         NxfkTuV/koOgpD3+lTIYXyVHS9gwvhfRD/YfdrnVGl7bRZe68j7bfWDuQuSqIhSA
+         jpeJslJCawnqv6fVB6buj6jjcgHIxqCVn99chaPFSblEIPfXtDVDaHJpc3RpYW4g
+         RWhyaGFyZHQgPGNocmlzdGlhbi5laHJoYXJkdEBjYW5vbmljYWwuY29tPokCOAQT
+         AQIAIgUCVsbUOgIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQuj4pM4KA
+         skJNPg/7BF/iXHHdSBigWKXCCvQz58uInoc/R4beIegxRCMq7wkYEey4B7Fd35zY
+         zv9CBOTV3hZePMCg9jxl4ki2kSsrZSCIEJw4L/aXDtJtx3HT18uTW0QKoU3nK/ro
+         OtthVqBqmiSEi40UUU+5MGrUjwLSm+PjaaSapjK/lddf0KbXBB78/BtR/XT0gxWM
+         +o68Oei9Nj1S3h6UndJwNAQ1xaDWmU2T7CRJet3F+cXZd3aDuS2axOTSTZbraSq7
+         zdl1xUiKtzXZIp8X1ewne+dzkewZuWj7DOwOBEFK26UhxCjKd5mUr7jpWQ4ampFX
+         6xfd/MK8SJFY+iHOBKyzq9po40tE23dqWuaHB+T3MxOgQ9JHCo9x22XNvEuKZW/V
+         4WaoGHVkR+jtWNC8Qv/xCMHL3CEvAklKJR68WDhozwUYTgNt5vCoJOviMlbhDSwf
+         0zVXpQwMR//4c0QSA0+BPpIEPDnx5vTIHBVXHy4bBBHU2Vi87QIDS0AtiBpNcspN
+         6AG0ktuldkE/pqfSTJ2A9HpHZyU+8boagRS5/z102Pjtmf/mzUkcHmfRb9o0DE15
+         X5fqpA3lYyx9eHIAgH4eaB1+G20Ez/EY5hr8IMS2nNBSem491UW6DXDYRu6eBLrR
+         sRmtrJ6DlTZFRFlqVZ47bce/SbeM/xljvRkBxWG6RtDRsTyNVI65Ag0EVcnc1wEQ
+         ANzk9W058tSHqf05UEtJGrN0K8DLriCvPd7QdFA8yVIZM3WD+m0AMBGXjd8BT5c2
+         lt0GmhB8klonHZvPiVLTRTLcSsc3NBopr1HL1bWsgOczwWiXSrc62oGAHUOQT/bv
+         vS6KIkZgez+qtCo/DCOGJrADaoJBiBCLSsZgowpzazZZDPUF7rAsfcryVCFvftK0
+         wAe1OdvUG77NHrMrE1oX3zh82hTqR5azBre6Y81lNwxxug/Xl/RHjNhEOYohcsLS
+         /xl0m2X831fHzcGGpoISRgrfel+M4RoC7KsLrwVhrF8koCD/ZQlevfLpuRl5LNpO
+         s1ZtEi8ZvLliih+H+BOlBD0zUc3zZrrks/NCpm1eZba0Z6L48r4TIHW08SGlHx7o
+         SrXgkq3mtoM8C4uDiLwjav5KxiF7n68s/9LF82aAr7YjNXd+xYZNjsmmFlYj9CGI
+         lL4jVt4v4EtTONa6pbtCNv5ezOLDZ6BBcQ36xdkrWzdpjQjL2mnh3sqIAGIPu7tH
+         N8euQ5L1zIvIjVqYlR1eJssp96QYPWYxF7TosfML4BUhCP631IWfuD9X/K2LzDmv
+         B2gVZo9fbhSC+P7GYVG+tV4VLAMbspAxRXXL69+j98aeV5g59f8OFQPbGpKE/SAY
+         eIXtq8DD+PYUXXq3VUI2brVLv42LBVdSJpKNKG3decIBABEBAAGJAh8EGAECAAkF
+         AlXJ3NcCGwwACgkQuj4pM4KAskKzeg/9FxXJLV3eWwY4nn2VhwYTHnHtSUpi8usk
+         RzIa3Mcj6OEVjU2LZaT3UQF8h6dLM9y+CemcwyjMqm1RQ5+ogfrItby1AaBXwCvm
+         XCUGw2zFOAnyzSHHoDFj27sllFxDmfSiBY5KP8M+/ywHKZDkRb6EjzMPx5oKFeGW
+         Hmqaj5FDmTeWChSIHd1ZxobashFauOZDbS/ijRRMsVGFulU2Nb/4QJK73g3orfhY
+         5mq1TMkQ5Kcbqh4OmYYYayLtJQcpa6ZVopaRhAJFe30P83zW9pM5LQDpP9JIyY+S
+         DjasEY4ekYtw6oCKAjpqlwaaNDjl27OkJ7R7laFKy4grZ2TSB/2KTjn/Ea3CH/pA
+         SrpVis1LvC90XytbBnsEKYXU55H943wmBc6oj+itQhx4WyIiv+UgtHI/DbnYbUru
+         71wpfapqGBXYfu/zAra8PITngOFuizeYu+idemu55ANO3keJPKr3ZBUSBBpNFauT
+         VUUCSnrLt+kpSLopYESiNdsPW/aQTFgFvA4BkBJTIMQsQZXicuXUePYlg5xFzXOv
+         XgiqkjRA9xBI5JAIUgLRk3ulVFt2bIsTG9XgtGyphEs86Q0MOIMo0WbZGtAYDrZO
+         DITbm2KzVLGVLn/ZJiW11RSHPNiwgg66/puKdFWrSogYYDJdDEUJtLIhypZ+ORxe
+         7oh88hTkC1w=
+         =UNSw
+         -----END PGP PUBLIC KEY BLOCK-----

=== added file 'examples/tests/apt_source_modify_disable_suite.yaml'
--- examples/tests/apt_source_modify_disable_suite.yaml	1970-01-01 00:00:00 +0000
+++ examples/tests/apt_source_modify_disable_suite.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,92 @@
+showtrace: true
+apt:
+  preserve_sources_list: false
+  primary:
+    - arches: [default]
+      uri: http://us.archive.ubuntu.com/ubuntu
+  security:
+    - arches: [default]
+      uri: http://security.ubuntu.com/ubuntu
+  disable_suites: [$RELEASE-updates]
+  conf: | # APT config
+    ACQUIRE {
+      Retries "3";
+    };
+  sources:
+    curtin-dev-ppa.list:
+      source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main"
+      keyid: F430BBA5
+    ignored1:
+      source: "ppa:curtin-dev/test-archive"
+    my-repo2.list:
+      source: deb $MIRROR $RELEASE multiverse
+    ignored3:
+      keyid: 0E72 9061 0D2F 6DC4 D65E  A921 9A31 4EC5 F470 A0AC
+    my-repo4.list:
+      source: deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main
+      key: |
+         -----BEGIN PGP PUBLIC KEY BLOCK-----
+         Version: GnuPG v1
+
+         mQINBFXJ3NcBEAC85PMdaKdItkdjCT1vRJrdwNqj4lN5mu6z4dDVfeZlmozRDBGb
+         ENSOWCiYz3meANO7bKthQQCqAETSBV72rrDCqFZUpXeyG3zCN98Z/UdJ8zpQD9uw
+         mq2CaAqWMk6ty+PkHQ4gtIc390lGfRbHNoZ5HaWJNVOK7FCB2hBmnTZW7AViYiYa
+         YswOjYxaCkwQ/DsMOPD7S5OjwbLucs2YGjkBm7YF1nnXNzyt+BwieKQW/sQ2+ga1
+         mkgLW1BTQN3+JreBpeHy/yrRdK4dOZZUar4WPZitZzOW2eNpaaf6hKNA14LB/96a
+         tEguK8VazoqSQGvNV/R3PjIYmurVP3/Z9bEVgOKhMCflgwKCYgx+tBUypN3zFWv9
+         pgVq3iHx1MFCvoP9FsNB7I6jzOxlQP4z25BzR3ympx/QexkFw5CBFXhdrU+qNVBl
+         SSnz69aLEjCRXqBOnQEr0irs/e/35+yLJdEuw89vSwWwrzbV5r1Y7uxinEGWSydT
+         qddj97uKOWeMmnp20Be4+nhDDW/BMiTFI4Y3bYeDTrftbWMaSEmtSTw5HHxtAFtg
+         X9Hyx0Q3eN1w3gRZgIdm0xYTe7bNTofFRdfXzB/9wtNIcaW10+IlODShFHPCnh+d
+         i56a8LCdZcXiiLfCIhEcnqmM37BVvhjIQKSyOU1eMEgX148aVEz36OVuMwARAQAB
+         tCdDaHJpc3RpYW4gRWhyaGFyZHQgPGNwYWVsemVyQGdtYWlsLmNvbT6JAjgEEwEC
+         ACIFAlXJ3NcCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJELo+KTOCgLJC
+         BugP/ir0ES3wCzvHMnkz2UlXt9FR4KqY0L9uFmwu9VYpmfAploEVIOi2HcuxpcRp
+         hgoQlUtkz3lRhUeZzCxuB1ljM2JKTJiezP1tFTTGCbVYhPyA0LmUiHDWylG7FzPb
+         TX96HY/G0jf+m4CfR8q3HNHjeDi4VeA2ppBxdHcVE5I7HihDgRPJd+CvCa3nYdAb
+         nXDKlQZz5aZc7AgrRVamr4mshkzWuwNNCwOt3AIgHDkU/HzA5xlXfwHxOoP6scWH
+         /T7vFsd/vOikBphGseWPgKm6w1zyQ5Dk/wjRL8UeSJZW+Rh4PuBMbxg01lAZpPTq
+         tu/bePeNty3g5bhwO6oHMpWhprn3dO37R680qo6UnBPzICeuBUnSYgpPnsQC9maz
+         FEjiBtMsXSanU5vww7TpxY1JHjk5KFcmKx4sBeablznsm+GuVaDFN8R4eDjrM14r
+         SOzA9cV0bSQr4dMqA9fZFSx6qLTacIeMfptybW3zaDX/pJOeBBWRAtoAfZIFbBnu
+         /ZxDDgiQtZzpVK4UkYk5rjjtV/CPVXx64AnTHi35YfUn14KkE+k3odHdvPfBiv9+
+         NxfkTuV/koOgpD3+lTIYXyVHS9gwvhfRD/YfdrnVGl7bRZe68j7bfWDuQuSqIhSA
+         jpeJslJCawnqv6fVB6buj6jjcgHIxqCVn99chaPFSblEIPfXtDVDaHJpc3RpYW4g
+         RWhyaGFyZHQgPGNocmlzdGlhbi5laHJoYXJkdEBjYW5vbmljYWwuY29tPokCOAQT
+         AQIAIgUCVsbUOgIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQuj4pM4KA
+         skJNPg/7BF/iXHHdSBigWKXCCvQz58uInoc/R4beIegxRCMq7wkYEey4B7Fd35zY
+         zv9CBOTV3hZePMCg9jxl4ki2kSsrZSCIEJw4L/aXDtJtx3HT18uTW0QKoU3nK/ro
+         OtthVqBqmiSEi40UUU+5MGrUjwLSm+PjaaSapjK/lddf0KbXBB78/BtR/XT0gxWM
+         +o68Oei9Nj1S3h6UndJwNAQ1xaDWmU2T7CRJet3F+cXZd3aDuS2axOTSTZbraSq7
+         zdl1xUiKtzXZIp8X1ewne+dzkewZuWj7DOwOBEFK26UhxCjKd5mUr7jpWQ4ampFX
+         6xfd/MK8SJFY+iHOBKyzq9po40tE23dqWuaHB+T3MxOgQ9JHCo9x22XNvEuKZW/V
+         4WaoGHVkR+jtWNC8Qv/xCMHL3CEvAklKJR68WDhozwUYTgNt5vCoJOviMlbhDSwf
+         0zVXpQwMR//4c0QSA0+BPpIEPDnx5vTIHBVXHy4bBBHU2Vi87QIDS0AtiBpNcspN
+         6AG0ktuldkE/pqfSTJ2A9HpHZyU+8boagRS5/z102Pjtmf/mzUkcHmfRb9o0DE15
+         X5fqpA3lYyx9eHIAgH4eaB1+G20Ez/EY5hr8IMS2nNBSem491UW6DXDYRu6eBLrR
+         sRmtrJ6DlTZFRFlqVZ47bce/SbeM/xljvRkBxWG6RtDRsTyNVI65Ag0EVcnc1wEQ
+         ANzk9W058tSHqf05UEtJGrN0K8DLriCvPd7QdFA8yVIZM3WD+m0AMBGXjd8BT5c2
+         lt0GmhB8klonHZvPiVLTRTLcSsc3NBopr1HL1bWsgOczwWiXSrc62oGAHUOQT/bv
+         vS6KIkZgez+qtCo/DCOGJrADaoJBiBCLSsZgowpzazZZDPUF7rAsfcryVCFvftK0
+         wAe1OdvUG77NHrMrE1oX3zh82hTqR5azBre6Y81lNwxxug/Xl/RHjNhEOYohcsLS
+         /xl0m2X831fHzcGGpoISRgrfel+M4RoC7KsLrwVhrF8koCD/ZQlevfLpuRl5LNpO
+         s1ZtEi8ZvLliih+H+BOlBD0zUc3zZrrks/NCpm1eZba0Z6L48r4TIHW08SGlHx7o
+         SrXgkq3mtoM8C4uDiLwjav5KxiF7n68s/9LF82aAr7YjNXd+xYZNjsmmFlYj9CGI
+         lL4jVt4v4EtTONa6pbtCNv5ezOLDZ6BBcQ36xdkrWzdpjQjL2mnh3sqIAGIPu7tH
+         N8euQ5L1zIvIjVqYlR1eJssp96QYPWYxF7TosfML4BUhCP631IWfuD9X/K2LzDmv
+         B2gVZo9fbhSC+P7GYVG+tV4VLAMbspAxRXXL69+j98aeV5g59f8OFQPbGpKE/SAY
+         eIXtq8DD+PYUXXq3VUI2brVLv42LBVdSJpKNKG3decIBABEBAAGJAh8EGAECAAkF
+         AlXJ3NcCGwwACgkQuj4pM4KAskKzeg/9FxXJLV3eWwY4nn2VhwYTHnHtSUpi8usk
+         RzIa3Mcj6OEVjU2LZaT3UQF8h6dLM9y+CemcwyjMqm1RQ5+ogfrItby1AaBXwCvm
+         XCUGw2zFOAnyzSHHoDFj27sllFxDmfSiBY5KP8M+/ywHKZDkRb6EjzMPx5oKFeGW
+         Hmqaj5FDmTeWChSIHd1ZxobashFauOZDbS/ijRRMsVGFulU2Nb/4QJK73g3orfhY
+         5mq1TMkQ5Kcbqh4OmYYYayLtJQcpa6ZVopaRhAJFe30P83zW9pM5LQDpP9JIyY+S
+         DjasEY4ekYtw6oCKAjpqlwaaNDjl27OkJ7R7laFKy4grZ2TSB/2KTjn/Ea3CH/pA
+         SrpVis1LvC90XytbBnsEKYXU55H943wmBc6oj+itQhx4WyIiv+UgtHI/DbnYbUru
+         71wpfapqGBXYfu/zAra8PITngOFuizeYu+idemu55ANO3keJPKr3ZBUSBBpNFauT
+         VUUCSnrLt+kpSLopYESiNdsPW/aQTFgFvA4BkBJTIMQsQZXicuXUePYlg5xFzXOv
+         XgiqkjRA9xBI5JAIUgLRk3ulVFt2bIsTG9XgtGyphEs86Q0MOIMo0WbZGtAYDrZO
+         DITbm2KzVLGVLn/ZJiW11RSHPNiwgg66/puKdFWrSogYYDJdDEUJtLIhypZ+ORxe
+         7oh88hTkC1w=
+         =UNSw
+         -----END PGP PUBLIC KEY BLOCK-----
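
The disable_suites entry near the top of this file asks curtin to comment
out the matching suite's lines in the generated sources.list. A minimal
sketch of that effect (illustrative only; the helper name and comment
prefix are assumptions, not curtin's actual code):

    import re

    # comment out deb/deb-src lines whose suite field matches 'suite'
    def disable_suite(sources_list, suite):
        out = []
        pattern = r'^\s*deb(-src)?\s+\S+\s+%s(\s|$)' % re.escape(suite)
        for line in sources_list.splitlines():
            if re.match(pattern, line):
                line = '# suite disabled by curtin: ' + line
            out.append(line)
        return '\n'.join(out)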

=== added file 'examples/tests/apt_source_preserve.yaml'
--- examples/tests/apt_source_preserve.yaml	1970-01-01 00:00:00 +0000
+++ examples/tests/apt_source_preserve.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,98 @@
+showtrace: true
+apt:
+  # this is like the other apt_source tests, but preserving sources.list;
+  # preserve_sources_list: true is the default now, so it is not set here
+  primary:
+    - arches: [default]
+      uri: http://us.archive.ubuntu.com/ubuntu
+  security:
+    - arches: [default]
+      uri: http://security.ubuntu.com/ubuntu
+  sources_list: | # written by curtin custom template
+    deb $MIRROR $RELEASE main restricted
+    deb-src $MIRROR $RELEASE main restricted
+    deb $PRIMARY $RELEASE universe restricted
+    deb $SECURITY $RELEASE-security multiverse
+    # nice line to check in test
+  conf: | # APT config
+    ACQUIRE {
+      Retries "3";
+    };
+  sources:
+    curtin-dev-ppa.list:
+      source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main"
+      keyid: F430BBA5
+    ignored1:
+      source: "ppa:curtin-dev/test-archive"
+    my-repo2.list:
+      source: deb $MIRROR $RELEASE multiverse
+    ignored3:
+      keyid: 0E72 9061 0D2F 6DC4 D65E  A921 9A31 4EC5 F470 A0AC
+    my-repo4.list:
+      source: deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main
+      key: |
+         -----BEGIN PGP PUBLIC KEY BLOCK-----
+         Version: GnuPG v1
+
+         mQINBFXJ3NcBEAC85PMdaKdItkdjCT1vRJrdwNqj4lN5mu6z4dDVfeZlmozRDBGb
+         ENSOWCiYz3meANO7bKthQQCqAETSBV72rrDCqFZUpXeyG3zCN98Z/UdJ8zpQD9uw
+         mq2CaAqWMk6ty+PkHQ4gtIc390lGfRbHNoZ5HaWJNVOK7FCB2hBmnTZW7AViYiYa
+         YswOjYxaCkwQ/DsMOPD7S5OjwbLucs2YGjkBm7YF1nnXNzyt+BwieKQW/sQ2+ga1
+         mkgLW1BTQN3+JreBpeHy/yrRdK4dOZZUar4WPZitZzOW2eNpaaf6hKNA14LB/96a
+         tEguK8VazoqSQGvNV/R3PjIYmurVP3/Z9bEVgOKhMCflgwKCYgx+tBUypN3zFWv9
+         pgVq3iHx1MFCvoP9FsNB7I6jzOxlQP4z25BzR3ympx/QexkFw5CBFXhdrU+qNVBl
+         SSnz69aLEjCRXqBOnQEr0irs/e/35+yLJdEuw89vSwWwrzbV5r1Y7uxinEGWSydT
+         qddj97uKOWeMmnp20Be4+nhDDW/BMiTFI4Y3bYeDTrftbWMaSEmtSTw5HHxtAFtg
+         X9Hyx0Q3eN1w3gRZgIdm0xYTe7bNTofFRdfXzB/9wtNIcaW10+IlODShFHPCnh+d
+         i56a8LCdZcXiiLfCIhEcnqmM37BVvhjIQKSyOU1eMEgX148aVEz36OVuMwARAQAB
+         tCdDaHJpc3RpYW4gRWhyaGFyZHQgPGNwYWVsemVyQGdtYWlsLmNvbT6JAjgEEwEC
+         ACIFAlXJ3NcCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJELo+KTOCgLJC
+         BugP/ir0ES3wCzvHMnkz2UlXt9FR4KqY0L9uFmwu9VYpmfAploEVIOi2HcuxpcRp
+         hgoQlUtkz3lRhUeZzCxuB1ljM2JKTJiezP1tFTTGCbVYhPyA0LmUiHDWylG7FzPb
+         TX96HY/G0jf+m4CfR8q3HNHjeDi4VeA2ppBxdHcVE5I7HihDgRPJd+CvCa3nYdAb
+         nXDKlQZz5aZc7AgrRVamr4mshkzWuwNNCwOt3AIgHDkU/HzA5xlXfwHxOoP6scWH
+         /T7vFsd/vOikBphGseWPgKm6w1zyQ5Dk/wjRL8UeSJZW+Rh4PuBMbxg01lAZpPTq
+         tu/bePeNty3g5bhwO6oHMpWhprn3dO37R680qo6UnBPzICeuBUnSYgpPnsQC9maz
+         FEjiBtMsXSanU5vww7TpxY1JHjk5KFcmKx4sBeablznsm+GuVaDFN8R4eDjrM14r
+         SOzA9cV0bSQr4dMqA9fZFSx6qLTacIeMfptybW3zaDX/pJOeBBWRAtoAfZIFbBnu
+         /ZxDDgiQtZzpVK4UkYk5rjjtV/CPVXx64AnTHi35YfUn14KkE+k3odHdvPfBiv9+
+         NxfkTuV/koOgpD3+lTIYXyVHS9gwvhfRD/YfdrnVGl7bRZe68j7bfWDuQuSqIhSA
+         jpeJslJCawnqv6fVB6buj6jjcgHIxqCVn99chaPFSblEIPfXtDVDaHJpc3RpYW4g
+         RWhyaGFyZHQgPGNocmlzdGlhbi5laHJoYXJkdEBjYW5vbmljYWwuY29tPokCOAQT
+         AQIAIgUCVsbUOgIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQuj4pM4KA
+         skJNPg/7BF/iXHHdSBigWKXCCvQz58uInoc/R4beIegxRCMq7wkYEey4B7Fd35zY
+         zv9CBOTV3hZePMCg9jxl4ki2kSsrZSCIEJw4L/aXDtJtx3HT18uTW0QKoU3nK/ro
+         OtthVqBqmiSEi40UUU+5MGrUjwLSm+PjaaSapjK/lddf0KbXBB78/BtR/XT0gxWM
+         +o68Oei9Nj1S3h6UndJwNAQ1xaDWmU2T7CRJet3F+cXZd3aDuS2axOTSTZbraSq7
+         zdl1xUiKtzXZIp8X1ewne+dzkewZuWj7DOwOBEFK26UhxCjKd5mUr7jpWQ4ampFX
+         6xfd/MK8SJFY+iHOBKyzq9po40tE23dqWuaHB+T3MxOgQ9JHCo9x22XNvEuKZW/V
+         4WaoGHVkR+jtWNC8Qv/xCMHL3CEvAklKJR68WDhozwUYTgNt5vCoJOviMlbhDSwf
+         0zVXpQwMR//4c0QSA0+BPpIEPDnx5vTIHBVXHy4bBBHU2Vi87QIDS0AtiBpNcspN
+         6AG0ktuldkE/pqfSTJ2A9HpHZyU+8boagRS5/z102Pjtmf/mzUkcHmfRb9o0DE15
+         X5fqpA3lYyx9eHIAgH4eaB1+G20Ez/EY5hr8IMS2nNBSem491UW6DXDYRu6eBLrR
+         sRmtrJ6DlTZFRFlqVZ47bce/SbeM/xljvRkBxWG6RtDRsTyNVI65Ag0EVcnc1wEQ
+         ANzk9W058tSHqf05UEtJGrN0K8DLriCvPd7QdFA8yVIZM3WD+m0AMBGXjd8BT5c2
+         lt0GmhB8klonHZvPiVLTRTLcSsc3NBopr1HL1bWsgOczwWiXSrc62oGAHUOQT/bv
+         vS6KIkZgez+qtCo/DCOGJrADaoJBiBCLSsZgowpzazZZDPUF7rAsfcryVCFvftK0
+         wAe1OdvUG77NHrMrE1oX3zh82hTqR5azBre6Y81lNwxxug/Xl/RHjNhEOYohcsLS
+         /xl0m2X831fHzcGGpoISRgrfel+M4RoC7KsLrwVhrF8koCD/ZQlevfLpuRl5LNpO
+         s1ZtEi8ZvLliih+H+BOlBD0zUc3zZrrks/NCpm1eZba0Z6L48r4TIHW08SGlHx7o
+         SrXgkq3mtoM8C4uDiLwjav5KxiF7n68s/9LF82aAr7YjNXd+xYZNjsmmFlYj9CGI
+         lL4jVt4v4EtTONa6pbtCNv5ezOLDZ6BBcQ36xdkrWzdpjQjL2mnh3sqIAGIPu7tH
+         N8euQ5L1zIvIjVqYlR1eJssp96QYPWYxF7TosfML4BUhCP631IWfuD9X/K2LzDmv
+         B2gVZo9fbhSC+P7GYVG+tV4VLAMbspAxRXXL69+j98aeV5g59f8OFQPbGpKE/SAY
+         eIXtq8DD+PYUXXq3VUI2brVLv42LBVdSJpKNKG3decIBABEBAAGJAh8EGAECAAkF
+         AlXJ3NcCGwwACgkQuj4pM4KAskKzeg/9FxXJLV3eWwY4nn2VhwYTHnHtSUpi8usk
+         RzIa3Mcj6OEVjU2LZaT3UQF8h6dLM9y+CemcwyjMqm1RQ5+ogfrItby1AaBXwCvm
+         XCUGw2zFOAnyzSHHoDFj27sllFxDmfSiBY5KP8M+/ywHKZDkRb6EjzMPx5oKFeGW
+         Hmqaj5FDmTeWChSIHd1ZxobashFauOZDbS/ijRRMsVGFulU2Nb/4QJK73g3orfhY
+         5mq1TMkQ5Kcbqh4OmYYYayLtJQcpa6ZVopaRhAJFe30P83zW9pM5LQDpP9JIyY+S
+         DjasEY4ekYtw6oCKAjpqlwaaNDjl27OkJ7R7laFKy4grZ2TSB/2KTjn/Ea3CH/pA
+         SrpVis1LvC90XytbBnsEKYXU55H943wmBc6oj+itQhx4WyIiv+UgtHI/DbnYbUru
+         71wpfapqGBXYfu/zAra8PITngOFuizeYu+idemu55ANO3keJPKr3ZBUSBBpNFauT
+         VUUCSnrLt+kpSLopYESiNdsPW/aQTFgFvA4BkBJTIMQsQZXicuXUePYlg5xFzXOv
+         XgiqkjRA9xBI5JAIUgLRk3ulVFt2bIsTG9XgtGyphEs86Q0MOIMo0WbZGtAYDrZO
+         DITbm2KzVLGVLn/ZJiW11RSHPNiwgg66/puKdFWrSogYYDJdDEUJtLIhypZ+ORxe
+         7oh88hTkC1w=
+         =UNSw
+         -----END PGP PUBLIC KEY BLOCK-----
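
As the comments at the top of this file note, preserve_sources_list now
defaults to true, so the custom sources_list template above is expected
not to be applied. A hedged sketch of that gate (hypothetical helper, not
curtin's code):

    # only rewrite /etc/apt/sources.list when preservation is switched off
    def want_custom_sources_list(cfg):
        return not cfg.get('preserve_sources_list', True)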

=== added file 'examples/tests/apt_source_search.yaml'
--- examples/tests/apt_source_search.yaml	1970-01-01 00:00:00 +0000
+++ examples/tests/apt_source_search.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,97 @@
+showtrace: true
+apt:
+  preserve_sources_list: false
+  primary:
+    - arches: [default]
+      search:
+        - http://does.not.exist/ubuntu
+        - http://does.also.not.exist/ubuntu
+        - http://us.archive.ubuntu.com/ubuntu
+  security:
+    - arches: [default]
+      search:
+        - http://does.not.exist/ubuntu
+        - http://does.also.not.exist/ubuntu
+        - http://security.ubuntu.com/ubuntu
+  conf: | # APT config
+    ACQUIRE {
+      Retries "3";
+    };
+  sources:
+    curtin-dev-ppa.list:
+      source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main"
+      keyid: F430BBA5
+    ignored1:
+      source: "ppa:curtin-dev/test-archive"
+    my-repo2.list:
+      source: deb $MIRROR $RELEASE multiverse
+    ignored3:
+      keyid: 0E72 9061 0D2F 6DC4 D65E  A921 9A31 4EC5 F470 A0AC
+    my-repo4.list:
+      source: deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main
+      key: |
+         -----BEGIN PGP PUBLIC KEY BLOCK-----
+         Version: GnuPG v1
+
+         mQINBFXJ3NcBEAC85PMdaKdItkdjCT1vRJrdwNqj4lN5mu6z4dDVfeZlmozRDBGb
+         ENSOWCiYz3meANO7bKthQQCqAETSBV72rrDCqFZUpXeyG3zCN98Z/UdJ8zpQD9uw
+         mq2CaAqWMk6ty+PkHQ4gtIc390lGfRbHNoZ5HaWJNVOK7FCB2hBmnTZW7AViYiYa
+         YswOjYxaCkwQ/DsMOPD7S5OjwbLucs2YGjkBm7YF1nnXNzyt+BwieKQW/sQ2+ga1
+         mkgLW1BTQN3+JreBpeHy/yrRdK4dOZZUar4WPZitZzOW2eNpaaf6hKNA14LB/96a
+         tEguK8VazoqSQGvNV/R3PjIYmurVP3/Z9bEVgOKhMCflgwKCYgx+tBUypN3zFWv9
+         pgVq3iHx1MFCvoP9FsNB7I6jzOxlQP4z25BzR3ympx/QexkFw5CBFXhdrU+qNVBl
+         SSnz69aLEjCRXqBOnQEr0irs/e/35+yLJdEuw89vSwWwrzbV5r1Y7uxinEGWSydT
+         qddj97uKOWeMmnp20Be4+nhDDW/BMiTFI4Y3bYeDTrftbWMaSEmtSTw5HHxtAFtg
+         X9Hyx0Q3eN1w3gRZgIdm0xYTe7bNTofFRdfXzB/9wtNIcaW10+IlODShFHPCnh+d
+         i56a8LCdZcXiiLfCIhEcnqmM37BVvhjIQKSyOU1eMEgX148aVEz36OVuMwARAQAB
+         tCdDaHJpc3RpYW4gRWhyaGFyZHQgPGNwYWVsemVyQGdtYWlsLmNvbT6JAjgEEwEC
+         ACIFAlXJ3NcCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJELo+KTOCgLJC
+         BugP/ir0ES3wCzvHMnkz2UlXt9FR4KqY0L9uFmwu9VYpmfAploEVIOi2HcuxpcRp
+         hgoQlUtkz3lRhUeZzCxuB1ljM2JKTJiezP1tFTTGCbVYhPyA0LmUiHDWylG7FzPb
+         TX96HY/G0jf+m4CfR8q3HNHjeDi4VeA2ppBxdHcVE5I7HihDgRPJd+CvCa3nYdAb
+         nXDKlQZz5aZc7AgrRVamr4mshkzWuwNNCwOt3AIgHDkU/HzA5xlXfwHxOoP6scWH
+         /T7vFsd/vOikBphGseWPgKm6w1zyQ5Dk/wjRL8UeSJZW+Rh4PuBMbxg01lAZpPTq
+         tu/bePeNty3g5bhwO6oHMpWhprn3dO37R680qo6UnBPzICeuBUnSYgpPnsQC9maz
+         FEjiBtMsXSanU5vww7TpxY1JHjk5KFcmKx4sBeablznsm+GuVaDFN8R4eDjrM14r
+         SOzA9cV0bSQr4dMqA9fZFSx6qLTacIeMfptybW3zaDX/pJOeBBWRAtoAfZIFbBnu
+         /ZxDDgiQtZzpVK4UkYk5rjjtV/CPVXx64AnTHi35YfUn14KkE+k3odHdvPfBiv9+
+         NxfkTuV/koOgpD3+lTIYXyVHS9gwvhfRD/YfdrnVGl7bRZe68j7bfWDuQuSqIhSA
+         jpeJslJCawnqv6fVB6buj6jjcgHIxqCVn99chaPFSblEIPfXtDVDaHJpc3RpYW4g
+         RWhyaGFyZHQgPGNocmlzdGlhbi5laHJoYXJkdEBjYW5vbmljYWwuY29tPokCOAQT
+         AQIAIgUCVsbUOgIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQuj4pM4KA
+         skJNPg/7BF/iXHHdSBigWKXCCvQz58uInoc/R4beIegxRCMq7wkYEey4B7Fd35zY
+         zv9CBOTV3hZePMCg9jxl4ki2kSsrZSCIEJw4L/aXDtJtx3HT18uTW0QKoU3nK/ro
+         OtthVqBqmiSEi40UUU+5MGrUjwLSm+PjaaSapjK/lddf0KbXBB78/BtR/XT0gxWM
+         +o68Oei9Nj1S3h6UndJwNAQ1xaDWmU2T7CRJet3F+cXZd3aDuS2axOTSTZbraSq7
+         zdl1xUiKtzXZIp8X1ewne+dzkewZuWj7DOwOBEFK26UhxCjKd5mUr7jpWQ4ampFX
+         6xfd/MK8SJFY+iHOBKyzq9po40tE23dqWuaHB+T3MxOgQ9JHCo9x22XNvEuKZW/V
+         4WaoGHVkR+jtWNC8Qv/xCMHL3CEvAklKJR68WDhozwUYTgNt5vCoJOviMlbhDSwf
+         0zVXpQwMR//4c0QSA0+BPpIEPDnx5vTIHBVXHy4bBBHU2Vi87QIDS0AtiBpNcspN
+         6AG0ktuldkE/pqfSTJ2A9HpHZyU+8boagRS5/z102Pjtmf/mzUkcHmfRb9o0DE15
+         X5fqpA3lYyx9eHIAgH4eaB1+G20Ez/EY5hr8IMS2nNBSem491UW6DXDYRu6eBLrR
+         sRmtrJ6DlTZFRFlqVZ47bce/SbeM/xljvRkBxWG6RtDRsTyNVI65Ag0EVcnc1wEQ
+         ANzk9W058tSHqf05UEtJGrN0K8DLriCvPd7QdFA8yVIZM3WD+m0AMBGXjd8BT5c2
+         lt0GmhB8klonHZvPiVLTRTLcSsc3NBopr1HL1bWsgOczwWiXSrc62oGAHUOQT/bv
+         vS6KIkZgez+qtCo/DCOGJrADaoJBiBCLSsZgowpzazZZDPUF7rAsfcryVCFvftK0
+         wAe1OdvUG77NHrMrE1oX3zh82hTqR5azBre6Y81lNwxxug/Xl/RHjNhEOYohcsLS
+         /xl0m2X831fHzcGGpoISRgrfel+M4RoC7KsLrwVhrF8koCD/ZQlevfLpuRl5LNpO
+         s1ZtEi8ZvLliih+H+BOlBD0zUc3zZrrks/NCpm1eZba0Z6L48r4TIHW08SGlHx7o
+         SrXgkq3mtoM8C4uDiLwjav5KxiF7n68s/9LF82aAr7YjNXd+xYZNjsmmFlYj9CGI
+         lL4jVt4v4EtTONa6pbtCNv5ezOLDZ6BBcQ36xdkrWzdpjQjL2mnh3sqIAGIPu7tH
+         N8euQ5L1zIvIjVqYlR1eJssp96QYPWYxF7TosfML4BUhCP631IWfuD9X/K2LzDmv
+         B2gVZo9fbhSC+P7GYVG+tV4VLAMbspAxRXXL69+j98aeV5g59f8OFQPbGpKE/SAY
+         eIXtq8DD+PYUXXq3VUI2brVLv42LBVdSJpKNKG3decIBABEBAAGJAh8EGAECAAkF
+         AlXJ3NcCGwwACgkQuj4pM4KAskKzeg/9FxXJLV3eWwY4nn2VhwYTHnHtSUpi8usk
+         RzIa3Mcj6OEVjU2LZaT3UQF8h6dLM9y+CemcwyjMqm1RQ5+ogfrItby1AaBXwCvm
+         XCUGw2zFOAnyzSHHoDFj27sllFxDmfSiBY5KP8M+/ywHKZDkRb6EjzMPx5oKFeGW
+         Hmqaj5FDmTeWChSIHd1ZxobashFauOZDbS/ijRRMsVGFulU2Nb/4QJK73g3orfhY
+         5mq1TMkQ5Kcbqh4OmYYYayLtJQcpa6ZVopaRhAJFe30P83zW9pM5LQDpP9JIyY+S
+         DjasEY4ekYtw6oCKAjpqlwaaNDjl27OkJ7R7laFKy4grZ2TSB/2KTjn/Ea3CH/pA
+         SrpVis1LvC90XytbBnsEKYXU55H943wmBc6oj+itQhx4WyIiv+UgtHI/DbnYbUru
+         71wpfapqGBXYfu/zAra8PITngOFuizeYu+idemu55ANO3keJPKr3ZBUSBBpNFauT
+         VUUCSnrLt+kpSLopYESiNdsPW/aQTFgFvA4BkBJTIMQsQZXicuXUePYlg5xFzXOv
+         XgiqkjRA9xBI5JAIUgLRk3ulVFt2bIsTG9XgtGyphEs86Q0MOIMo0WbZGtAYDrZO
+         DITbm2KzVLGVLn/ZJiW11RSHPNiwgg66/puKdFWrSogYYDJdDEUJtLIhypZ+ORxe
+         7oh88hTkC1w=
+         =UNSw
+         -----END PGP PUBLIC KEY BLOCK-----
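
The search lists above give ordered mirror candidates, with unreachable
hosts deliberately listed first; the first candidate that actually answers
is expected to win. A rough sketch of such probing (illustrative, not
curtin's implementation):

    try:
        from urllib.request import urlopen   # Python 3
    except ImportError:
        from urllib2 import urlopen          # Python 2

    # return the first candidate URI that answers, else None
    def search_for_mirror(candidates, timeout=5):
        for uri in candidates:
            try:
                urlopen(uri, timeout=timeout)
                return uri
            except Exception:
                continue
        return None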

=== modified file 'examples/tests/basic.yaml'
--- examples/tests/basic.yaml	2016-05-10 16:13:29 +0000
+++ examples/tests/basic.yaml	2016-10-03 18:55:20 +0000
@@ -7,7 +7,7 @@
         ptable: msdos
         model: QEMU HARDDISK
         path: /dev/vdb
-        name: main_disk
+        name: main_disk_with_in/\&valid@#dname
         wipe: superblock
         grub_device: true
       - id: sda1
@@ -42,6 +42,10 @@
         path: /dev/vdc
         name: sparedisk
         wipe: superblock
+      - id: sparedisk_fat_fmt_id
+        type: format
+        fstype: fat32
+        volume: sparedisk_id
       - id: btrfs_disk_id
         type: disk
         path: /dev/vdd
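
The rename above deliberately pushes an invalid name through the disk
'name' field to exercise dname sanitization. A hedged sketch of the kind
of cleanup this implies (the function name and character policy are
assumptions, not curtin's code):

    import re

    # replace characters that are unsafe in a udev symlink name
    def sanitize_dname(dname):
        return re.sub(r'[^a-zA-Z0-9._-]', '_', dname)

    # sanitize_dname('main_disk_with_in/\\&valid@#dname')
    # -> 'main_disk_with_in___valid__dname'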

=== added file 'examples/tests/basic_network_static_ipv6.yaml'
--- examples/tests/basic_network_static_ipv6.yaml	1970-01-01 00:00:00 +0000
+++ examples/tests/basic_network_static_ipv6.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,22 @@
+showtrace: true
+network:
+    version: 1
+    config:
+        # Physical interfaces.
+        - type: physical
+          name: interface0
+          mac_address: "52:54:00:12:34:00"
+          subnets:
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:fe06:96b3
+                netmask: 'ffff:ffff:ffff:ffff::'
+                routes:
+                  - gateway: 2001:4800:78ff:1b::1
+                    netmask: '::'
+                    network: '::'
+        - type: nameserver
+          address:
+            - 10.0.2.3
+          search:
+            - wark.maas
+            - foobar.maas

=== modified file 'examples/tests/basic_scsi.yaml'
--- examples/tests/basic_scsi.yaml	2016-07-12 16:17:29 +0000
+++ examples/tests/basic_scsi.yaml	2016-10-03 18:55:20 +0000
@@ -6,7 +6,7 @@
         type: disk
         ptable: msdos
         wwn: '0x39cc071e72c64cc4'
-        name: main_disk
+        name: main_disk_with_in/\&valid@#dname
         wipe: superblock
         grub_device: true
       - id: sda1

=== added file 'examples/tests/network_alias.yaml'
--- examples/tests/network_alias.yaml	1970-01-01 00:00:00 +0000
+++ examples/tests/network_alias.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,125 @@
+showtrace: true
+network:
+    version: 1
+    config:
+        # no-alias: single v4 and v6 on same interface
+        - type: physical
+          name: interface0
+          mac_address: "52:54:00:12:34:00"
+          subnets:
+              - type: static
+                address: 192.168.1.2/24
+                mtu: 1501
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:fe06:ffac
+                netmask: 'ffff:ffff:ffff:ffff::'
+                mtu: 1480
+        # multi_v4_alias: multiple v4 addrs on same interface
+        - type: physical
+          name: interface1
+          mac_address: "52:54:00:12:34:02"
+          subnets:
+              - type: static
+                address: 192.168.2.2/22
+                routes:
+                  - network: 192.168.0.0
+                    netmask: 255.255.252.0
+                    gateway: 192.168.2.1
+              - type: static
+                address: 10.23.23.7/23
+                routes:
+                  - gateway: 10.23.23.1
+                    netmask: 255.255.254.0
+                    network: 10.23.22.0
+        # multi_v6_alias: multiple v6 addrs on same interface
+        - type: physical
+          name: interface2
+          mac_address: "52:54:00:12:34:04"
+          subnets:
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:dead:1000
+                netmask: 'ffff:ffff:ffff:ffff::'
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:dead:2000
+                netmask: 'ffff:ffff:ffff:ffff::'
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:dead:3000
+                netmask: 'ffff:ffff:ffff:ffff::'
+        # multi_v4_and_v6_alias: multiple v4 and v6 addrs on same interface
+        - type: physical
+          name: interface3
+          mac_address: "52:54:00:12:34:06"
+          subnets:
+              - type: static
+                address: 192.168.7.7/22
+                routes:
+                  - network: 192.168.0.0
+                    netmask: 255.255.252.0
+                    gateway: 192.168.7.1
+              - type: static
+                address: 10.99.99.23/23
+                routes:
+                  - gateway: 10.99.99.1
+                    netmask: 255.255.254.0
+                    network: 10.99.98.0
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:beef:4000
+                netmask: 'ffff:ffff:ffff:ffff::'
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:beef:5000
+                netmask: 'ffff:ffff:ffff:ffff::'
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:beef:6000
+                netmask: 'ffff:ffff:ffff:ffff::'
+        # multi_v6_and_v4_revorder_alias: multiple v4 and v6 addr, rev order
+        - type: physical
+          name: interface4
+          mac_address: "52:54:00:12:34:08"
+          subnets:
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:debe:7000
+                netmask: 'ffff:ffff:ffff:ffff::'
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:debe:8000
+                netmask: 'ffff:ffff:ffff:ffff::'
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:debe:9000
+                netmask: 'ffff:ffff:ffff:ffff::'
+              - type: static
+                address: 192.168.100.100/22
+                routes:
+                  - network: 192.168.0.0
+                    netmask: 255.255.252.0
+                    gateway: 192.168.100.1
+              - type: static
+                address: 10.17.142.2/23
+                routes:
+                  - gateway: 10.17.142.1
+                    netmask: 255.255.254.0
+                    network: 10.17.142.0
+        # multi_v6_and_v4_mix_order: multiple v4 and v6 addr, mixed order
+        - type: physical
+          name: interface5
+          mac_address: "52:54:00:12:34:0a"
+          subnets:
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:baaf:a000
+                netmask: 'ffff:ffff:ffff:ffff::'
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:baaf:c000
+                netmask: 'ffff:ffff:ffff:ffff::'
+              - type: static
+                address: 192.168.200.200/22
+                routes:
+                  - network: 192.168.0.0
+                    netmask: 255.255.252.0
+                    gateway: 192.168.200.1
+              - type: static
+                address: 10.252.2.2/23
+                routes:
+                  - gateway: 10.252.2.1
+                    netmask: 255.255.254.0
+                    network: 10.252.2.0
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:baaf:b000
+                netmask: 'ffff:ffff:ffff:ffff::'

=== added file 'examples/tests/network_mtu.yaml'
--- examples/tests/network_mtu.yaml	1970-01-01 00:00:00 +0000
+++ examples/tests/network_mtu.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,88 @@
+showtrace: true
+network:
+    version: 1
+    config:
+        - type: physical
+          name: interface0
+          mac_address: "52:54:00:12:34:00"
+          subnets:
+              - type: static
+                address: 192.168.1.2/24
+                mtu: 1501
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:fe06:1000
+                netmask: 'ffff:ffff:ffff:ffff::'
+                mtu: 1480
+        - type: physical
+          name: interface1
+          mac_address: "52:54:00:12:34:02"
+          subnets:
+              - type: static
+                address: 192.168.2.2/24
+                mtu: 1501
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:fe06:2000
+                netmask: 'ffff:ffff:ffff:ffff::'
+                mtu: 1501
+        - type: physical
+          name: interface2
+          mac_address: "52:54:00:12:34:04"
+          subnets:
+              - type: static
+                address: 192.168.3.2/24
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:fe06:3000
+                netmask: 'ffff:ffff:ffff:ffff::'
+                mtu: 1501
+        - type: physical
+          name: interface3
+          mac_address: "52:54:00:12:34:06"
+          subnets:
+              - type: manual
+                control: manual
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:fe06:4000
+                netmask: 'ffff:ffff:ffff:ffff::'
+                mtu: 9000
+        - type: physical
+          name: interface4
+          mac_address: "52:54:00:12:34:08"
+          subnets:
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:fe06:5000
+                netmask: 'ffff:ffff:ffff:ffff::'
+                mtu: 1480
+              - type: static
+                address: 192.168.5.2/24
+                mtu: 1501
+        - type: physical
+          name: interface5
+          mac_address: "52:54:00:12:34:0c"
+          subnets:
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:fe06:6000
+                netmask: 'ffff:ffff:ffff:ffff::'
+                mtu: 1501
+              - type: static
+                address: 192.168.6.2/24
+                mtu: 1501
+        - type: physical
+          name: interface6
+          mac_address: "52:54:00:12:34:0e"
+          subnets:
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:fe06:7000
+                netmask: 'ffff:ffff:ffff:ffff::'
+                mtu: 1501
+              - type: static
+                address: 192.168.7.2/24
+        - type: physical
+          name: interface7
+          mac_address: "52:54:00:12:35:01"
+          subnets:
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:fe06:8000
+                netmask: 'ffff:ffff:ffff:ffff::'
+                mtu: 9000
+              - type: manual
+                control: manual

=== added file 'examples/tests/network_source_ipv6.yaml'
--- examples/tests/network_source_ipv6.yaml	1970-01-01 00:00:00 +0000
+++ examples/tests/network_source_ipv6.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,31 @@
+showtrace: true
+network:
+    version: 1
+    config:
+        # Physical interfaces.
+        - type: physical
+          name: interface0
+          mac_address: "52:54:00:12:34:00"
+          subnets:
+              - type: static
+                address: 2001:4800:78ff:1b:be76:4eff:fe06:96b3
+                netmask: 'ffff:ffff:ffff:ffff::'
+                routes:
+                  - gateway: 2001:4800:78ff:1b::1
+                    netmask: '::'
+                    network: '::'
+        - type: physical
+          name: interface2
+          mac_address: "52:54:00:12:34:04"
+        - type: nameserver
+          address:
+            - 10.0.2.3
+          search:
+            - wark.maas
+            - foobar.maas
+
+curthooks_commands:
+    # use curtin to inject an eni config file outside of the network yaml;
+    # this allows us to test user-installed configurations outside of
+    # curtin's control
+    aa_cleanup: ['curtin', 'in-target', '--', 'sh', '-c', "rm -f /etc/network/interfaces.d/eth0.cfg; /bin/echo -e 'auto interface2\niface interface2 inet static\n address 192.168.23.23/24\n' > /etc/network/interfaces.d/interface2.cfg"]

=== added file 'examples/tests/test_old_apt_features.yaml'
--- examples/tests/test_old_apt_features.yaml	1970-01-01 00:00:00 +0000
+++ examples/tests/test_old_apt_features.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,11 @@
+showtrace: true
+# apt_proxy gets configured by tools/launch and tests/vmtests/__init__.py
+apt_mirrors:
+# we need a mirror that works (even in CI) but isn't the default
+  ubuntu_archive: http://us.archive.ubuntu.com/ubuntu
+  ubuntu_security: http://archive.ubuntu.com/ubuntu
+# set some key that is guaranteed to exist to a non-default value
+debconf_selections:
+  set1: |
+    debconf debconf/priority select low
+  cloudinit: "cloud-init cloud-init/datasources multiselect NoCloud"

=== added file 'examples/tests/test_old_apt_features_ports.yaml'
--- examples/tests/test_old_apt_features_ports.yaml	1970-01-01 00:00:00 +0000
+++ examples/tests/test_old_apt_features_ports.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,10 @@
+showtrace: true
+# apt_proxy gets configured by tools/launch and tests/vmtests/__init__.py
+apt_mirrors:
+# For ports there is no non-default alternative we could use
+  ubuntu_archive: http://ports.ubuntu.com/ubuntu-ports
+  ubuntu_security: http://ports.ubuntu.com/ubuntu-ports
+# set some key that is guaranteed to exist to a non-default value
+debconf_selections:
+  set1: |
+    debconf debconf/priority select low

=== modified file 'examples/tests/uefi_basic.yaml'
--- examples/tests/uefi_basic.yaml	2016-05-10 16:13:29 +0000
+++ examples/tests/uefi_basic.yaml	2016-10-03 18:55:20 +0000
@@ -22,6 +22,12 @@
     size: 3G
     type: partition
     wipe: superblock
+  - id: id_disk0_part3
+    number: 3
+    size: 1G
+    type: partition
+    wipe: superblock
+    device: id_disk0
   - fstype: fat32
     id: id_efi_format
     label: efi
@@ -32,10 +38,19 @@
     label: root
     type: format
     volume: id_disk0_part2
+  - id: id_home_format
+    label: home
+    type: format
+    fstype: xfs
+    volume: id_disk0_part3
   - device: id_root_format
     id: id_root_mount
     path: /
     type: mount
+  - device: id_home_format
+    type: mount
+    path: /home
+    id: id_home_mount
   - device: id_efi_format
     id: id_efi_mount
     path: /boot/efi

=== added file 'examples/tests/vlan_network_ipv6.yaml'
--- examples/tests/vlan_network_ipv6.yaml	1970-01-01 00:00:00 +0000
+++ examples/tests/vlan_network_ipv6.yaml	2016-10-03 18:55:20 +0000
@@ -0,0 +1,92 @@
+network:
+  config:
+  - id: interface0
+    mac_address: d4:be:d9:a8:49:13
+    mtu: 1500
+    name: interface0
+    subnets:
+    - address: 2001:4800:78ff:1b:be76:4eff:fe06:96b3
+      netmask: 'ffff:ffff:ffff:ffff::'
+      dns_nameservers:
+      - 10.245.168.2
+      routes:
+            - gateway: 2001:4800:78ff:1b::1
+              netmask: '::'
+              network: '::'
+      type: static
+    type: physical
+  - id: interface1
+    mac_address: d4:be:d9:a8:49:15
+    mtu: 1500
+    name: interface1
+    subnets:
+    - address: 2001:4800:beef:1b:be76:4eff:fe06:97b0
+      netmask: 'ffff:ffff:ffff:ffff::'
+      dns_nameservers: []
+      type: static
+    type: physical
+  - id: interface2
+    mac_address: d4:be:d9:a8:49:17
+    mtu: 1500
+    name: interface2
+    subnets:
+    - type: manual
+      control: manual
+    type: physical
+  - id: interface3
+    mac_address: d4:be:d9:a8:49:19
+    mtu: 1500
+    name: interface3
+    subnets:
+    - type: manual
+      control: manual
+    type: physical
+  - id: interface1.2667
+    mtu: 1500
+    name: interface1.2667
+    subnets:
+    - address: 2001:4800:dead:1b:be76:4eff:c486:12f7
+      netmask: 'ffff:ffff:ffff:ffff::'
+      dns_nameservers: []
+      type: static
+    type: vlan
+    vlan_id: 2667
+    vlan_link: interface1
+  - id: interface1.2668
+    mtu: 1500
+    name: interface1.2668
+    subnets:
+    - address: 2001:4800:feef:1b:be76:4eff:4242:2323
+      netmask: 'ffff:ffff:ffff:ffff::'
+      dns_nameservers: []
+      type: static
+    type: vlan
+    vlan_id: 2668
+    vlan_link: interface1
+  - id: interface1.2669
+    mtu: 1500
+    name: interface1.2669
+    subnets:
+    - address: 2001:4800:effe:1b:be76:7634:5f42:79ff
+      netmask: 'ffff:ffff:ffff:ffff::'
+      dns_nameservers: []
+      type: static
+    type: vlan
+    vlan_id: 2669
+    vlan_link: interface1
+  - id: interface1.2670
+    mtu: 1500
+    name: interface1.2670
+    subnets:
+    - address: 2001:4800:9eaf:1b:be76:7634:321f:bbca
+      netmask: 'ffff:ffff:ffff:ffff::'
+      dns_nameservers: []
+      type: static
+    type: vlan
+    vlan_id: 2670
+    vlan_link: interface1
+  - address: 10.245.168.2
+    search:
+    - dellstack
+    type: nameserver
+  version: 1

=== modified file 'setup.py'
--- setup.py	2015-10-02 16:19:07 +0000
+++ setup.py	2016-10-03 18:55:20 +0000
@@ -2,7 +2,7 @@
 from glob import glob
 import os
 
-VERSION = '0.1.0'
+import curtin
 
 
 def is_f(p):
@@ -11,7 +11,7 @@
 setup(
     name="curtin",
     description='The curtin installer',
-    version=VERSION,
+    version=curtin.__version__,
     author='Scott Moser',
     author_email='scott.moser@canonical.com',
     license="AGPL",

=== added file 'tests/unittests/helpers.py'
--- tests/unittests/helpers.py	1970-01-01 00:00:00 +0000
+++ tests/unittests/helpers.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,41 @@
+#   Copyright (C) 2016 Canonical Ltd.
+#
+#   Author: Scott Moser <scott.moser@canonical.com>
+#
+#   Curtin is free software: you can redistribute it and/or modify it under
+#   the terms of the GNU Affero General Public License as published by the
+#   Free Software Foundation, either version 3 of the License, or (at your
+#   option) any later version.
+#
+#   Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
+#   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+#   FOR A PARTICULAR PURPOSE.  See the GNU Affero General Public License for
+#   more details.
+#
+#   You should have received a copy of the GNU Affero General Public License
+#   along with Curtin.  If not, see <http://www.gnu.org/licenses/>.
+import mock
+
+
+class mocked_open(object):
+    # older versions of mock can't really mock the builtin 'open' easily.
+    def __init__(self):
+        self.mocked = None
+
+    def __enter__(self):
+        if self.mocked:
+            return self.mocked.start()
+
+        py2_p = '__builtin__.open'
+        py3_p = 'builtins.open'
+        try:
+            self.mocked = mock.patch(py2_p, new_callable=mock.mock_open())
+            return self.mocked.start()
+        except ImportError:
+            self.mocked = mock.patch(py3_p, new_callable=mock.mock_open())
+            return self.mocked.start()
+
+    def __exit__(self, etype, value, trace):
+        if self.mocked:
+            self.mocked.stop()
+        self.mocked = None
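
For illustration, a test using the helper above could look like this
(hypothetical example, not part of this diff):

    from tests.unittests.helpers import mocked_open

    def test_read_is_mocked():
        with mocked_open() as mopen:
            mopen.return_value.read.return_value = 'fake content'
            # no real file is opened here
            assert open('/no/such/file').read() == 'fake content'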

=== added file 'tests/unittests/test_apt_custom_sources_list.py'
--- tests/unittests/test_apt_custom_sources_list.py	1970-01-01 00:00:00 +0000
+++ tests/unittests/test_apt_custom_sources_list.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,170 @@
+""" test_apt_custom_sources_list
+Test templating of custom sources list
+"""
+import logging
+import os
+import shutil
+import tempfile
+
+from unittest import TestCase
+
+import yaml
+import mock
+from mock import call
+
+from curtin import util
+from curtin.commands import apt_config
+
+LOG = logging.getLogger(__name__)
+
+TARGET = "/"
+
+# Input and expected output for the custom template
+YAML_TEXT_CUSTOM_SL = """
+preserve_sources_list: false
+primary:
+  - arches: [default]
+    uri: http://test.ubuntu.com/ubuntu/
+security:
+  - arches: [default]
+    uri: http://testsec.ubuntu.com/ubuntu/
+sources_list: |
+
+    ## Note, this file is written by curtin at install time. It should not end
+    ## up on the installed system itself.
+    #
+    # See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
+    # newer versions of the distribution.
+    deb $MIRROR $RELEASE main restricted
+    deb-src $MIRROR $RELEASE main restricted
+    deb $PRIMARY $RELEASE universe restricted
+    deb $SECURITY $RELEASE-security multiverse
+    # FIND_SOMETHING_SPECIAL
+"""
+
+EXPECTED_CONVERTED_CONTENT = """
+## Note, this file is written by curtin at install time. It should not end
+## up on the installed system itself.
+#
+# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
+# newer versions of the distribution.
+deb http://test.ubuntu.com/ubuntu/ fakerel main restricted
+deb-src http://test.ubuntu.com/ubuntu/ fakerel main restricted
+deb http://test.ubuntu.com/ubuntu/ fakerel universe restricted
+deb http://testsec.ubuntu.com/ubuntu/ fakerel-security multiverse
+# FIND_SOMETHING_SPECIAL
+"""
+
+# mocked input, so the test is independent of the executing system
+MOCKED_APT_SRC_LIST = """
+deb http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
+deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
+"""
+
+EXPECTED_BASE_CONTENT = ("""
+deb http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
+deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
+""")
+
+EXPECTED_MIRROR_CONTENT = ("""
+deb http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
+deb http://test.ubuntu.com/ubuntu/ notouched-security main restricted
+""")
+
+EXPECTED_PRIMSEC_CONTENT = ("""
+deb http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
+deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
+""")
+
+
+class TestAptSourceConfigSourceList(TestCase):
+    """TestAptSourceConfigSourceList - Class to test sources list rendering"""
+    def setUp(self):
+        super(TestAptSourceConfigSourceList, self).setUp()
+        self.new_root = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.new_root)
+        # self.patchUtils(self.new_root)
+
+    @staticmethod
+    def _apt_source_list(cfg, expected):
+        "_apt_source_list - Test rendering from template (generic)"
+
+        arch = util.get_architecture()
+        # get_architecture would fail inside the unittest context
+        with mock.patch.object(util, 'get_architecture',
+                               return_value=arch) as mockga:
+            with mock.patch.object(util, 'write_file') as mockwrite:
+                # keep it side effect free and avoid permission errors
+                with mock.patch.object(os, 'rename'):
+                    # make the test independent of the executing system
+                    with mock.patch.object(util, 'load_file',
+                                           return_value=MOCKED_APT_SRC_LIST):
+                        with mock.patch.object(util, 'lsb_release',
+                                               return_value={'codename':
+                                                             'fakerel'}):
+                            apt_config.handle_apt(cfg, TARGET)
+
+        mockga.assert_called_with("/")
+
+        cloudfile = '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg'
+        cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1)
+        calls = [call(util.target_path(TARGET, '/etc/apt/sources.list'),
+                      expected,
+                      mode=0o644),
+                 call(util.target_path(TARGET, cloudfile),
+                      cloudconf,
+                      mode=0o644)]
+        mockwrite.assert_has_calls(calls)
+
+    def test_apt_source_list(self):
+        """test_apt_source_list - Test with neither custom sources nor parms"""
+        cfg = {'preserve_sources_list': False}
+
+        self._apt_source_list(cfg, EXPECTED_BASE_CONTENT)
+
+    def test_apt_source_list_psm(self):
+        """test_apt_source_list_psm - Test specifying prim+sec mirrors"""
+        cfg = {'preserve_sources_list': False,
+               'primary': [{'arches': ["default"],
+                            'uri': 'http://test.ubuntu.com/ubuntu/'}],
+               'security': [{'arches': ["default"],
+                             'uri': 'http://testsec.ubuntu.com/ubuntu/'}]}
+
+        self._apt_source_list(cfg, EXPECTED_PRIMSEC_CONTENT)
+
+    @staticmethod
+    def test_apt_srcl_custom():
+        """test_apt_srcl_custom - Test rendering a custom source template"""
+        cfg = yaml.safe_load(YAML_TEXT_CUSTOM_SL)
+
+        arch = util.get_architecture()
+        # get_architecture would fail inside the unittest context
+        with mock.patch.object(util, 'get_architecture',
+                               return_value=arch) as mockga:
+            with mock.patch.object(util, 'write_file') as mockwrite:
+                # keep it side effect free and avoid permission errors
+                with mock.patch.object(os, 'rename'):
+                    with mock.patch.object(util, 'lsb_release',
+                                           return_value={'codename':
+                                                         'fakerel'}):
+                        apt_config.handle_apt(cfg, TARGET)
+
+        mockga.assert_called_with("/")
+        cloudfile = '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg'
+        cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1)
+        calls = [call(util.target_path(TARGET, '/etc/apt/sources.list'),
+                      EXPECTED_CONVERTED_CONTENT, mode=0o644),
+                 call(util.target_path(TARGET, cloudfile), cloudconf,
+                      mode=0o644)]
+        mockwrite.assert_has_calls(calls)
+
+
+# vi: ts=4 expandtab
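
The substitution asserted by EXPECTED_CONVERTED_CONTENT above ($MIRROR,
$PRIMARY, $SECURITY and $RELEASE expanded to concrete values) can be
reproduced with string.Template; a minimal sketch, assuming the primary
mirror doubles as $MIRROR (the real renderer lives in
curtin.commands.apt_config):

    from string import Template

    def render_sources_list(tmpl, primary, security, release):
        return Template(tmpl).safe_substitute(
            MIRROR=primary, PRIMARY=primary,
            SECURITY=security, RELEASE=release)

    # render_sources_list(custom_template,
    #                     'http://test.ubuntu.com/ubuntu/',
    #                     'http://testsec.ubuntu.com/ubuntu/', 'fakerel')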

=== added file 'tests/unittests/test_apt_source.py'
--- tests/unittests/test_apt_source.py	1970-01-01 00:00:00 +0000
+++ tests/unittests/test_apt_source.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,1032 @@
+""" test_apt_source
+Testing various config variations of the apt_source custom config
+"""
+import glob
+import os
+import re
+import shutil
+import socket
+import tempfile
+
+from unittest import TestCase
+
+import mock
+from mock import call
+
+from curtin import util
+from curtin import gpg
+from curtin.commands import apt_config
+
+
+EXPECTEDKEY = u"""-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
+NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy
+8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0
+HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG
+CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29
+OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ
+FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm
+S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
+=ACB2
+-----END PGP PUBLIC KEY BLOCK-----"""
+
+ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
+
+TARGET = "/"
+
+
+def load_tfile(filename):
+    """ load_tfile
+    load file and return content after decoding
+    """
+    try:
+        content = util.load_file(filename, mode="r")
+    except Exception as error:
+        print('failed to load file content for test: %s' % error)
+        raise
+
+    return content
+
+
+class PseudoChrootableTarget(util.ChrootableTarget):
+    # no-ops the mounting and modifying that ChrootableTarget does
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        return
+
+ChrootableTargetStr = "curtin.commands.apt_config.util.ChrootableTarget"
+
+
+class TestAptSourceConfig(TestCase):
+    """ TestAptSourceConfig
+    Main Class to test apt configs
+    """
+    def setUp(self):
+        super(TestAptSourceConfig, self).setUp()
+        self.tmp = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.tmp)
+        self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
+        self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
+        self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
+        self.join = os.path.join
+        self.matcher = re.compile(ADD_APT_REPO_MATCH).search
+
+    @staticmethod
+    def _add_apt_sources(*args, **kwargs):
+        with mock.patch.object(util, 'apt_update'):
+            apt_config.add_apt_sources(*args, **kwargs)
+
+    @staticmethod
+    def _get_default_params():
+        """ get_default_params
+        Get the most basic default mirror and release info to be used in tests
+        """
+        params = {}
+        params['RELEASE'] = util.lsb_release()['codename']
+        arch = util.get_architecture()
+        params['MIRROR'] = apt_config.get_default_mirrors(arch)["PRIMARY"]
+        return params
+
+    def _myjoin(self, *args, **kwargs):
+        """ _myjoin - redir into writable tmpdir"""
+        if (args[0] == "/etc/apt/sources.list.d/" and
+                args[1] == "cloud_config_sources.list" and
+                len(args) == 2):
+            return self.join(self.tmp, args[0].lstrip("/"), args[1])
+        else:
+            return self.join(*args, **kwargs)
+
+    def _apt_src_basic(self, filename, cfg):
+        """ _apt_src_basic
+        Test a fixed deb source string; has to overwrite mirror conf in params
+        """
+        params = self._get_default_params()
+
+        self._add_apt_sources(cfg, TARGET, template_params=params,
+                              aa_repo_match=self.matcher)
+
+        self.assertTrue(os.path.isfile(filename))
+
+        contents = load_tfile(filename)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", "http://test.ubuntu.com/ubuntu",
+                                   "karmic-backports",
+                                   "main universe multiverse restricted"),
+                                  contents, flags=re.IGNORECASE))
+
+    def test_apt_src_basic(self):
+        """test_apt_src_basic - Test fix deb source string"""
+        cfg = {self.aptlistfile: {'source':
+                                  ('deb http://test.ubuntu.com/ubuntu'
+                                   ' karmic-backports'
+                                   ' main universe multiverse restricted')}}
+        self._apt_src_basic(self.aptlistfile, cfg)
+
+    def test_apt_src_basic_tri(self):
+        """test_apt_src_basic_tri - Test multiple fix deb source strings"""
+        cfg = {self.aptlistfile: {'source':
+                                  ('deb http://test.ubuntu.com/ubuntu'
+                                   ' karmic-backports'
+                                   ' main universe multiverse restricted')},
+               self.aptlistfile2: {'source':
+                                   ('deb http://test.ubuntu.com/ubuntu'
+                                    ' precise-backports'
+                                    ' main universe multiverse restricted')},
+               self.aptlistfile3: {'source':
+                                   ('deb http://test.ubuntu.com/ubuntu'
+                                    ' lucid-backports'
+                                    ' main universe multiverse restricted')}}
+        self._apt_src_basic(self.aptlistfile, cfg)
+
+        # extra verify on two extra files of this test
+        contents = load_tfile(self.aptlistfile2)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", "http://test.ubuntu.com/ubuntu",
+                                   "precise-backports",
+                                   "main universe multiverse restricted"),
+                                  contents, flags=re.IGNORECASE))
+        contents = load_tfile(self.aptlistfile3)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", "http://test.ubuntu.com/ubuntu",
+                                   "lucid-backports",
+                                   "main universe multiverse restricted"),
+                                  contents, flags=re.IGNORECASE))
+
+    def _apt_src_replacement(self, filename, cfg):
+        """ apt_src_replace
+        Test Autoreplacement of MIRROR and RELEASE in source specs
+        """
+        params = self._get_default_params()
+        self._add_apt_sources(cfg, TARGET, template_params=params,
+                              aa_repo_match=self.matcher)
+
+        self.assertTrue(os.path.isfile(filename))
+
+        contents = load_tfile(filename)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", params['MIRROR'], params['RELEASE'],
+                                   "multiverse"),
+                                  contents, flags=re.IGNORECASE))
+
+    def test_apt_src_replace(self):
+        """test_apt_src_replace - Test Autoreplacement of MIRROR and RELEASE"""
+        cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'}}
+        self._apt_src_replacement(self.aptlistfile, cfg)
+
+    def test_apt_src_replace_fn(self):
+        """test_apt_src_replace_fn - Test filename being overwritten in dict"""
+        cfg = {'ignored': {'source': 'deb $MIRROR $RELEASE multiverse',
+                           'filename': self.aptlistfile}}
+        # second file should overwrite the dict key
+        self._apt_src_replacement(self.aptlistfile, cfg)
+
+    def _apt_src_replace_tri(self, cfg):
+        """ _apt_src_replace_tri
+        Test three auto-replacements of MIRROR and RELEASE in source specs
+        via the generic helper
+        """
+        self._apt_src_replacement(self.aptlistfile, cfg)
+
+        # extra verify on two extra files of this test
+        params = self._get_default_params()
+        contents = load_tfile(self.aptlistfile2)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", params['MIRROR'], params['RELEASE'],
+                                   "main"),
+                                  contents, flags=re.IGNORECASE))
+        contents = load_tfile(self.aptlistfile3)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", params['MIRROR'], params['RELEASE'],
+                                   "universe"),
+                                  contents, flags=re.IGNORECASE))
+
+    def test_apt_src_replace_tri(self):
+        """test_apt_src_replace_tri - Test multiple replacements/overwrites"""
+        cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'},
+               'notused':        {'source': 'deb $MIRROR $RELEASE main',
+                                  'filename': self.aptlistfile2},
+               self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}}
+        self._apt_src_replace_tri(cfg)
+
+    def _apt_src_keyid(self, filename, cfg, keynum):
+        """ _apt_src_keyid
+        Test specification of a source + keyid
+        """
+        params = self._get_default_params()
+
+        with mock.patch("curtin.util.subp",
+                        return_value=('fakekey 1234', '')) as mockobj:
+            self._add_apt_sources(cfg, TARGET, template_params=params,
+                                  aa_repo_match=self.matcher)
+
+        # check that it added the right number of keys
+        calls = []
+        for _ in range(keynum):
+            calls.append(call(['apt-key', 'add', '-'], data=b'fakekey 1234',
+                              target=TARGET))
+        mockobj.assert_has_calls(calls, any_order=True)
+
+        self.assertTrue(os.path.isfile(filename))
+
+        contents = load_tfile(filename)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb",
+                                   ('http://ppa.launchpad.net/smoser/'
+                                    'cloud-init-test/ubuntu'),
+                                   "xenial", "main"),
+                                  contents, flags=re.IGNORECASE))
+
+    @mock.patch(ChrootableTargetStr, new=PseudoChrootableTarget)
+    def test_apt_src_keyid(self):
+        """test_apt_src_keyid - Test source + keyid with filename being set"""
+        cfg = {self.aptlistfile: {'source': ('deb '
+                                             'http://ppa.launchpad.net/'
+                                             'smoser/cloud-init-test/ubuntu'
+                                             ' xenial main'),
+                                  'keyid': "03683F77"}}
+        self._apt_src_keyid(self.aptlistfile, cfg, 1)
+
+    @mock.patch(ChrootableTargetStr, new=PseudoChrootableTarget)
+    def test_apt_src_keyid_tri(self):
+        """test_apt_src_keyid_tri - Test multiple src+keyid+filen overwrites"""
+        cfg = {self.aptlistfile:  {'source': ('deb '
+                                              'http://ppa.launchpad.net/'
+                                              'smoser/cloud-init-test/ubuntu'
+                                              ' xenial main'),
+                                   'keyid': "03683F77"},
+               'ignored':         {'source': ('deb '
+                                              'http://ppa.launchpad.net/'
+                                              'smoser/cloud-init-test/ubuntu'
+                                              ' xenial universe'),
+                                   'keyid': "03683F77",
+                                   'filename': self.aptlistfile2},
+               self.aptlistfile3: {'source': ('deb '
+                                              'http://ppa.launchpad.net/'
+                                              'smoser/cloud-init-test/ubuntu'
+                                              ' xenial multiverse'),
+                                   'keyid': "03683F77"}}
+
+        self._apt_src_keyid(self.aptlistfile, cfg, 3)
+        contents = load_tfile(self.aptlistfile2)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb",
+                                   ('http://ppa.launchpad.net/smoser/'
+                                    'cloud-init-test/ubuntu'),
+                                   "xenial", "universe"),
+                                  contents, flags=re.IGNORECASE))
+        contents = load_tfile(self.aptlistfile3)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb",
+                                   ('http://ppa.launchpad.net/smoser/'
+                                    'cloud-init-test/ubuntu'),
+                                   "xenial", "multiverse"),
+                                  contents, flags=re.IGNORECASE))
+
+    @mock.patch(ChrootableTargetStr, new=PseudoChrootableTarget)
+    def test_apt_src_key(self):
+        """test_apt_src_key - Test source + key"""
+        params = self._get_default_params()
+        cfg = {self.aptlistfile: {'source': ('deb '
+                                             'http://ppa.launchpad.net/'
+                                             'smoser/cloud-init-test/ubuntu'
+                                             ' xenial main'),
+                                  'key': "fakekey 4321"}}
+
+        with mock.patch.object(util, 'subp') as mockobj:
+            self._add_apt_sources(cfg, TARGET, template_params=params,
+                                  aa_repo_match=self.matcher)
+
+        mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 4321',
+                                target=TARGET)
+
+        self.assertTrue(os.path.isfile(self.aptlistfile))
+
+        contents = load_tfile(self.aptlistfile)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb",
+                                   ('http://ppa.launchpad.net/smoser/'
+                                    'cloud-init-test/ubuntu'),
+                                   "xenial", "main"),
+                                  contents, flags=re.IGNORECASE))
+
+    @mock.patch(ChrootableTargetStr, new=PseudoChrootableTarget)
+    def test_apt_src_keyonly(self):
+        """test_apt_src_keyonly - Test key without source"""
+        params = self._get_default_params()
+        cfg = {self.aptlistfile: {'key': "fakekey 4242"}}
+
+        with mock.patch.object(util, 'subp') as mockobj:
+            self._add_apt_sources(cfg, TARGET, template_params=params,
+                                  aa_repo_match=self.matcher)
+
+        mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 4242',
+                                target=TARGET)
+
+        # filename should be ignored on key only
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+
+    @mock.patch(ChrootableTargetStr, new=PseudoChrootableTarget)
+    def test_apt_src_keyidonly(self):
+        """test_apt_src_keyidonly - Test keyid without source"""
+        params = self._get_default_params()
+        cfg = {self.aptlistfile: {'keyid': "03683F77"}}
+
+        with mock.patch.object(util, 'subp',
+                               return_value=('fakekey 1212', '')) as mockobj:
+            self._add_apt_sources(cfg, TARGET, template_params=params,
+                                  aa_repo_match=self.matcher)
+
+        mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 1212',
+                                target=TARGET)
+
+        # filename should be ignored on key only
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+
+    def apt_src_keyid_real(self, cfg, expectedkey):
+        """apt_src_keyid_real
+        Test specification of a keyid without source including
+        up to addition of the key (add_apt_key_raw mocked to keep the
+        environment as is)
+        """
+        params = self._get_default_params()
+
+        with mock.patch.object(apt_config, 'add_apt_key_raw') as mockkey:
+            with mock.patch.object(gpg, 'getkeybyid',
+                                   return_value=expectedkey) as mockgetkey:
+                self._add_apt_sources(cfg, TARGET, template_params=params,
+                                      aa_repo_match=self.matcher)
+
+        keycfg = cfg[self.aptlistfile]
+        mockgetkey.assert_called_with(keycfg['keyid'],
+                                      keycfg.get('keyserver',
+                                                 'keyserver.ubuntu.com'))
+        mockkey.assert_called_with(expectedkey, TARGET)
+
+        # filename should be ignored on key only
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+
+    def test_apt_src_keyid_real(self):
+        """test_apt_src_keyid_real - Test keyid including key add"""
+        keyid = "03683F77"
+        cfg = {self.aptlistfile: {'keyid': keyid}}
+
+        self.apt_src_keyid_real(cfg, EXPECTEDKEY)
+
+    def test_apt_src_longkeyid_real(self):
+        """test_apt_src_longkeyid_real Test long keyid including key add"""
+        keyid = "B59D 5F15 97A5 04B7 E230  6DCA 0620 BBCF 0368 3F77"
+        cfg = {self.aptlistfile: {'keyid': keyid}}
+
+        self.apt_src_keyid_real(cfg, EXPECTEDKEY)
+
+    def test_apt_src_longkeyid_ks_real(self):
+        """test_apt_src_longkeyid_ks_real Test long keyid from other ks"""
+        keyid = "B59D 5F15 97A5 04B7 E230  6DCA 0620 BBCF 0368 3F77"
+        cfg = {self.aptlistfile: {'keyid': keyid,
+                                  'keyserver': 'keys.gnupg.net'}}
+
+        self.apt_src_keyid_real(cfg, EXPECTEDKEY)
+
+    def test_apt_src_keyid_keyserver(self):
+        """test_apt_src_keyid_keyserver - Test custom keyserver"""
+        keyid = "03683F77"
+        params = self._get_default_params()
+        cfg = {self.aptlistfile: {'keyid': keyid,
+                                  'keyserver': 'test.random.com'}}
+
+        # in some test environments only *.ubuntu.com is reachable
+        # so mock the call and check if the config got there
+        with mock.patch.object(gpg, 'getkeybyid',
+                               return_value="fakekey") as mockgetkey:
+            with mock.patch.object(apt_config, 'add_apt_key_raw') as mockadd:
+                self._add_apt_sources(cfg, TARGET, template_params=params,
+                                      aa_repo_match=self.matcher)
+
+        mockgetkey.assert_called_with('03683F77', 'test.random.com')
+        mockadd.assert_called_with('fakekey', TARGET)
+
+        # filename should be ignored on key only
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+
+    @mock.patch(ChrootableTargetStr, new=PseudoChrootableTarget)
+    def test_apt_src_ppa(self):
+        """test_apt_src_ppa - Test specification of a ppa"""
+        params = self._get_default_params()
+        cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'}}
+
+        with mock.patch("curtin.util.subp") as mockobj:
+            self._add_apt_sources(cfg, TARGET, template_params=params,
+                                  aa_repo_match=self.matcher)
+        mockobj.assert_any_call(['add-apt-repository',
+                                 'ppa:smoser/cloud-init-test'], target=TARGET)
+
+        # adding ppa should ignore filename (uses add-apt-repository)
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+
+    @mock.patch(ChrootableTargetStr, new=PseudoChrootableTarget)
+    def test_apt_src_ppa_tri(self):
+        """test_apt_src_ppa_tri - Test specification of multiple ppa's"""
+        params = self._get_default_params()
+        cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'},
+               self.aptlistfile2: {'source': 'ppa:smoser/cloud-init-test2'},
+               self.aptlistfile3: {'source': 'ppa:smoser/cloud-init-test3'}}
+
+        with mock.patch("curtin.util.subp") as mockobj:
+            self._add_apt_sources(cfg, TARGET, template_params=params,
+                                  aa_repo_match=self.matcher)
+        calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'],
+                      target=TARGET),
+                 call(['add-apt-repository', 'ppa:smoser/cloud-init-test2'],
+                      target=TARGET),
+                 call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'],
+                      target=TARGET)]
+        mockobj.assert_has_calls(calls, any_order=True)
+
+        # adding ppa should ignore all filenames (uses add-apt-repository)
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+        self.assertFalse(os.path.isfile(self.aptlistfile2))
+        self.assertFalse(os.path.isfile(self.aptlistfile3))
+
+    @mock.patch("curtin.commands.apt_config.util.get_architecture")
+    def test_mir_apt_list_rename(self, m_get_architecture):
+        """test_mir_apt_list_rename - Test find mirror and apt list renaming"""
+        pre = "/var/lib/apt/lists"
+        # filenames are archive dependent
+
+        arch = 's390x'
+        m_get_architecture.return_value = arch
+        component = "ubuntu-ports"
+        archive = "ports.ubuntu.com"
+
+        cfg = {'primary': [{'arches': ["default"],
+                            'uri':
+                            'http://test.ubuntu.com/%s/' % component}],
+               'security': [{'arches': ["default"],
+                             'uri':
+                             'http://testsec.ubuntu.com/%s/' % component}]}
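+        # the list file named for the default ports archive should be
+        # renamed to one based on the configured test mirror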
+        post = ("%s_dists_%s-updates_InRelease" %
+                (component, util.lsb_release()['codename']))
+        fromfn = ("%s/%s_%s" % (pre, archive, post))
+        tofn = ("%s/test.ubuntu.com_%s" % (pre, post))
+
+        mirrors = apt_config.find_apt_mirror_info(cfg, arch)
+
+        self.assertEqual(mirrors['MIRROR'],
+                         "http://test.ubuntu.com/%s/" % component)
+        self.assertEqual(mirrors['PRIMARY'],
+                         "http://test.ubuntu.com/%s/" % component)
+        self.assertEqual(mirrors['SECURITY'],
+                         "http://testsec.ubuntu.com/%s/" % component)
+
+        with mock.patch.object(os, 'rename') as mockren:
+            with mock.patch.object(glob, 'glob',
+                                   return_value=[fromfn]):
+                apt_config.rename_apt_lists(mirrors, TARGET)
+
+        mockren.assert_any_call(fromfn, tofn)
+
+    @mock.patch("curtin.commands.apt_config.util.get_architecture")
+    def test_mir_apt_list_rename_non_slash(self, m_get_architecture):
+        target = os.path.join(self.tmp, "rename_non_slash")
+        apt_lists_d = os.path.join(target, "./" + apt_config.APT_LISTS)
+
+        m_get_architecture.return_value = 'amd64'
+
+        mirror_path = "some/random/path/"
+        primary = "http://test.ubuntu.com/" + mirror_path
+        security = "http://test-security.ubuntu.com/" + mirror_path
+        mirrors = {'PRIMARY': primary, 'SECURITY': security}
+
+        # these match default archive prefixes
+        opri_pre = "archive.ubuntu.com_ubuntu_dists_xenial"
+        osec_pre = "security.ubuntu.com_ubuntu_dists_xenial"
+        # this one won't match and should not be renamed
+        other_pre = "dl.google.com_linux_chrome_deb_dists_stable"
+        # these are our new expected prefixes
+        npri_pre = "test.ubuntu.com_some_random_path_dists_xenial"
+        nsec_pre = "test-security.ubuntu.com_some_random_path_dists_xenial"
+
+        files = [
+            # orig prefix, new prefix, suffix
+            (opri_pre, npri_pre, "_main_binary-amd64_Packages"),
+            (opri_pre, npri_pre, "_main_binary-amd64_InRelease"),
+            (opri_pre, npri_pre, "-updates_main_binary-amd64_Packages"),
+            (opri_pre, npri_pre, "-updates_main_binary-amd64_InRelease"),
+            (other_pre, other_pre, "_main_binary-amd64_Packages"),
+            (other_pre, other_pre, "_Release"),
+            (other_pre, other_pre, "_Release.gpg"),
+            (osec_pre, nsec_pre, "_InRelease"),
+            (osec_pre, nsec_pre, "_main_binary-amd64_Packages"),
+            (osec_pre, nsec_pre, "_universe_binary-amd64_Packages"),
+        ]
+
+        expected = sorted([npre + suff for opre, npre, suff in files])
+        # create files
+        for (opre, npre, suff) in files:
+            fpath = os.path.join(apt_lists_d, opre + suff)
+            util.write_file(fpath, content=fpath)
+
+        apt_config.rename_apt_lists(mirrors, target)
+        found = sorted(os.listdir(apt_lists_d))
+        self.assertEqual(expected, found)
+
+    @staticmethod
+    def test_apt_proxy():
+        """test_apt_proxy - Test apt_*proxy configuration"""
+        cfg = {"proxy": "foobar1",
+               "http_proxy": "foobar2",
+               "ftp_proxy": "foobar3",
+               "https_proxy": "foobar4"}
+
+        with mock.patch.object(util, 'write_file') as mockobj:
+            apt_config.apply_apt_proxy_config(cfg, "proxyfn", "notused")
+
+        mockobj.assert_called_with('proxyfn',
+                                   ('Acquire::http::Proxy "foobar1";\n'
+                                    'Acquire::http::Proxy "foobar2";\n'
+                                    'Acquire::ftp::Proxy "foobar3";\n'
+                                    'Acquire::https::Proxy "foobar4";\n'))
+
+    def test_mirror(self):
+        """test_mirror - Test defining a mirror"""
+        pmir = "http://us.archive.ubuntu.com/ubuntu/"
+        smir = "http://security.ubuntu.com/ubuntu/"
+        cfg = {"primary": [{'arches': ["default"],
+                            "uri": pmir}],
+               "security": [{'arches': ["default"],
+                             "uri": smir}]}
+
+        mirrors = apt_config.find_apt_mirror_info(cfg, 'amd64')
+
+        self.assertEqual(mirrors['MIRROR'],
+                         pmir)
+        self.assertEqual(mirrors['PRIMARY'],
+                         pmir)
+        self.assertEqual(mirrors['SECURITY'],
+                         smir)
+
+    def test_mirror_default(self):
+        """test_mirror_default - Test without defining a mirror"""
+        arch = util.get_architecture()
+        default_mirrors = apt_config.get_default_mirrors(arch)
+        pmir = default_mirrors["PRIMARY"]
+        smir = default_mirrors["SECURITY"]
+        mirrors = apt_config.find_apt_mirror_info({}, arch)
+
+        self.assertEqual(mirrors['MIRROR'],
+                         pmir)
+        self.assertEqual(mirrors['PRIMARY'],
+                         pmir)
+        self.assertEqual(mirrors['SECURITY'],
+                         smir)
+
+    def test_mirror_arches(self):
+        """test_mirror_arches - Test arches selection of mirror"""
+        pmir = "http://my-primary.ubuntu.com/ubuntu/"
+        smir = "http://my-security.ubuntu.com/ubuntu/"
+        arch = 'ppc64el'
+        cfg = {"primary": [{'arches': ["default"], "uri": "notthis-primary"},
+                           {'arches': [arch], "uri": pmir}],
+               "security": [{'arches': ["default"], "uri": "nothis-security"},
+                            {'arches': [arch], "uri": smir}]}
+
+        mirrors = apt_config.find_apt_mirror_info(cfg, arch)
+
+        self.assertEqual(mirrors['PRIMARY'], pmir)
+        self.assertEqual(mirrors['MIRROR'], pmir)
+        self.assertEqual(mirrors['SECURITY'], smir)
+
+    def test_mirror_arches_default(self):
+        """test_mirror_arches - Test falling back to default arch"""
+        pmir = "http://us.archive.ubuntu.com/ubuntu/"
+        smir = "http://security.ubuntu.com/ubuntu/"
+        cfg = {"primary": [{'arches': ["default"],
+                            "uri": pmir},
+                           {'arches': ["thisarchdoesntexist"],
+                            "uri": "notthis"}],
+               "security": [{'arches': ["thisarchdoesntexist"],
+                             "uri": "nothat"},
+                            {'arches': ["default"],
+                             "uri": smir}]}
+
+        mirrors = apt_config.find_apt_mirror_info(cfg, 'amd64')
+
+        self.assertEqual(mirrors['MIRROR'],
+                         pmir)
+        self.assertEqual(mirrors['PRIMARY'],
+                         pmir)
+        self.assertEqual(mirrors['SECURITY'],
+                         smir)
+
+    @mock.patch("curtin.commands.apt_config.util.get_architecture")
+    def test_get_default_mirrors_non_intel_no_arch(self, m_get_architecture):
+        arch = 'ppc64el'
+        m_get_architecture.return_value = arch
+        expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports',
+                    'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'}
+        self.assertEqual(expected, apt_config.get_default_mirrors())
+
+    def test_get_default_mirrors_non_intel_with_arch(self):
+        found = apt_config.get_default_mirrors('ppc64el')
+
+        expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports',
+                    'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'}
+        self.assertEqual(expected, found)
+
+    def test_mirror_arches_sysdefault(self):
+        """test_mirror_arches - Test arches falling back to sys default"""
+        arch = util.get_architecture()
+        default_mirrors = apt_config.get_default_mirrors(arch)
+        pmir = default_mirrors["PRIMARY"]
+        smir = default_mirrors["SECURITY"]
+        cfg = {"primary": [{'arches': ["thisarchdoesntexist_64"],
+                            "uri": "notthis"},
+                           {'arches': ["thisarchdoesntexist"],
+                            "uri": "notthiseither"}],
+               "security": [{'arches': ["thisarchdoesntexist"],
+                             "uri": "nothat"},
+                            {'arches': ["thisarchdoesntexist_64"],
+                             "uri": "nothateither"}]}
+
+        mirrors = apt_config.find_apt_mirror_info(cfg, arch)
+
+        self.assertEqual(mirrors['MIRROR'], pmir)
+        self.assertEqual(mirrors['PRIMARY'], pmir)
+        self.assertEqual(mirrors['SECURITY'], smir)
+
+    def test_mirror_search(self):
+        """test_mirror_search - Test searching mirrors in a list
+            mock checks to avoid relying on network connectivity"""
+        pmir = "http://us.archive.ubuntu.com/ubuntu/"
+        smir = "http://security.ubuntu.com/ubuntu/"
+        cfg = {"primary": [{'arches': ["default"],
+                            "search": ["pfailme", pmir]}],
+               "security": [{'arches': ["default"],
+                             "search": ["sfailme", smir]}]}
+
+        with mock.patch.object(apt_config, 'search_for_mirror',
+                               side_effect=[pmir, smir]) as mocksearch:
+            mirrors = apt_config.find_apt_mirror_info(cfg, 'amd64')
+
+        calls = [call(["pfailme", pmir]),
+                 call(["sfailme", smir])]
+        mocksearch.assert_has_calls(calls)
+
+        self.assertEqual(mirrors['MIRROR'],
+                         pmir)
+        self.assertEqual(mirrors['PRIMARY'],
+                         pmir)
+        self.assertEqual(mirrors['SECURITY'],
+                         smir)
+
+    def test_mirror_search_many2(self):
+        """test_mirror_search_many3 - Test both mirrors specs at once"""
+        pmir = "http://us.archive.ubuntu.com/ubuntu/"
+        smir = "http://security.ubuntu.com/ubuntu/"
+        cfg = {"primary": [{'arches': ["default"],
+                            "uri": pmir,
+                            "search": ["pfailme", "foo"]}],
+               "security": [{'arches': ["default"],
+                             "uri": smir,
+                             "search": ["sfailme", "bar"]}]}
+
+        arch = 'amd64'
+
+        # should be called only once per type, despite two mirror configs
+        with mock.patch.object(apt_config, 'get_mirror',
+                               return_value="http://mocked/foo") as mockgm:
+            mirrors = apt_config.find_apt_mirror_info(cfg, arch)
+        calls = [call(cfg, 'primary', arch), call(cfg, 'security', arch)]
+        mockgm.assert_has_calls(calls)
+
+        # should not be called, since primary is specified
+        with mock.patch.object(apt_config, 'search_for_mirror') as mockse:
+            mirrors = apt_config.find_apt_mirror_info(cfg, arch)
+        mockse.assert_not_called()
+
+        self.assertEqual(mirrors['MIRROR'],
+                         pmir)
+        self.assertEqual(mirrors['PRIMARY'],
+                         pmir)
+        self.assertEqual(mirrors['SECURITY'],
+                         smir)
+
+    def test_url_resolvable(self):
+        """test_url_resolvable - Test resolving urls"""
+
+        with mock.patch.object(util, 'is_resolvable') as mockresolve:
+            util.is_resolvable_url("http://1.2.3.4/ubuntu")
+        mockresolve.assert_called_with("1.2.3.4")
+
+        with mock.patch.object(util, 'is_resolvable') as mockresolve:
+            util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu")
+        mockresolve.assert_called_with("us.archive.ubuntu.com")
+
+        bad = [(None, None, None, "badname", ["10.3.2.1"])]
+        good = [(None, None, None, "goodname", ["10.2.3.4"])]
+        with mock.patch.object(socket, 'getaddrinfo',
+                               side_effect=[bad, bad, good,
+                                            good]) as mocksock:
+            ret = util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu")
+            ret2 = util.is_resolvable_url("http://1.2.3.4/ubuntu")
+        calls = [call('does-not-exist.example.com.', None, 0, 0, 1, 2),
+                 call('example.invalid.', None, 0, 0, 1, 2),
+                 call('us.archive.ubuntu.com', None),
+                 call('1.2.3.4', None)]
+        mocksock.assert_has_calls(calls)
+        self.assertTrue(ret)
+        self.assertTrue(ret2)
+
+        # probe results are cached, so only one bad return is needed here
+        with mock.patch.object(socket, 'getaddrinfo',
+                               side_effect=[bad]) as mocksock:
+            ret3 = util.is_resolvable_url("http://failme.com/ubuntu")
+        calls = [call('failme.com', None)]
+        mocksock.assert_has_calls(calls)
+        self.assertFalse(ret3)
+
+    def test_disable_suites(self):
+        """test_disable_suites - disable_suites with many configurations"""
+        release = "xenial"
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+
+        # disable nothing
+        disabled = []
+        expect = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        result = apt_config.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # single disable release suite
+        disabled = ["$RELEASE"]
+        expect = """\
+# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        result = apt_config.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # single disable other suite
+        disabled = ["$RELEASE-updates"]
+        expect = """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        result = apt_config.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # multi disable
+        disabled = ["$RELEASE-updates", "$RELEASE-security"]
+        expect = """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-updates main
+# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        result = apt_config.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # multi line disable (same suite multiple times in input)
+        disabled = ["$RELEASE-updates", "$RELEASE-security"]
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://UBUNTU.com//ubuntu xenial-updates main
+deb http://UBUNTU.COM//ubuntu xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        expect = """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-updates main
+# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+# suite disabled by curtin: deb http://UBUNTU.com//ubuntu xenial-updates main
+# suite disabled by curtin: deb http://UBUNTU.COM//ubuntu xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        result = apt_config.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # comment in input
+        disabled = ["$RELEASE-updates", "$RELEASE-security"]
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+#foo
+#deb http://UBUNTU.com//ubuntu xenial-updates main
+deb http://UBUNTU.COM//ubuntu xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        expect = """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-updates main
+# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+#foo
+#deb http://UBUNTU.com//ubuntu xenial-updates main
+# suite disabled by curtin: deb http://UBUNTU.COM//ubuntu xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        result = apt_config.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # single disable custom suite
+        disabled = ["foobar"]
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb http://ubuntu.com/ubuntu/ foobar main"""
+        expect = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+# suite disabled by curtin: deb http://ubuntu.com/ubuntu/ foobar main"""
+        result = apt_config.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # single disable non existing suite
+        disabled = ["foobar"]
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb http://ubuntu.com/ubuntu/ notfoobar main"""
+        expect = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb http://ubuntu.com/ubuntu/ notfoobar main"""
+        result = apt_config.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # single disable suite with option
+        disabled = ["$RELEASE-updates"]
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb [a=b] http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        expect = """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by curtin: deb [a=b] http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        result = apt_config.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # single disable suite with more options and auto $RELEASE expansion
+        disabled = ["updates"]
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb [a=b c=d] http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        expect = """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by curtin: deb [a=b c=d] \
+http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        result = apt_config.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # single disable suite while options at others
+        disabled = ["$RELEASE-security"]
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        expect = """deb http://ubuntu.com//ubuntu xenial main
+deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main
+# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        result = apt_config.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+    def test_disable_suites_blank_lines(self):
+        """test_disable_suites_blank_lines - ensure blank lines allowed"""
+        lines = ["deb %(repo)s %(rel)s main universe",
+                 "",
+                 "deb %(repo)s %(rel)s-updates main universe",
+                 "   # random comment",
+                 "#comment here",
+                 ""]
+        rel = "trusty"
+        repo = 'http://example.com/mirrors/ubuntu'
+        orig = "\n".join(lines) % {'repo': repo, 'rel': rel}
+        self.assertEqual(
+            orig, apt_config.disable_suites(["proposed"], orig, rel))
+
+
+class TestDebconfSelections(TestCase):
+
+    @mock.patch("curtin.commands.apt_config.debconf_set_selections")
+    def test_no_set_sel_if_none_to_set(self, m_set_sel):
+        apt_config.apply_debconf_selections({'foo': 'bar'})
+        m_set_sel.assert_not_called()
+
+    @mock.patch("curtin.commands.apt_config.debconf_set_selections")
+    @mock.patch("curtin.commands.apt_config.util.get_installed_packages")
+    def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel):
+        data = {
+            'set1': 'pkga pkga/q1 mybool false',
+            'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n'
+                     'pkgc\tpkgc/ip\tstring\t10.0.0.1')}
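+        # flatten the configured selection sets into individual lines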
+        lines = '\n'.join(data.values()).split('\n')
+
+        m_get_inst.return_value = ["adduser", "apparmor"]
+        m_set_sel.return_value = None
+
+        apt_config.apply_debconf_selections({'debconf_selections': data})
+        self.assertTrue(m_get_inst.called)
+        self.assertEqual(m_set_sel.call_count, 1)
+
+        # assumes the selections are passed as the first positional argument
+        selections = m_set_sel.call_args_list[0][0][0].decode()
+
+        missing = [l for l in lines if l not in selections.splitlines()]
+        self.assertEqual([], missing)
+
+    @mock.patch("curtin.commands.apt_config.dpkg_reconfigure")
+    @mock.patch("curtin.commands.apt_config.debconf_set_selections")
+    @mock.patch("curtin.commands.apt_config.util.get_installed_packages")
+    def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel,
+                                         m_dpkg_r):
+        data = {
+            'set1': 'pkga pkga/q1 mybool false',
+            'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n'
+                     'pkgc\tpkgc/ip\tstring\t10.0.0.1'),
+            'cloud-init': ('cloud-init cloud-init/datasources '
+                           'multiselect MAAS')}
+
+        m_set_sel.return_value = None
+        m_get_inst.return_value = ["adduser", "apparmor", "pkgb",
+                                   "cloud-init", 'zdog']
+
+        apt_config.apply_debconf_selections({'debconf_selections': data})
+
+        # reconfigure should be called with the intersection
+        # of (packages in config, packages installed)
+        self.assertEqual(m_dpkg_r.call_count, 1)
+        # assumes called as dpkg_reconfigure([a, b, c], target=...)
+        packages = m_dpkg_r.call_args_list[0][0][0]
+        self.assertEqual(set(['cloud-init', 'pkgb']), set(packages))
+
+    @mock.patch("curtin.commands.apt_config.dpkg_reconfigure")
+    @mock.patch("curtin.commands.apt_config.debconf_set_selections")
+    @mock.patch("curtin.commands.apt_config.util.get_installed_packages")
+    def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel,
+                                            m_dpkg_r):
+        data = {'set1': 'pkga pkga/q1 mybool false'}
+
+        m_get_inst.return_value = ["adduser", "apparmor", "pkgb",
+                                   "cloud-init", 'zdog']
+        m_set_sel.return_value = None
+
+        apt_config.apply_debconf_selections({'debconf_selections': data})
+
+        self.assertTrue(m_get_inst.called)
+        self.assertEqual(m_dpkg_r.call_count, 0)
+
+    @mock.patch("curtin.commands.apt_config.util.subp")
+    def test_dpkg_reconfigure_does_reconfigure(self, m_subp):
+        target = "/foo-target"
+
+        # due to the way the cleaners are called (via dictionary reference)
+        # mocking clean_cloud_init directly does not work.  So we mock
+        # the CONFIG_CLEANERS dictionary and assert our cleaner is called.
+        ci_cleaner = mock.MagicMock()
+        with mock.patch.dict("curtin.commands.apt_config.CONFIG_CLEANERS",
+                             values={'cloud-init': ci_cleaner}, clear=True):
+            apt_config.dpkg_reconfigure(['pkga', 'cloud-init'],
+                                        target=target)
+        # cloud-init is actually the only package we have a cleaner for,
+        # so for now it's the only one that should be reconfigured
+        self.assertTrue(m_subp.called)
+        ci_cleaner.assert_called_with(target)
+        self.assertEqual(m_subp.call_count, 1)
+        found = m_subp.call_args_list[0][0][0]
+        expected = ['dpkg-reconfigure', '--frontend=noninteractive',
+                    'cloud-init']
+        self.assertEqual(expected, found)
+
+    @mock.patch("curtin.commands.apt_config.util.subp")
+    def test_dpkg_reconfigure_not_done_on_no_data(self, m_subp):
+        apt_config.dpkg_reconfigure([])
+        m_subp.assert_not_called()
+
+    @mock.patch("curtin.commands.apt_config.util.subp")
+    def test_dpkg_reconfigure_not_done_if_no_cleaners(self, m_subp):
+        apt_config.dpkg_reconfigure(['pkgfoo', 'pkgbar'])
+        m_subp.assert_not_called()
+
+#
+# vi: ts=4 expandtab

=== modified file 'tests/unittests/test_block.py'
--- tests/unittests/test_block.py	2016-05-10 16:13:29 +0000
+++ tests/unittests/test_block.py	2016-10-03 18:55:20 +0000
@@ -1,9 +1,13 @@
 from unittest import TestCase
+import functools
 import os
 import mock
 import tempfile
 import shutil
 
+from collections import OrderedDict
+
+from .helpers import mocked_open
 from curtin import util
 from curtin import block
 
@@ -39,6 +43,36 @@
         self.assertEqual(sorted(mountpoints),
                          sorted(["/mnt", "/sys"]))
 
+    @mock.patch('curtin.block._lsblock')
+    def test_get_blockdev_sector_size(self, mock_lsblk):
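+        # _lsblock reports logical ('LOG-SEC') and physical ('PHY-SEC')
+        # sector sizes for each device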
+        mock_lsblk.return_value = {
+            'sda':  {'LOG-SEC': '512', 'PHY-SEC': '4096',
+                     'device_path': '/dev/sda'},
+            'sda1': {'LOG-SEC': '512', 'PHY-SEC': '4096',
+                     'device_path': '/dev/sda1'},
+            'dm-0': {'LOG-SEC': '512', 'PHY-SEC': '512',
+                     'device_path': '/dev/dm-0'},
+        }
+        for (devpath, expected) in [('/dev/sda', (512, 4096)),
+                                    ('/dev/sda1', (512, 4096)),
+                                    ('/dev/dm-0', (512, 512))]:
+            res = block.get_blockdev_sector_size(devpath)
+            mock_lsblk.assert_called_with([devpath])
+            self.assertEqual(res, expected)
+
+        # test that the fallback works and returns the right value
+        mock_lsblk.return_value = OrderedDict()
+        mock_lsblk.return_value.update({
+            'vda': {'LOG-SEC': '4096', 'PHY-SEC': '4096',
+                    'device_path': '/dev/vda'},
+        })
+        mock_lsblk.return_value.update({
+            'vda1': {'LOG-SEC': '512', 'PHY-SEC': '512',
+                     'device_path': '/dev/vda1'},
+        })
+        res = block.get_blockdev_sector_size('/dev/vda2')
+        self.assertEqual(res, (4096, 4096))
+
     @mock.patch("curtin.block.os.path.realpath")
     @mock.patch("curtin.block.os.path.exists")
     @mock.patch("curtin.block.os.listdir")
@@ -130,6 +164,17 @@
         self.assertEqual('/sys/class/block/foodev/md/b',
                          block.sys_block_path("foodev", "/md/b", strict=False))
 
+    @mock.patch('curtin.block.get_blockdev_for_partition')
+    @mock.patch('os.path.exists')
+    def test_cciss_sysfs_path(self, m_os_path_exists, m_get_blk):
+        m_os_path_exists.return_value = True
+        m_get_blk.return_value = ('cciss!c0d0', None)
+        self.assertEqual('/sys/class/block/cciss!c0d0',
+                         block.sys_block_path('/dev/cciss/c0d0'))
+        m_get_blk.return_value = ('cciss!c0d0', 1)
+        self.assertEqual('/sys/class/block/cciss!c0d0/cciss!c0d0p1',
+                         block.sys_block_path('/dev/cciss/c0d0p1'))
+
 
 class TestWipeFile(TestCase):
     def __init__(self, *args, **kwargs):
@@ -207,4 +252,169 @@
         found = util.load_file(trgfile)
         self.assertEqual(data, found)
 
+
+class TestWipeVolume(TestCase):
+    dev = '/dev/null'
+
+    @mock.patch('curtin.block.lvm')
+    @mock.patch('curtin.block.util')
+    def test_wipe_pvremove(self, mock_util, mock_lvm):
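+        # wipe_volume in pvremove mode should force-remove the PV and
+        # accept rc 5 as well as 0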
+        block.wipe_volume(self.dev, mode='pvremove')
+        mock_util.subp.assert_called_with(
+            ['pvremove', '--force', '--force', '--yes', self.dev], rcs=[0, 5],
+            capture=True)
+        self.assertTrue(mock_lvm.lvm_scan.called)
+
+    @mock.patch('curtin.block.quick_zero')
+    def test_wipe_superblock(self, mock_quick_zero):
+        block.wipe_volume(self.dev, mode='superblock')
+        mock_quick_zero.assert_called_with(self.dev, partitions=False)
+        block.wipe_volume(self.dev, mode='superblock-recursive')
+        mock_quick_zero.assert_called_with(self.dev, partitions=True)
+
+    @mock.patch('curtin.block.wipe_file')
+    def test_wipe_zero(self, mock_wipe_file):
+        with mocked_open() as mock_open:
+            block.wipe_volume(self.dev, mode='zero')
+            mock_wipe_file.assert_called_with(self.dev)
+            mock_open.return_value = mock.MagicMock()
+
+    @mock.patch('curtin.block.wipe_file')
+    def test_wipe_random(self, mock_wipe_file):
+        with mocked_open() as mock_open:
+            mock_open.return_value = mock.MagicMock()
+            block.wipe_volume(self.dev, mode='random')
+            mock_open.assert_called_with('/dev/urandom', 'rb')
+            mock_wipe_file.assert_called_with(
+                self.dev, reader=mock_open.return_value.__enter__().read)
+
+    def test_bad_input(self):
+        with self.assertRaises(ValueError):
+            block.wipe_volume(self.dev, mode='invalidmode')
+
+
+class TestBlockKnames(TestCase):
+    """Tests for some of the kname functions in block"""
+    def test_determine_partition_kname(self):
+        part_knames = [(('sda', 1), 'sda1'),
+                       (('vda', 1), 'vda1'),
+                       (('nvme0n1', 1), 'nvme0n1p1'),
+                       (('mmcblk0', 1), 'mmcblk0p1'),
+                       (('cciss!c0d0', 1), 'cciss!c0d0p1'),
+                       (('dm-0', 1), 'dm-0p1'),
+                       (('mpath1', 2), 'mpath1p2')]
+        for ((disk_kname, part_number), part_kname) in part_knames:
+            self.assertEqual(block.partition_kname(disk_kname, part_number),
+                             part_kname)
+
+    @mock.patch('curtin.block.os.path.realpath')
+    def test_path_to_kname(self, mock_os_realpath):
+        mock_os_realpath.side_effect = lambda x: os.path.normpath(x)
+        path_knames = [('/dev/sda', 'sda'),
+                       ('/dev/sda1', 'sda1'),
+                       ('/dev////dm-0/', 'dm-0'),
+                       ('vdb', 'vdb'),
+                       ('/dev/mmcblk0p1', 'mmcblk0p1'),
+                       ('/dev/nvme0n0p1', 'nvme0n0p1'),
+                       ('/sys/block/vdb', 'vdb'),
+                       ('/sys/block/vdb/vdb2/', 'vdb2'),
+                       ('/dev/cciss/c0d0', 'cciss!c0d0'),
+                       ('/dev/cciss/c0d0p1/', 'cciss!c0d0p1'),
+                       ('/sys/class/block/cciss!c0d0p1', 'cciss!c0d0p1'),
+                       ('nvme0n1p4', 'nvme0n1p4')]
+        for (path, expected_kname) in path_knames:
+            self.assertEqual(block.path_to_kname(path), expected_kname)
+            if os.path.sep in path:
+                mock_os_realpath.assert_called_with(path)
+
+    @mock.patch('curtin.block.os.path.exists')
+    @mock.patch('curtin.block.os.path.realpath')
+    @mock.patch('curtin.block.is_valid_device')
+    def test_kname_to_path(self, mock_is_valid_device, mock_os_realpath,
+                           mock_exists):
+        kname_paths = [('sda', '/dev/sda'),
+                       ('sda1', '/dev/sda1'),
+                       ('/dev/sda', '/dev/sda'),
+                       ('cciss!c0d0p1', '/dev/cciss/c0d0p1'),
+                       ('/dev/cciss/c0d0', '/dev/cciss/c0d0'),
+                       ('mmcblk0p1', '/dev/mmcblk0p1')]
+
+        mock_exists.return_value = True
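+        # emulate kname resolution: '!' in a kname maps to '/' in /dev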
+        mock_os_realpath.side_effect = lambda x: x.replace('!', '/')
+        # is_valid_device must return False for knames that are not paths
+        mock_is_valid_device.side_effect = lambda x: x.startswith('/dev')
+        for (kname, expected_path) in kname_paths:
+            self.assertEqual(block.kname_to_path(kname), expected_path)
+            mock_is_valid_device.assert_called_with(expected_path)
+
+        # test failure
+        mock_is_valid_device.return_value = False
+        mock_is_valid_device.side_effect = None
+        for (kname, expected_path) in kname_paths:
+            with self.assertRaises(OSError):
+                block.kname_to_path(kname)
+
+
+class TestPartTableSignature(TestCase):
+    blockdev = '/dev/null'
+    dos_content = b'\x00' * 0x1fe + b'\x55\xAA' + b'\x00' * 0xf00
+    gpt_content = b'\x00' * 0x200 + b'EFI PART' + b'\x00' * (0x200 - 8)
+    gpt_content_4k = b'\x00' * 0x800 + b'EFI PART' + b'\x00' * (0x800 - 8)
+    null_content = b'\x00' * 0xf00
+
+    def _test_util_load_file(self, content, device, mode, read_len, offset):
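+        # stand-in for util.load_file: return a slice of the fake
+        # device contents, as bytes or str depending on the mode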
+        return (bytes if 'b' in mode else str)(content[offset:offset+read_len])
+
+    @mock.patch('curtin.block.check_dos_signature')
+    @mock.patch('curtin.block.check_efi_signature')
+    def test_gpt_part_table_type(self, mock_check_efi, mock_check_dos):
+        """test block.get_part_table_type logic"""
+        for (has_dos, has_efi, expected) in [(True, True, 'gpt'),
+                                             (True, False, 'dos'),
+                                             (False, False, None)]:
+            mock_check_dos.return_value = has_dos
+            mock_check_efi.return_value = has_efi
+            self.assertEqual(
+                block.get_part_table_type(self.blockdev), expected)
+
+    @mock.patch('curtin.block.is_block_device')
+    @mock.patch('curtin.block.util')
+    def test_check_dos_signature(self, mock_util, mock_is_block_device):
+        """test block.check_dos_signature"""
+        for (is_block, f_size, contents, expected) in [
+                (True, 0x200, self.dos_content, True),
+                (False, 0x200, self.dos_content, False),
+                (True, 0, self.dos_content, False),
+                (True, 0x400, self.dos_content, True),
+                (True, 0x200, self.null_content, False)]:
+            mock_util.load_file.side_effect = (
+                functools.partial(self._test_util_load_file, contents))
+            mock_util.file_size.return_value = f_size
+            mock_is_block_device.return_value = is_block
+            (self.assertTrue if expected else self.assertFalse)(
+                block.check_dos_signature(self.blockdev))
+
+    @mock.patch('curtin.block.is_block_device')
+    @mock.patch('curtin.block.get_blockdev_sector_size')
+    @mock.patch('curtin.block.util')
+    def test_check_efi_signature(self, mock_util, mock_get_sector_size,
+                                 mock_is_block_device):
+        """test block.check_efi_signature"""
+        for (sector_size, gpt_dat) in zip(
+                (0x200, 0x800), (self.gpt_content, self.gpt_content_4k)):
+            mock_get_sector_size.return_value = (sector_size, sector_size)
+            for (is_block, f_size, contents, expected) in [
+                    (True, 2 * sector_size, gpt_dat, True),
+                    (True, 1 * sector_size, gpt_dat, False),
+                    (False, 2 * sector_size, gpt_dat, False),
+                    (True, 0, gpt_dat, False),
+                    (True, 2 * sector_size, self.dos_content, False),
+                    (True, 2 * sector_size, self.null_content, False)]:
+                mock_util.load_file.side_effect = (
+                    functools.partial(self._test_util_load_file, contents))
+                mock_util.file_size.return_value = f_size
+                mock_is_block_device.return_value = is_block
+                (self.assertTrue if expected else self.assertFalse)(
+                    block.check_efi_signature(self.blockdev))
+
 # vi: ts=4 expandtab syntax=python

=== added file 'tests/unittests/test_block_lvm.py'
--- tests/unittests/test_block_lvm.py	1970-01-01 00:00:00 +0000
+++ tests/unittests/test_block_lvm.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,94 @@
+from curtin.block import lvm
+
+from unittest import TestCase
+import mock
+
+
+class TestBlockLvm(TestCase):
+    vg_name = 'ubuntu-volgroup'
+
+    @mock.patch('curtin.block.lvm.util')
+    def test_filter_lvm_info(self, mock_util):
+        """make sure lvm._filter_lvm_info filters properly"""
+        match_name = "vg_name"
+        query_results = ["lv_1", "lv_2"]
+        lvtool_name = 'lvscan'
+        query_name = 'lv_name'
+        # NOTE: textwrap.dedent is deliberately not used here, to make sure
+        #       that the function can handle leading spaces, as some of the
+        #       tools have spaces before the first column in their output
+        mock_util.subp.return_value = (
+            """
+            matchfield_bad1{sep}qfield1
+            {matchfield_good}{sep}{query_good1}
+            matchfield_bad2{sep}qfield2
+            {matchfield_good}{sep}{query_good2}
+            """.format(matchfield_good=self.vg_name,
+                       query_good1=query_results[0],
+                       query_good2=query_results[1],
+                       sep=lvm._SEP), "")
+        result_list = lvm._filter_lvm_info(lvtool_name, match_name,
+                                           query_name, self.vg_name)
+        self.assertEqual(len(result_list), 2)
+        mock_util.subp.assert_called_with(
+            [lvtool_name, '-C', '--separator', lvm._SEP, '--noheadings', '-o',
+             '{},{}'.format(match_name, query_name)], capture=True)
+        self.assertEqual(result_list, query_results)
+        # make sure _filter_lvm_info can fail gracefully if no match
+        result_list = lvm._filter_lvm_info(lvtool_name, match_name,
+                                           query_name, 'bad_match_val')
+        self.assertEqual(len(result_list), 0)
+
+    @mock.patch('curtin.block.lvm._filter_lvm_info')
+    def test_get_lvm_info(self, mock_filter_lvm_info):
+        """
+        make sure that the get lvm info functions make the right calls to
+        lvm._filter_lvm_info
+        """
+        lvm.get_pvols_in_volgroup(self.vg_name)
+        mock_filter_lvm_info.assert_called_with(
+            'pvdisplay', 'vg_name', 'pv_name', self.vg_name)
+        lvm.get_lvols_in_volgroup(self.vg_name)
+        mock_filter_lvm_info.assert_called_with(
+            'lvdisplay', 'vg_name', 'lv_name', self.vg_name)
+
+    @mock.patch('curtin.block.lvm.util')
+    def test_split_lvm_name(self, mock_util):
+        """
+        make sure that split_lvm_name makes the right call to dmsetup splitname
+        """
+        lv_name = 'root_lvol'
+        full_name = '{}-{}'.format(self.vg_name, lv_name)
+        mock_util.subp.return_value = (
+            '  {vg_name}{sep}{lv_name} '.format(
+                vg_name=self.vg_name, lv_name=lv_name, sep=lvm._SEP), '')
+        (res_vg_name, res_lv_name) = lvm.split_lvm_name(full_name)
+        self.assertEqual(res_vg_name, self.vg_name)
+        self.assertEqual(res_lv_name, lv_name)
+        mock_util.subp.assert_called_with(
+            ['dmsetup', 'splitname', full_name, '-c', '--noheadings',
+             '--separator', lvm._SEP, '-o', 'vg_name,lv_name'], capture=True)
+
+    @mock.patch('curtin.block.lvm.lvmetad_running')
+    @mock.patch('curtin.block.lvm.util')
+    def test_lvm_scan(self, mock_util, mock_lvmetad):
+        """check that lvm_scan formats commands correctly for each release"""
+        for (count, (codename, lvmetad_status, use_cache)) in enumerate(
+                [('precise', False, False), ('precise', True, False),
+                 ('trusty', False, False), ('trusty', True, True),
+                 ('vivid', False, False), ('vivid', True, True),
+                 ('wily', False, False), ('wily', True, True),
+                 ('xenial', False, False), ('xenial', True, True),
+                 ('yakkety', True, True), ('UNAVAILABLE', True, True),
+                 (None, True, True), (None, False, False)]):
+            mock_util.lsb_release.return_value = {'codename': codename}
+            mock_lvmetad.return_value = lvmetad_status
+            lvm.lvm_scan()
+            self.assertEqual(
+                len(mock_util.subp.call_args_list), 2 * (count + 1))
+            for (expected, actual) in zip(
+                    [['pvscan'], ['vgscan', '--mknodes']],
+                    mock_util.subp.call_args_list[2 * count:2 * count + 2]):
+                if use_cache:
+                    expected.append('--cache')
+                self.assertEqual(mock.call(expected, capture=True), actual)

=== modified file 'tests/unittests/test_block_mdadm.py'
--- tests/unittests/test_block_mdadm.py	2016-05-10 16:13:29 +0000
+++ tests/unittests/test_block_mdadm.py	2016-10-03 18:55:20 +0000
@@ -2,6 +2,7 @@
 from mock import call, patch
 from curtin.block import dev_short
 from curtin.block import mdadm
+from curtin import util
 import os
 import subprocess
 
@@ -24,34 +25,27 @@
         super(TestBlockMdadmAssemble, self).setUp()
         self.add_patch('curtin.block.mdadm.util', 'mock_util')
         self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
+        self.add_patch('curtin.block.mdadm.udev', 'mock_udev')
 
         # Common mock settings
         self.mock_valid.return_value = True
         self.mock_util.lsb_release.return_value = {'codename': 'precise'}
-        self.mock_util.subp.side_effect = [
-            ("", ""),  # mdadm assemble
-            ("", ""),  # udevadm settle
-        ]
+        self.mock_util.subp.return_value = ('', '')
 
     def test_mdadm_assemble_scan(self):
         mdadm.mdadm_assemble(scan=True)
-        expected_calls = [
-            call(["mdadm", "--assemble", "--scan"], capture=True,
-                 rcs=[0, 1, 2]),
-            call(["udevadm", "settle"]),
-        ]
-        self.mock_util.subp.assert_has_calls(expected_calls)
+        self.mock_util.subp.assert_called_with(
+            ["mdadm", "--assemble", "--scan", "-v"], capture=True,
+            rcs=[0, 1, 2])
+        self.assertTrue(self.mock_udev.udevadm_settle.called)
 
     def test_mdadm_assemble_md_devname(self):
         md_devname = "/dev/md0"
         mdadm.mdadm_assemble(md_devname=md_devname)
-
-        expected_calls = [
-            call(["mdadm", "--assemble", md_devname, "--run"], capture=True,
-                 rcs=[0, 1, 2]),
-            call(["udevadm", "settle"]),
-        ]
-        self.mock_util.subp.assert_has_calls(expected_calls)
+        self.mock_util.subp.assert_called_with(
+            ["mdadm", "--assemble", md_devname, "--run"], capture=True,
+            rcs=[0, 1, 2])
+        self.assertTrue(self.mock_udev.udevadm_settle.called)
 
     def test_mdadm_assemble_md_devname_short(self):
         with self.assertRaises(ValueError):
@@ -67,12 +61,23 @@
         md_devname = "/dev/md0"
         devices = ["/dev/vdc1", "/dev/vdd1"]
         mdadm.mdadm_assemble(md_devname=md_devname, devices=devices)
-        expected_calls = [
-            call(["mdadm", "--assemble", md_devname, "--run"] + devices,
-                 capture=True, rcs=[0, 1, 2]),
-            call(["udevadm", "settle"]),
-        ]
-        self.mock_util.subp.assert_has_calls(expected_calls)
+        self.mock_util.subp.assert_called_with(
+            ["mdadm", "--assemble", md_devname, "--run"] + devices,
+            capture=True, rcs=[0, 1, 2])
+        self.assertTrue(self.mock_udev.udevadm_settle.called)
+
+    def test_mdadm_assemble_exec_error(self):
+
+        def _raise_pexec_error(*args, **kwargs):
+            raise util.ProcessExecutionError()
+
+        self.mock_util.ProcessExecutionError = util.ProcessExecutionError
+        self.mock_util.subp.side_effect = _raise_pexec_error
+        with self.assertRaises(util.ProcessExecutionError):
+            mdadm.mdadm_assemble(scan=True, ignore_errors=False)
+        self.mock_util.subp.assert_called_with(
+            ['mdadm', '--assemble', '--scan', '-v'], capture=True,
+            rcs=[0, 1, 2])
 
 
 class TestBlockMdadmCreate(MdadmTestBase):

=== modified file 'tests/unittests/test_block_mkfs.py'
--- tests/unittests/test_block_mkfs.py	2016-05-10 16:13:29 +0000
+++ tests/unittests/test_block_mkfs.py	2016-10-03 18:55:20 +0000
@@ -82,13 +82,13 @@
 
     def test_mkfs_fat(self):
         conf = self._get_config("fat32")
-        expected_flags = [["-n", "format1"], ["-F", "32"]]
+        expected_flags = ["-I", ["-n", "format1"], ["-F", "32"]]
         self._run_mkfs_with_config(conf, "mkfs.vfat", expected_flags)
 
     def test_mkfs_vfat(self):
         """Ensure we can use vfat without fatsize"""
         conf = self._get_config("vfat")
-        expected_flags = [["-n", "format1"], ]
+        expected_flags = ["-I", ["-n", "format1"], ]
         self._run_mkfs_with_config(conf, "mkfs.vfat", expected_flags)
 
     def test_mkfs_invalid_fstype(self):

=== added file 'tests/unittests/test_clear_holders.py'
--- tests/unittests/test_clear_holders.py	1970-01-01 00:00:00 +0000
+++ tests/unittests/test_clear_holders.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,329 @@
+from unittest import TestCase
+import mock
+
+from curtin.block import clear_holders
+import os
+import textwrap
+
+
+class TestClearHolders(TestCase):
+    test_blockdev = '/dev/null'
+    test_syspath = '/sys/class/block/null'
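+    # the first tree models lvm and dm-crypt stacked on a partition; the
+    # second models mdadm raid and bcache on top of several partitions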
+    example_holders_trees = [
+        [{'device': '/sys/class/block/sda', 'name': 'sda', 'holders':
+          [{'device': '/sys/class/block/sda/sda1', 'name': 'sda1',
+            'holders': [], 'dev_type': 'partition'},
+           {'device': '/sys/class/block/sda/sda2', 'name': 'sda2',
+            'holders': [], 'dev_type': 'partition'},
+           {'device': '/sys/class/block/sda/sda5', 'name': 'sda5', 'holders':
+            [{'device': '/sys/class/block/dm-0', 'name': 'dm-0', 'holders':
+              [{'device': '/sys/class/block/dm-1', 'name': 'dm-1',
+                'holders': [], 'dev_type': 'lvm'},
+               {'device': '/sys/class/block/dm-2', 'name': 'dm-2', 'holders':
+                [{'device': '/sys/class/block/dm-3', 'name': 'dm-3',
+                  'holders': [], 'dev_type': 'crypt'}],
+                'dev_type': 'lvm'}],
+              'dev_type': 'crypt'}],
+            'dev_type': 'partition'}],
+          'dev_type': 'disk'}],
+        [{"device": "/sys/class/block/vdb", 'name': 'vdb', "holders":
+          [{"device": "/sys/class/block/vdb/vdb1", 'name': 'vdb1',
+            "holders": [], "dev_type": "partition"},
+           {"device": "/sys/class/block/vdb/vdb2", 'name': 'vdb2',
+            "holders": [], "dev_type": "partition"},
+           {"device": "/sys/class/block/vdb/vdb3", 'name': 'vdb3', "holders":
+            [{"device": "/sys/class/block/md0", 'name': 'md0', "holders":
+              [{"device": "/sys/class/block/bcache1", 'name': 'bcache1',
+                "holders": [], "dev_type": "bcache"}],
+              "dev_type": "raid"}],
+            "dev_type": "partition"},
+           {"device": "/sys/class/block/vdb/vdb4", 'name': 'vdb4', "holders":
+            [{"device": "/sys/class/block/md0", 'name': 'md0', "holders":
+              [{"device": "/sys/class/block/bcache1", 'name': 'bcache1',
+                "holders": [], "dev_type": "bcache"}],
+              "dev_type": "raid"}],
+            "dev_type": "partition"},
+           {"device": "/sys/class/block/vdb/vdb5", 'name': 'vdb5', "holders":
+            [{"device": "/sys/class/block/md0", 'name': 'md0', "holders":
+              [{"device": "/sys/class/block/bcache1", 'name': 'bcache1',
+                "holders": [], "dev_type": "bcache"}],
+              "dev_type": "raid"}],
+            "dev_type": "partition"},
+           {"device": "/sys/class/block/vdb/vdb6", 'name': 'vdb6', "holders":
+            [{"device": "/sys/class/block/bcache1", 'name': 'bcache1',
+              "holders": [], "dev_type": "bcache"},
+             {"device": "/sys/class/block/bcache2", 'name': 'bcache2',
+              "holders": [], "dev_type": "bcache"}],
+            "dev_type": "partition"},
+           {"device": "/sys/class/block/vdb/vdb7", 'name': 'vdb7', "holders":
+            [{"device": "/sys/class/block/bcache2", 'name': 'bcache2',
+              "holders": [], "dev_type": "bcache"}],
+            "dev_type": "partition"},
+           {"device": "/sys/class/block/vdb/vdb8", 'name': 'vdb8',
+            "holders": [], "dev_type": "partition"}],
+          "dev_type": "disk"},
+         {"device": "/sys/class/block/vdc", 'name': 'vdc', "holders": [],
+          "dev_type": "disk"},
+         {"device": "/sys/class/block/vdd", 'name': 'vdd', "holders":
+          [{"device": "/sys/class/block/vdd/vdd1", 'name': 'vdd1',
+            "holders": [], "dev_type": "partition"}],
+          "dev_type": "disk"}],
+    ]
+
+    @mock.patch('curtin.block.clear_holders.block')
+    @mock.patch('curtin.block.clear_holders.util')
+    def test_get_dmsetup_uuid(self, mock_util, mock_block):
+        """ensure that clear_holders.get_dmsetup_uuid works as expected"""
+        uuid = "CRYPT-LUKS1-fe335a74374e4649af9776c1699676f8-sdb5_crypt"
+        mock_block.sysfs_to_devpath.return_value = self.test_blockdev
+        mock_util.subp.return_value = (' ' + uuid + '\n', None)
+        res = clear_holders.get_dmsetup_uuid(self.test_syspath)
+        mock_util.subp.assert_called_with(
+            ['dmsetup', 'info', self.test_blockdev, '-C', '-o',
+             'uuid', '--noheadings'], capture=True)
+        self.assertEqual(res, uuid)
+        mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)
+
+    @mock.patch('curtin.block.clear_holders.block')
+    @mock.patch('curtin.block.clear_holders.os')
+    def test_get_bcache_using_dev(self, mock_os, mock_block):
+        """Ensure that get_bcache_using_dev works"""
+        fake_bcache = '/sys/fs/bcache/fake'
+        mock_os.path.realpath.return_value = fake_bcache
+        mock_os.path.exists.side_effect = lambda x: x == fake_bcache
+        mock_block.sys_block_path.return_value = self.test_blockdev
+        bcache_dir = clear_holders.get_bcache_using_dev(self.test_blockdev)
+        self.assertEqual(bcache_dir, fake_bcache)
+
+    @mock.patch('curtin.block.clear_holders.get_dmsetup_uuid')
+    @mock.patch('curtin.block.clear_holders.block')
+    def test_differentiate_lvm_and_crypt(
+            self, mock_block, mock_get_dmsetup_uuid):
+        """test clear_holders.identify_lvm and clear_holders.identify_crypt"""
+        for (kname, dm_uuid, is_lvm, is_crypt) in [
+                ('dm-0', 'LVM-abcdefg', True, False),
+                ('sda', 'LVM-abcdefg', False, False),
+                ('sda', 'CRYPT-abcdefg', False, False),
+                ('dm-0', 'CRYPT-abcdefg', False, True),
+                ('dm-1', 'invalid', False, False)]:
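+            # per the table above, identification requires both a dm-*
+            # kname and a dmsetup uuid with the matching prefix ('LVM-'
+            # or 'CRYPT-'); either property alone is not enough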
+            mock_block.path_to_kname.return_value = kname
+            mock_get_dmsetup_uuid.return_value = dm_uuid
+            self.assertEqual(
+                is_lvm, clear_holders.identify_lvm(self.test_syspath))
+            self.assertEqual(
+                is_crypt, clear_holders.identify_crypt(self.test_syspath))
+            mock_block.path_to_kname.assert_called_with(self.test_syspath)
+            mock_get_dmsetup_uuid.assert_called_with(self.test_syspath)
+
+    @mock.patch('curtin.block.clear_holders.util')
+    @mock.patch('curtin.block.clear_holders.os')
+    @mock.patch('curtin.block.clear_holders.LOG')
+    @mock.patch('curtin.block.clear_holders.get_bcache_using_dev')
+    def test_shutdown_bcache(self, mock_get_bcache, mock_log, mock_os,
+                             mock_util):
+        """test clear_holders.shutdown_bcache"""
+        mock_os.path.exists.return_value = True
+        mock_os.path.join.side_effect = os.path.join
+        mock_get_bcache.return_value = self.test_blockdev
+        clear_holders.shutdown_bcache(self.test_syspath)
+        mock_get_bcache.assert_called_with(self.test_syspath)
+        self.assertTrue(mock_log.debug.called)
+        self.assertFalse(mock_log.warn.called)
+        mock_util.write_file.assert_called_with(self.test_blockdev + '/stop',
+                                                '1', mode=None)
+
+    @mock.patch('curtin.block.clear_holders.LOG')
+    @mock.patch('curtin.block.clear_holders.block.sys_block_path')
+    @mock.patch('curtin.block.clear_holders.lvm')
+    @mock.patch('curtin.block.clear_holders.util')
+    def test_shutdown_lvm(self, mock_util, mock_lvm, mock_syspath, mock_log):
+        """test clear_holders.shutdown_lvm"""
+        vg_name = 'volgroup1'
+        lv_name = 'lvol1'
+        mock_syspath.return_value = self.test_blockdev
+        mock_util.load_file.return_value = '-'.join((vg_name, lv_name))
+        mock_lvm.split_lvm_name.return_value = (vg_name, lv_name)
+        mock_lvm.get_lvols_in_volgroup.return_value = ['lvol2']
+        clear_holders.shutdown_lvm(self.test_blockdev)
+        mock_syspath.assert_called_with(self.test_blockdev)
+        mock_util.load_file.assert_called_with(self.test_blockdev + '/dm/name')
+        mock_lvm.split_lvm_name.assert_called_with(
+            '-'.join((vg_name, lv_name)))
+        self.assertTrue(mock_log.debug.called)
+        mock_util.subp.assert_called_with(
+            ['lvremove', '--force', '--force', '/'.join((vg_name, lv_name))],
+            rcs=[0, 5])
+        mock_lvm.get_lvols_in_volgroup.assert_called_with(vg_name)
+        self.assertEqual(len(mock_util.subp.call_args_list), 1)
+        self.assertTrue(mock_lvm.lvm_scan.called)
+        mock_lvm.get_lvols_in_volgroup.return_value = []
+        clear_holders.shutdown_lvm(self.test_blockdev)
+        mock_util.subp.assert_called_with(
+            ['vgremove', '--force', '--force', vg_name], rcs=[0, 5])
+
+    @mock.patch('curtin.block.clear_holders.block')
+    @mock.patch('curtin.block.clear_holders.util')
+    def test_shutdown_crypt(self, mock_util, mock_block):
+        """test clear_holders.shutdown_crypt"""
+        mock_block.sysfs_to_devpath.return_value = self.test_blockdev
+        clear_holders.shutdown_crypt(self.test_syspath)
+        mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)
+        mock_util.subp.assert_called_with(
+            ['cryptsetup', 'remove', self.test_blockdev], capture=True)
+
+    @mock.patch('curtin.block.clear_holders.LOG')
+    @mock.patch('curtin.block.clear_holders.block')
+    def test_shutdown_mdadm(self, mock_block, mock_log):
+        """test clear_holders.shutdown_mdadm"""
+        mock_block.sysfs_to_devpath.return_value = self.test_blockdev
+        clear_holders.shutdown_mdadm(self.test_syspath)
+        mock_block.mdadm.mdadm_stop.assert_called_with(self.test_blockdev)
+        mock_block.mdadm.mdadm_remove.assert_called_with(self.test_blockdev)
+        self.assertTrue(mock_log.debug.called)
+
+    @mock.patch('curtin.block.clear_holders.LOG')
+    @mock.patch('curtin.block.clear_holders.block')
+    def test_clear_holders_wipe_superblock(self, mock_block, mock_log):
+        """test clear_holders.wipe_superblock handles errors right"""
+        mock_block.sysfs_to_devpath.return_value = self.test_blockdev
+        mock_block.is_extended_partition.return_value = True
+        clear_holders.wipe_superblock(self.test_syspath)
+        self.assertFalse(mock_block.wipe_volume.called)
+        mock_block.is_extended_partition.return_value = False
+        clear_holders.wipe_superblock(self.test_syspath)
+        mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)
+        mock_block.wipe_volume.assert_called_with(
+            self.test_blockdev, mode='superblock')
+
+    @mock.patch('curtin.block.clear_holders.LOG')
+    @mock.patch('curtin.block.clear_holders.block')
+    @mock.patch('curtin.block.clear_holders.os')
+    def test_get_holders(self, mock_os, mock_block, mock_log):
+        """test clear_holders.get_holders"""
+        mock_block.sys_block_path.return_value = self.test_syspath
+        mock_os.path.join.side_effect = os.path.join
+        clear_holders.get_holders(self.test_blockdev)
+        mock_block.sys_block_path.assert_called_with(self.test_blockdev)
+        mock_os.path.join.assert_called_with(self.test_syspath, 'holders')
+        self.assertTrue(mock_log.debug.called)
+        mock_os.listdir.assert_called_with(
+            os.path.join(self.test_syspath, 'holders'))
+
+    def test_plan_shutdown_holders_trees(self):
+        """
+        make sure clear_holders.plan_shutdown_holder_trees orders shutdown
+        functions correctly and uses the appropriate shutdown function for
+        each dev type
+        """
+        # trees that have been generated, checked for correctness,
+        # and the order that they should be shut down in (by level)
+        test_trees_and_orders = [
+            (self.example_holders_trees[0][0],
+             ({'dm-3'}, {'dm-1', 'dm-2'}, {'dm-0'}, {'sda5', 'sda2', 'sda1'},
+              {'sda'})),
+            (self.example_holders_trees[1],
+             ({'bcache1'}, {'bcache2', 'md0'},
+              {'vdb1', 'vdb2', 'vdb3', 'vdb4', 'vdb5', 'vdb6', 'vdb7', 'vdb8',
+               'vdd1'},
+              {'vdb', 'vdc', 'vdd'}))
+        ]
+        for tree, correct_order in test_trees_and_orders:
+            res = clear_holders.plan_shutdown_holder_trees(tree)
+            for level in correct_order:
+                self.assertEqual({os.path.basename(e['device'])
+                                  for e in res[:len(level)]}, level)
+                res = res[len(level):]
+
+    def test_format_holders_tree(self):
+        """test output of clear_holders.format_holders_tree"""
+        test_trees_and_results = [
+            (self.example_holders_trees[0][0],
+             textwrap.dedent("""
+                 sda
+                 |-- sda1
+                 |-- sda2
+                 `-- sda5
+                     `-- dm-0
+                         |-- dm-1
+                         `-- dm-2
+                             `-- dm-3
+                 """).strip()),
+            (self.example_holders_trees[1][0],
+             textwrap.dedent("""
+                 vdb
+                 |-- vdb1
+                 |-- vdb2
+                 |-- vdb3
+                 |   `-- md0
+                 |       `-- bcache1
+                 |-- vdb4
+                 |   `-- md0
+                 |       `-- bcache1
+                 |-- vdb5
+                 |   `-- md0
+                 |       `-- bcache1
+                 |-- vdb6
+                 |   |-- bcache1
+                 |   `-- bcache2
+                 |-- vdb7
+                 |   `-- bcache2
+                 `-- vdb8
+                 """).strip()),
+            (self.example_holders_trees[1][1], 'vdc'),
+            (self.example_holders_trees[1][2],
+             textwrap.dedent("""
+                 vdd
+                 `-- vdd1
+                 """).strip())
+        ]
+        for tree, result in test_trees_and_results:
+            self.assertEqual(clear_holders.format_holders_tree(tree), result)
+
+    def test_get_holder_types(self):
+        """test clear_holders.get_holder_types"""
+        test_trees_and_results = [
+            (self.example_holders_trees[0][0],
+             {('disk', '/sys/class/block/sda'),
+              ('partition', '/sys/class/block/sda/sda1'),
+              ('partition', '/sys/class/block/sda/sda2'),
+              ('partition', '/sys/class/block/sda/sda5'),
+              ('crypt', '/sys/class/block/dm-0'),
+              ('lvm', '/sys/class/block/dm-1'),
+              ('lvm', '/sys/class/block/dm-2'),
+              ('crypt', '/sys/class/block/dm-3')}),
+            (self.example_holders_trees[1][0],
+             {('disk', '/sys/class/block/vdb'),
+              ('partition', '/sys/class/block/vdb/vdb1'),
+              ('partition', '/sys/class/block/vdb/vdb2'),
+              ('partition', '/sys/class/block/vdb/vdb3'),
+              ('partition', '/sys/class/block/vdb/vdb4'),
+              ('partition', '/sys/class/block/vdb/vdb5'),
+              ('partition', '/sys/class/block/vdb/vdb6'),
+              ('partition', '/sys/class/block/vdb/vdb7'),
+              ('partition', '/sys/class/block/vdb/vdb8'),
+              ('raid', '/sys/class/block/md0'),
+              ('bcache', '/sys/class/block/bcache1'),
+              ('bcache', '/sys/class/block/bcache2')})
+        ]
+        for tree, result in test_trees_and_results:
+            self.assertEqual(clear_holders.get_holder_types(tree), result)
+
+    @mock.patch('curtin.block.clear_holders.block.sys_block_path')
+    @mock.patch('curtin.block.clear_holders.gen_holders_tree')
+    def test_assert_clear(self, mock_gen_holders_tree, mock_syspath):
+        mock_gen_holders_tree.return_value = self.example_holders_trees[0][0]
+        mock_syspath.side_effect = lambda x: x
+        device = '/dev/null'
+        with self.assertRaises(OSError):
+            clear_holders.assert_clear(device)
+            mock_gen_holders_tree.assert_called_with(device)
+        mock_gen_holders_tree.return_value = self.example_holders_trees[1][1]
+        clear_holders.assert_clear(device)
+
+    @mock.patch('curtin.block.clear_holders.block.mdadm')
+    @mock.patch('curtin.block.clear_holders.util')
+    def test_start_clear_holders_deps(self, mock_util, mock_mdadm):
+        clear_holders.start_clear_holders_deps()
+        mock_mdadm.mdadm_assemble.assert_called_with(
+            scan=True, ignore_errors=True)
+        mock_util.subp.assert_called_with(['modprobe', 'bcache'], rcs=[0, 1])

=== added file 'tests/unittests/test_make_dname.py'
--- tests/unittests/test_make_dname.py	1970-01-01 00:00:00 +0000
+++ tests/unittests/test_make_dname.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,200 @@
+import textwrap
+import uuid
+from unittest import TestCase
+
+import mock
+
+from curtin.commands import block_meta
+
+
+class TestMakeDname(TestCase):
+    state = {'scratch': '/tmp/null'}
+    rules_d = '/tmp/null/rules.d'
+    rule_file = '/tmp/null/rules.d/{}.rules'
+    storage_config = {
+        'disk1': {'type': 'disk', 'id': 'disk1', 'name': 'main_disk'},
+        'disk1p1': {'type': 'partition', 'id': 'disk1p1', 'device': 'disk1'},
+        'disk2': {'type': 'disk', 'id': 'disk2',
+                  'name': 'in_valid/name!@#$% &*(+disk'},
+        'disk2p1': {'type': 'partition', 'id': 'disk2p1', 'device': 'disk2'},
+        'md_id': {'type': 'raid', 'id': 'md_id', 'name': 'mdadm_name'},
+        'md_id2': {'type': 'raid', 'id': 'md_id2', 'name': 'mdadm/name'},
+        'lvol_id': {'type': 'lvm_volgroup', 'id': 'lvol_id', 'name': 'vg1'},
+        'lpart_id': {'type': 'lvm_partition', 'id': 'lpart_id',
+                     'name': 'lpartition1', 'volgroup': 'lvol_id'},
+        'lpart2_id': {'type': 'lvm_partition', 'id': 'lpart2_id',
+                      'name': 'lvm part/2', 'volgroup': 'lvol_id'},
+    }
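+    # key=value templates in the style of blkid's export-format output;
+    # trusty_blkid is empty to model a blkid that reports nothing for
+    # the device (as on trusty, where no PTUUID is available)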
+    disk_blkid = textwrap.dedent("""
+        DEVNAME=/dev/sda
+        PTUUID={}
+        PTTYPE=dos""")
+    part_blkid = textwrap.dedent("""
+        DEVNAME=/dev/sda1
+        UUID=f3e6efc2-d586-4b35-a681-dffb987c66fd
+        TYPE=ext2
+        PARTUUID={}""")
+    trusty_blkid = ""
+
+    def _make_mock_subp_blkid(self, ident, blkid_out):
+
+        def subp_blkid(cmd, capture=False, rcs=None, retries=None):
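+            # stand-in for util.subp: substitute the uuid into the blkid
+            # output template and return it as (stdout, stderr)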
+            return (blkid_out.format(ident), None)
+
+        return subp_blkid
+
+    def _formatted_rule(self, identifiers, target):
+        rule = ['SUBSYSTEM=="block"', 'ACTION=="add|change"']
+        rule.extend(['ENV{%s}=="%s"' % ident for ident in identifiers])
+        rule.append('SYMLINK+="disk/by-dname/{}"'.format(target))
+        return ', '.join(rule)
+
+    @mock.patch('curtin.commands.block_meta.LOG')
+    @mock.patch('curtin.commands.block_meta.get_path_to_storage_volume')
+    @mock.patch('curtin.commands.block_meta.util')
+    def test_make_dname_disk(self, mock_util, mock_get_path, mock_log):
+        disk_ptuuid = str(uuid.uuid1())
+        mock_util.subp.side_effect = self._make_mock_subp_blkid(
+            disk_ptuuid, self.disk_blkid)
+        mock_util.load_command_environment.return_value = self.state
+        rule_identifiers = [
+            ('DEVTYPE', 'disk'),
+            ('ID_PART_TABLE_UUID', disk_ptuuid)
+        ]
+
+        # simple run
+        res_dname = 'main_disk'
+        block_meta.make_dname('disk1', self.storage_config)
+        mock_util.ensure_dir.assert_called_with(self.rules_d)
+        self.assertTrue(mock_log.debug.called)
+        self.assertFalse(mock_log.warning.called)
+        mock_util.write_file.assert_called_with(
+            self.rule_file.format(res_dname),
+            self._formatted_rule(rule_identifiers, res_dname))
+
+        # run invalid dname
+        res_dname = 'in_valid-name----------disk'
+        block_meta.make_dname('disk2', self.storage_config)
+        self.assertTrue(mock_log.warning.called)
+        mock_util.write_file.assert_called_with(
+            self.rule_file.format(res_dname),
+            self._formatted_rule(rule_identifiers, res_dname))
+
+    @mock.patch('curtin.commands.block_meta.LOG')
+    @mock.patch('curtin.commands.block_meta.get_path_to_storage_volume')
+    @mock.patch('curtin.commands.block_meta.util')
+    def test_make_dname_failures(self, mock_util, mock_get_path, mock_log):
+        mock_util.subp.side_effect = self._make_mock_subp_blkid(
+            '', self.trusty_blkid)
+        mock_util.load_command_environment.return_value = self.state
+
+        warning_msg = "Can't find a uuid for volume: {}. Skipping dname."
+
+        # disk with no PT_UUID
+        block_meta.make_dname('disk1', self.storage_config)
+        mock_log.warning.assert_called_with(warning_msg.format('disk1'))
+        self.assertFalse(mock_util.write_file.called)
+
+        # partition with no PART_UUID
+        block_meta.make_dname('disk1p1', self.storage_config)
+        mock_log.warning.assert_called_with(warning_msg.format('disk1p1'))
+        self.assertFalse(mock_util.write_file.called)
+
+    @mock.patch('curtin.commands.block_meta.LOG')
+    @mock.patch('curtin.commands.block_meta.get_path_to_storage_volume')
+    @mock.patch('curtin.commands.block_meta.util')
+    def test_make_dname_partition(self, mock_util, mock_get_path, mock_log):
+        part_uuid = str(uuid.uuid1())
+        mock_util.subp.side_effect = self._make_mock_subp_blkid(
+            part_uuid, self.part_blkid)
+        mock_util.load_command_environment.return_value = self.state
+
+        rule_identifiers = [
+            ('DEVTYPE', 'partition'),
+            ('ID_PART_ENTRY_UUID', part_uuid),
+        ]
+
+        # simple run
+        res_dname = 'main_disk-part1'
+        block_meta.make_dname('disk1p1', self.storage_config)
+        mock_util.ensure_dir.assert_called_with(self.rules_d)
+        self.assertTrue(mock_log.debug.called)
+        self.assertFalse(mock_log.warning.called)
+        mock_util.write_file.assert_called_with(
+            self.rule_file.format(res_dname),
+            self._formatted_rule(rule_identifiers, res_dname))
+
+        # run invalid dname
+        res_dname = 'in_valid-name----------disk-part1'
+        block_meta.make_dname('disk2p1', self.storage_config)
+        self.assertTrue(mock_log.warning.called)
+        mock_util.write_file.assert_called_with(
+            self.rule_file.format(res_dname),
+            self._formatted_rule(rule_identifiers, res_dname))
+
+    @mock.patch('curtin.commands.block_meta.mdadm')
+    @mock.patch('curtin.commands.block_meta.LOG')
+    @mock.patch('curtin.commands.block_meta.get_path_to_storage_volume')
+    @mock.patch('curtin.commands.block_meta.util')
+    def test_make_dname_raid(self, mock_util, mock_get_path, mock_log,
+                             mock_mdadm):
+        md_uuid = str(uuid.uuid1())
+        mock_mdadm.mdadm_query_detail.return_value = {'MD_UUID': md_uuid}
+        mock_util.load_command_environment.return_value = self.state
+        rule_identifiers = [('MD_UUID', md_uuid)]
+
+        # simple
+        res_dname = 'mdadm_name'
+        block_meta.make_dname('md_id', self.storage_config)
+        self.assertTrue(mock_log.debug.called)
+        self.assertFalse(mock_log.warning.called)
+        mock_util.write_file.assert_called_with(
+            self.rule_file.format(res_dname),
+            self._formatted_rule(rule_identifiers, res_dname))
+
+        # invalid name
+        res_dname = 'mdadm-name'
+        block_meta.make_dname('md_id2', self.storage_config)
+        self.assertTrue(mock_log.warning.called)
+        mock_util.write_file.assert_called_with(
+            self.rule_file.format(res_dname),
+            self._formatted_rule(rule_identifiers, res_dname))
+
+    @mock.patch('curtin.commands.block_meta.LOG')
+    @mock.patch('curtin.commands.block_meta.get_path_to_storage_volume')
+    @mock.patch('curtin.commands.block_meta.util')
+    def test_make_dname_lvm_partition(self, mock_util, mock_get_path,
+                                      mock_log):
+        mock_util.load_command_environment.return_value = self.state
+
+        # simple
+        res_dname = 'vg1-lpartition1'
+        rule_identifiers = [('DM_NAME', res_dname)]
+        block_meta.make_dname('lpart_id', self.storage_config)
+        self.assertTrue(mock_log.debug.called)
+        self.assertFalse(mock_log.warning.called)
+        mock_util.write_file.assert_called_with(
+            self.rule_file.format(res_dname),
+            self._formatted_rule(rule_identifiers, res_dname))
+
+        # with invalid name
+        res_dname = 'vg1-lvm-part-2'
+        rule_identifiers = [('DM_NAME', 'vg1-lvm part/2')]
+        block_meta.make_dname('lpart2_id', self.storage_config)
+        self.assertTrue(mock_log.warning.called)
+        mock_util.write_file.assert_called_with(
+            self.rule_file.format(res_dname),
+            self._formatted_rule(rule_identifiers, res_dname))
+
+    def test_sanitize_dname(self):
+        unsanitized_to_sanitized = [
+            ('main_disk', 'main_disk'),
+            ('main-disk', 'main-disk'),
+            ('main/disk', 'main-disk'),
+            ('main disk', 'main-disk'),
+            ('m.a/i*n#  d~i+sk', 'm-a-i-n---d-i-sk'),
+        ]
+        for (unsanitized, sanitized) in unsanitized_to_sanitized:
+            self.assertEqual(block_meta.sanitize_dname(unsanitized), sanitized)
+
+# vi: ts=4 expandtab syntax=python

=== modified file 'tests/unittests/test_net.py'
--- tests/unittests/test_net.py	2016-10-03 18:00:41 +0000
+++ tests/unittests/test_net.py	2016-10-03 18:55:20 +0000
@@ -473,10 +473,9 @@
 
             auto eth0
             iface eth0 inet dhcp
-                post-up ifup eth0:1
 
-            auto eth0:1
-            iface eth0:1 inet static
+            # control-alias eth0
+            iface eth0 inet static
                 address 192.168.21.3/24
                 dns-nameservers 8.8.8.8 8.8.4.4
                 dns-search barley.maas sach.maas
@@ -515,12 +514,11 @@
             iface bond0 inet static
                 address 10.23.23.2/24
                 bond-mode active-backup
-                hwaddress 52:54:00:12:34:06
+                hwaddress ether 52:54:00:12:34:06
                 bond-slaves none
-                post-up ifup bond0:1
 
-            auto bond0:1
-            iface bond0:1 inet static
+            # control-alias bond0
+            iface bond0 inet static
                 address 10.23.24.2/24
 
             source /etc/network/interfaces.d/*.cfg
@@ -551,10 +549,9 @@
                 address 192.168.14.2/24
                 gateway 192.168.14.1
                 mtu 1492
-                post-up ifup interface1:1
 
-            auto interface1:1
-            iface interface1:1 inet static
+            # control-alias interface1
+            iface interface1 inet static
                 address 192.168.14.4/24
 
             allow-hotplug interface2
@@ -597,10 +594,9 @@
             auto eth0
             iface eth0 inet6 static
                 address fde9:8f83:4a81:1:0:1:0:6/64
-                post-up ifup eth0:1
 
-            auto eth0:1
-            iface eth0:1 inet static
+            # control-alias eth0
+            iface eth0 inet static
                 address 192.168.0.1/24
 
             source /etc/network/interfaces.d/*.cfg
@@ -613,5 +609,50 @@
         self.assertEqual(sorted(ifaces.split('\n')),
                          sorted(net_ifaces.split('\n')))
 
+    def test_render_interfaces_ipv4_multiple_alias(self):
+        network_yaml = '''
+network:
+    version: 1
+    config:
+        # multi_v4_alias: multiple v4 addrs on same interface
+        - type: physical
+          name: interface1
+          mac_address: "52:54:00:12:34:02"
+          subnets:
+              - type: dhcp
+              - type: static
+                address: 192.168.2.2/22
+                gateway: 192.168.2.1
+              - type: static
+                address: 10.23.23.7/23
+                gateway: 10.23.23.1
+'''
+
+        ns = self.get_net_state(network_yaml)
+        ifaces = dedent("""\
+            auto lo
+            iface lo inet loopback
+
+            auto interface1
+            iface interface1 inet dhcp
+
+            # control-alias interface1
+            iface interface1 inet static
+                address 192.168.2.2/22
+                gateway 192.168.2.1
+
+            # control-alias interface1
+            iface interface1 inet static
+                address 10.23.23.7/23
+                gateway 10.23.23.1
+
+            source /etc/network/interfaces.d/*.cfg
+            """)
+        net_ifaces = net.render_interfaces(ns.network_state)
+        print(net_ifaces)
+        print(ifaces)
+        self.assertEqual(sorted(ifaces.split('\n')),
+                         sorted(net_ifaces.split('\n')))
+
 
 # vi: ts=4 expandtab syntax=python

=== modified file 'tests/unittests/test_util.py'
--- tests/unittests/test_util.py	2016-02-12 21:54:46 +0000
+++ tests/unittests/test_util.py	2016-10-03 18:55:20 +0000
@@ -121,12 +121,13 @@
         rdata = {'id': 'Ubuntu', 'description': 'Ubuntu 14.04.2 LTS',
                  'codename': 'trusty', 'release': '14.04'}
 
-        def fake_subp(cmd, capture=False):
+        def fake_subp(cmd, capture=False, target=None):
             return output, 'No LSB modules are available.'
 
         mock_subp.side_effect = fake_subp
         found = util.lsb_release()
-        mock_subp.assert_called_with(['lsb_release', '--all'], capture=True)
+        mock_subp.assert_called_with(
+            ['lsb_release', '--all'], capture=True, target=None)
         self.assertEqual(found, rdata)
 
     @mock.patch("curtin.util.subp")
@@ -144,6 +145,8 @@
 
     stdin2err = ['bash', '-c', 'cat >&2']
     stdin2out = ['cat']
+    bin_true = ['bash', '-c', ':']
+    exit_with_value = ['bash', '-c', 'exit ${1:-0}', 'test_subp_exit_val']
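+    # with 'bash -c', the word after the script becomes $0
+    # ('test_subp_exit_val'); arguments appended by callers land in $1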
     utf8_invalid = b'ab\xaadef'
     utf8_valid = b'start \xc3\xa9 end'
     utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7'
@@ -160,6 +163,26 @@
         (out, _err) = util.subp(cmd, capture=True)
         self.assertEqual(out, self.utf8_valid_2.decode('utf-8'))
 
+    def test_subp_target_as_different_forms_of_slash_works(self):
+        # passing target=/ in any form should work.
+
+        # it is assumed that if chroot were used, the test case would
+        # fail unless run as root ('chroot /' is still privileged)
+        util.subp(self.bin_true, target="/")
+        util.subp(self.bin_true, target="//")
+        util.subp(self.bin_true, target="///")
+        util.subp(self.bin_true, target="//etc/..//")
+
+    def test_subp_exit_nonzero_raises(self):
+        exc = None
+        try:
+            util.subp(self.exit_with_value + ["9"])
+        except util.ProcessExecutionError as e:
+            self.assertEqual(9, e.exit_code)
+            exc = e
+
+        self.assertNotEqual(exc, None)
+
     def test_subp_respects_decode_false(self):
         (out, err) = util.subp(self.stdin2out, capture=True, decode=False,
                                data=self.utf8_valid)
@@ -202,6 +225,69 @@
         self.assertEqual(err, None)
         self.assertEqual(out, None)
 
+    def _subp_wrap_popen(self, cmd, kwargs,
+                         returncode=0, stdout=b'', stderr=b''):
+        # mock subprocess.Popen as invoked by subp; check that subp
+        # returned the output of 'communicate' and return the
+        # (args, kwargs) that Popen() was called with.
+
+        capture = kwargs.get('capture')
+
+        with mock.patch("curtin.util.subprocess.Popen") as m_popen:
+            sp = mock.Mock()
+            m_popen.return_value = sp
+            if capture:
+                sp.communicate.return_value = (stdout, stderr)
+            else:
+                sp.communicate.return_value = (None, None)
+            sp.returncode = returncode
+            ret = util.subp(cmd, **kwargs)
+
+        # popen should only ever be called once
+        self.assertTrue(m_popen.called)
+        self.assertEqual(1, m_popen.call_count)
+        # communicate() needs to have been called.
+        self.assertTrue(sp.communicate.called)
+
+        if capture:
+            # capture response is decoded if decode is not False
+            decode = kwargs.get('decode', "replace")
+            if decode is False:
+                self.assertEqual((stdout, stderr), ret)
+            else:
+                self.assertEqual((stdout.decode(errors=decode),
+                                  stderr.decode(errors=decode)), ret)
+        else:
+            # if capture is false, then return is None, None
+            self.assertEqual((None, None), ret)
+
+        popen_args, popen_kwargs = m_popen.call_args
+
+        # if target is not provided or is /, chroot should not be used
+        target = util.target_path(kwargs.get('target', None))
+        if target == "/":
+            self.assertEqual(cmd, popen_args[0])
+        else:
+            self.assertEqual(['chroot', target] + list(cmd), popen_args[0])
+        return m_popen.call_args
+
+    def test_with_target_gets_chroot(self):
+        args, kwargs = self._subp_wrap_popen(["my-command"],
+                                             {'target': "/mytarget"})
+        self.assertIn('chroot', args[0])
+
+    def test_with_target_as_slash_does_not_chroot(self):
+        args, kwargs = self._subp_wrap_popen(
+            ['whatever'], {'capture': True, 'target': "/"})
+        self.assertNotIn('chroot', args[0])
+
+    def test_with_no_target_does_not_chroot(self):
+        args, kwargs = self._subp_wrap_popen(['whatever'], {'capture': True})
+        # note: this path is reasonably covered by the tests above that
+        # do not mock Popen; if subp did try to chroot here, the unit
+        # tests would fail unless they were run as root.
+        self.assertNotIn('chroot', args[0])
+
 
 class TestHuman2Bytes(TestCase):
     GB = 1024 * 1024 * 1024
@@ -237,6 +323,25 @@
     def test_GB_equals_G(self):
         self.assertEqual(util.human2bytes("3GB"), util.human2bytes("3G"))
 
+    def test_b2h_errors(self):
+        self.assertRaises(ValueError, util.bytes2human, 10.4)
+        self.assertRaises(ValueError, util.bytes2human, 'notint')
+        self.assertRaises(ValueError, util.bytes2human, -1)
+        self.assertRaises(ValueError, util.bytes2human, -1.0)
+
+    def test_b2h_values(self):
+        self.assertEqual('10G', util.bytes2human(10 * self.GB))
+        self.assertEqual('10M', util.bytes2human(10 * self.MB))
+        self.assertEqual('1000B', util.bytes2human(1000))
+        self.assertEqual('1K', util.bytes2human(1024))
+        self.assertEqual('1K', util.bytes2human(1024.0))
+        self.assertEqual('1T', util.bytes2human(float(1024 * self.GB)))
+
+    def test_h2b_b2b(self):
+        for size_str in ['10G', '20G', '2T', '12K', '1M', '1023K']:
+            self.assertEqual(
+                util.bytes2human(util.human2bytes(size_str)), size_str)
+
 
 class TestSetUnExecutable(TestCase):
     tmpf = None
@@ -282,4 +387,77 @@
         bogus = os.path.join(self.tmpd, 'bogus')
         self.assertRaises(ValueError, util.set_unexecutable, bogus, True)
 
+
+class TestTargetPath(TestCase):
+    def test_target_empty_string(self):
+        self.assertEqual("/etc/passwd", util.target_path("", "/etc/passwd"))
+
+    def test_target_non_string_raises(self):
+        self.assertRaises(ValueError, util.target_path, False)
+        self.assertRaises(ValueError, util.target_path, 9)
+        self.assertRaises(ValueError, util.target_path, True)
+
+    def test_lots_of_slashes_is_slash(self):
+        self.assertEqual("/", util.target_path("/"))
+        self.assertEqual("/", util.target_path("//"))
+        self.assertEqual("/", util.target_path("///"))
+        self.assertEqual("/", util.target_path("////"))
+
+    def test_empty_string_is_slash(self):
+        self.assertEqual("/", util.target_path(""))
+
+    def test_recognizes_relative(self):
+        self.assertEqual("/", util.target_path("/foo/../"))
+        self.assertEqual("/", util.target_path("/foo//bar/../../"))
+
+    def test_no_path(self):
+        self.assertEqual("/my/target", util.target_path("/my/target"))
+
+    def test_no_target_no_path(self):
+        self.assertEqual("/", util.target_path(None))
+
+    def test_no_target_with_path(self):
+        self.assertEqual("/my/path", util.target_path(None, "/my/path"))
+
+    def test_trailing_slash(self):
+        self.assertEqual("/my/target/my/path",
+                         util.target_path("/my/target/", "/my/path"))
+
+    def test_bunch_of_slashes_in_path(self):
+        self.assertEqual("/target/my/path/",
+                         util.target_path("/target/", "//my/path/"))
+        self.assertEqual("/target/my/path/",
+                         util.target_path("/target/", "///my/path/"))
+
+
+class TestRunInChroot(TestCase):
+    """Test the legacy 'RunInChroot'.
+
+    The test works by mocking ChrootableTarget's __enter__ to do nothing.
+    The assumptions made are:
+      a.) RunInChroot is a subclass of ChrootableTarget
+      b.) ChrootableTarget's __exit__ only undoes work that its __enter__
+          did, so in our mocked case it does nothing."""
+
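+    # 'new=lambda a: a' replaces __enter__ with an identity function, so
+    # no chroot is entered and the target object is returned unchanged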
+    @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a)
+    def test_run_in_chroot_with_target_slash(self):
+        with util.RunInChroot("/") as i:
+            out, err = i(['echo', 'HI MOM'], capture=True)
+        self.assertEqual('HI MOM\n', out)
+
+    @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a)
+    @mock.patch("curtin.util.subp")
+    def test_run_in_chroot_with_target(self, m_subp):
+        my_stdout = "my output"
+        my_stderr = "my stderr"
+        cmd = ['echo', 'HI MOM']
+        target = "/foo"
+        m_subp.return_value = (my_stdout, my_stderr)
+        with util.RunInChroot(target) as i:
+            out, err = i(cmd)
+        self.assertEqual(my_stdout, out)
+        self.assertEqual(my_stderr, err)
+        m_subp.assert_called_with(cmd, target=target)
+
+
 # vi: ts=4 expandtab syntax=python

=== modified file 'tests/vmtests/__init__.py'
--- tests/vmtests/__init__.py	2016-10-03 18:00:41 +0000
+++ tests/vmtests/__init__.py	2016-10-03 18:55:20 +0000
@@ -39,16 +39,12 @@
 KEEP_DATA = {"pass": "none", "fail": "all"}
 IMAGE_SYNCS = []
 TARGET_IMAGE_FORMAT = "raw"
-OVMF_CODE = "/usr/share/OVMF/OVMF_CODE.fd"
-OVMF_VARS = "/usr/share/OVMF/OVMF_VARS.fd"
-# precise -> vivid don't have split UEFI firmware, fallback
-if not os.path.exists(OVMF_CODE):
-    OVMF_CODE = "/usr/share/ovmf/OVMF.fd"
-    OVMF_VARS = OVMF_CODE
 
 
 DEFAULT_BRIDGE = os.environ.get("CURTIN_VMTEST_BRIDGE", "user")
 OUTPUT_DISK_NAME = 'output_disk.img'
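+# boot/install timeouts (seconds); override via environment on slow hosts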
+BOOT_TIMEOUT = int(os.environ.get("CURTIN_VMTEST_BOOT_TIMEOUT", 300))
+INSTALL_TIMEOUT = int(os.environ.get("CURTIN_VMTEST_INSTALL_TIMEOUT", 3000))
 
 _TOPDIR = None
 
@@ -351,7 +347,7 @@
 class VMBaseClass(TestCase):
     __test__ = False
     arch_skip = []
-    boot_timeout = 300
+    boot_timeout = BOOT_TIMEOUT
     collect_scripts = []
     conf_file = "examples/tests/basic.yaml"
     disk_block_size = 512
@@ -361,7 +357,8 @@
     extra_kern_args = None
     fstab_expected = {}
     image_store_class = ImageStore
-    install_timeout = 3000
+    boot_cloudconf = None
+    install_timeout = INSTALL_TIMEOUT
     interactive = False
     multipath = False
     multipath_num_paths = 2
@@ -369,6 +366,7 @@
     recorded_errors = 0
     recorded_failures = 0
     uefi = False
+    proxy = None
 
     # these get set from base_vm_classes
     release = None
@@ -398,7 +396,8 @@
         # set up tempdir
         cls.td = TempDir(
             name=cls.__name__,
-            user_data=generate_user_data(collect_scripts=cls.collect_scripts))
+            user_data=generate_user_data(collect_scripts=cls.collect_scripts,
+                                         boot_cloudconf=cls.boot_cloudconf))
         logger.info('Using tempdir: %s , Image: %s', cls.td.tmpdir,
                     img_verstr)
         cls.install_log = os.path.join(cls.td.logs, 'install-serial.log')
@@ -498,15 +497,18 @@
 
         # proxy config
         configs = [cls.conf_file]
-        proxy = get_apt_proxy()
-        if get_apt_proxy is not None:
+        cls.proxy = get_apt_proxy()
+        if cls.proxy is not None:
             proxy_config = os.path.join(cls.td.install, 'proxy.cfg')
             with open(proxy_config, "w") as fp:
-                fp.write(json.dumps({'apt_proxy': proxy}) + "\n")
+                fp.write(json.dumps({'apt_proxy': cls.proxy}) + "\n")
             configs.append(proxy_config)
 
+        uefi_flags = []
         if cls.uefi:
             logger.debug("Testcase requested launching with UEFI")
+            nvram = os.path.join(cls.td.disks, "ovmf_vars.fd")
+            uefi_flags = ["--uefi-nvram=%s" % nvram]
 
             # always attempt to update target nvram (via grub)
             grub_config = os.path.join(cls.td.install, 'grub.cfg')
@@ -514,22 +516,17 @@
                 fp.write(json.dumps({'grub': {'update_nvram': True}}))
             configs.append(grub_config)
 
-            # make our own copy so we can store guest modified values
-            nvram = os.path.join(cls.td.disks, "ovmf_vars.fd")
-            shutil.copy(OVMF_VARS, nvram)
-            cmd.extend(["--uefi", nvram])
-
         if cls.multipath:
             disks = disks * cls.multipath_num_paths
 
-        cmd.extend(netdevs + disks +
+        cmd.extend(uefi_flags + netdevs + disks +
                    [boot_img, "--kernel=%s" % boot_kernel, "--initrd=%s" %
                     boot_initrd, "--", "curtin", "-vv", "install"] +
                    ["--config=%s" % f for f in configs] +
                    [install_src])
 
         # run vm with installer
-        lout_path = os.path.join(cls.td.logs, "install-launch.out")
+        lout_path = os.path.join(cls.td.logs, "install-launch.log")
         logger.info('Running curtin installer: {}'.format(cls.install_log))
         try:
             with open(lout_path, "wb") as fpout:
@@ -622,7 +619,8 @@
         target_disks.extend([output_disk])
 
         # create xkvm cmd
-        cmd = (["tools/xkvm", "-v", dowait] + netdevs +
+        cmd = (["tools/xkvm", "-v", dowait] +
+               uefi_flags + netdevs +
                target_disks + extra_disks + nvme_disks +
                ["--", "-drive",
                 "file=%s,if=virtio,media=cdrom" % cls.td.seed_disk,
@@ -638,21 +636,11 @@
             else:
                 cmd.extend(["-nographic", "-serial", "file:" + cls.boot_log])
 
-        if cls.uefi:
-            logger.debug("Testcase requested booting with UEFI")
-            uefi_opts = ["-drive", "if=pflash,format=raw,file=" + nvram]
-            if OVMF_CODE != OVMF_VARS:
-                # reorder opts, code then writable space
-                uefi_opts = (["-drive",
-                              "if=pflash,format=raw,readonly,file=" +
-                              OVMF_CODE] + uefi_opts)
-            cmd.extend(uefi_opts)
-
         # run vm with installed system, fail if timeout expires
         try:
             logger.info('Booting target image: {}'.format(cls.boot_log))
             logger.debug('{}'.format(" ".join(cmd)))
-            xout_path = os.path.join(cls.td.logs, "boot-xkvm.out")
+            xout_path = os.path.join(cls.td.logs, "boot-xkvm.log")
             with open(xout_path, "wb") as fpout:
                 cls.boot_system(cmd, console_log=cls.boot_log, proc_out=fpout,
                                 timeout=cls.boot_timeout, purpose="first_boot")
@@ -748,17 +736,24 @@
     # Misc functions that are useful for many tests
     def output_files_exist(self, files):
         for f in files:
+            logger.debug('checking file %s', f)
             self.assertTrue(os.path.exists(os.path.join(self.td.collect, f)))
 
+    def output_files_dont_exist(self, files):
+        for f in files:
+            logger.debug('checking file %s', f)
+            self.assertFalse(os.path.exists(os.path.join(self.td.collect, f)))
+
+    def load_collect_file(self, filename, mode="r"):
+        with open(os.path.join(self.td.collect, filename), mode) as fp:
+            return fp.read()
+
     def check_file_strippedline(self, filename, search):
-        with open(os.path.join(self.td.collect, filename), "r") as fp:
-            data = list(i.strip() for i in fp.readlines())
-        self.assertIn(search, data)
+        lines = self.load_collect_file(filename).splitlines()
+        self.assertIn(search, [i.strip() for i in lines])
 
     def check_file_regex(self, filename, regex):
-        with open(os.path.join(self.td.collect, filename), "r") as fp:
-            data = fp.read()
-        self.assertRegex(data, regex)
+        self.assertRegex(self.load_collect_file(filename), regex)
 
     # To get rid of deprecation warning in python 3.
     def assertRegex(self, s, r):
@@ -978,7 +973,8 @@
     return None
 
 
-def generate_user_data(collect_scripts=None, apt_proxy=None):
+def generate_user_data(collect_scripts=None, apt_proxy=None,
+                       boot_cloudconf=None):
     # this returns the user data for the *booted* system
     # its a cloud-config-archive type, which is
     # just a list of parts.  the 'x-shellscript' parts
@@ -1003,6 +999,10 @@
               'content': yaml.dump(base_cloudconfig, indent=1)},
              {'type': 'text/cloud-config', 'content': ssh_keys}]
 
+    if boot_cloudconf is not None:
+        parts.append({'type': 'text/cloud-config', 'content':
+                      yaml.dump(boot_cloudconf, indent=1)})
+
     output_dir = '/mnt/output'
     output_dir_macro = 'OUTPUT_COLLECT_D'
     output_device = '/dev/disk/by-id/virtio-%s' % OUTPUT_DISK_NAME

=== modified file 'tests/vmtests/helpers.py'
--- tests/vmtests/helpers.py	2016-10-03 18:00:41 +0000
+++ tests/vmtests/helpers.py	2016-10-03 18:55:20 +0000
@@ -119,170 +119,133 @@
     return sorted(releases)
 
 
-def _parse_ifconfig_xenial(ifconfig_out):
-    """Parse ifconfig output from xenial or earlier and return a dictionary.
-    given content like below, return:
-    {'eth0': {'address': '10.8.1.78', 'broadcast': '10.8.1.255',
-              'inet6': [{'address': 'fe80::216:3eff:fe63:c05d',
-                         'prefixlen': '64', 'scope': 'Link'},
-                        {'address': 'fdec:2922:2f07:0:216:3eff:fe63:c05d',
-                         'prefixlen': '64', 'scope': 'Global'}],
-              'interface': 'eth0', 'link_encap': 'Ethernet',
-              'mac_address': '00:16:3e:63:c0:5d', 'mtu': 1500,
-              'multicast': True, 'netmask': '255.255.255.0',
-              'running': True, 'up': True}}
-
-    eth0  Link encap:Ethernet  HWaddr 00:16:3e:63:c0:5d
-          inet addr:10.8.1.78  Bcast:10.8.1.255  Mask:255.255.255.0
-          inet6 addr: fe80::216:3eff:fe63:c05d/64 Scope:Link
-          inet6 addr: fdec:2922:2f07:0:216:3eff:fe63:c05d/64 Scope:Global
-          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
-          RX packets:21503 errors:0 dropped:0 overruns:0 frame:0
-          TX packets:11346 errors:0 dropped:0 overruns:0 carrier:0
-          collisions:0 txqueuelen:1000
-          RX bytes:31556357 (31.5 MB)  TX bytes:870943 (870.9 KB)
-    """
-    ifaces = {}
-    combined_fields = {'addr': 'address', 'Bcast': 'broadcast',
-                       'Mask': 'netmask', 'MTU': 'mtu',
-                       'encap': 'link_encap'}
-    boolmap = {'RUNNING': 'running', 'UP': 'up', 'MULTICAST': 'multicast'}
-
-    for line in ifconfig_out.splitlines():
-        if not line:
-            continue
-        if not line.startswith(" "):
-            cur_iface = line.split()[0].rstrip(":")
-            cur_data = {'inet6': [], 'interface': cur_iface}
-            for t in boolmap.values():
-                cur_data[t] = False
-            ifaces[cur_iface] = cur_data
-
-        toks = line.split()
-
-        if toks[0] == "inet6":
-            cidr = toks[2]
-            address, prefixlen = cidr.split("/")
-            scope = toks[3].split(":")[1]
-            cur_ipv6 = {'address': address, 'scope': scope,
-                        'prefixlen': prefixlen}
-            cur_data['inet6'].append(cur_ipv6)
-            continue
-
-        for i in range(0, len(toks)):
-            cur_tok = toks[i]
-            try:
-                next_tok = toks[i+1]
-            except IndexError:
-                next_tok = None
-
-            if cur_tok == "HWaddr":
-                cur_data['mac_address'] = next_tok
-            elif ":" in cur_tok:
-                key, _colon, val = cur_tok.partition(":")
-                if key in combined_fields:
-                    cur_data[combined_fields[key]] = val
-            elif cur_tok in boolmap:
-                cur_data[boolmap[cur_tok]] = True
-
-        if 'mtu' in cur_data:
-            cur_data['mtu'] = int(cur_data['mtu'])
-
-    return ifaces
-
-
-def _parse_ifconfig_yakkety(ifconfig_out):
-    """Parse ifconfig output from yakkety or later(?) and return a dictionary.
-
-    given ifconfig output like below, return:
-    {'ens2': {'address': '10.5.0.78',
-              'broadcast': '10.5.255.255',
-              'broadcast_flag': True,
-              'inet6': [{'address': 'fe80::f816:3eff:fe05:9673',
-                         'prefixlen': '64', 'scopeid': '0x20<link>'},
-                        {'address': 'fe80::f816:3eff:fe05:9673',
-                         'prefixlen': '64', 'scopeid': '0x20<link>'}],
-              'interface': 'ens2', 'link_encap': 'Ethernet',
-              'mac_address': 'fa:16:3e:05:96:73', 'mtu': 1500,
-              'multicast': True, 'netmask': '255.255.0.0',
-              'running': True, 'up': True}}
-
-    ens2: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
-            inet 10.5.0.78  netmask 255.255.0.0  broadcast 10.5.255.255
-            inet6 fe80::f816:3eff:fe05:9673  prefixlen 64  scopeid 0x20<link>
-            inet6 fe80::f816:3eff:fe05:9673  prefixlen 64  scopeid 0x20<link>
-            ether fa:16:3e:05:96:73  txqueuelen 1000  (Ethernet)
-            RX packets 33196  bytes 48916947 (48.9 MB)
-            RX errors 0  dropped 0  overruns 0  frame 0
-            TX packets 5458  bytes 411486 (411.4 KB)
-            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
-    """
-    fmap = {'mtu': 'mtu', 'inet': 'address',
-            'netmask': 'netmask', 'broadcast': 'broadcast',
-            'ether': 'mac_address'}
-    boolmap = {'RUNNING': 'running', 'UP': 'up', 'MULTICAST': 'multicast',
-               'BROADCAST': 'broadcast_flag'}
-
-    ifaces = {}
-    for line in ifconfig_out.splitlines():
-        if not line:
-            continue
-        if not line.startswith(" "):
-            cur_iface = line.split()[0].rstrip(":")
-            cur_data = {'inet6': [], 'interface': cur_iface}
-            for t in boolmap.values():
-                cur_data[t] = False
-            ifaces[cur_iface] = cur_data
-
-        toks = line.split()
-        if toks[0] == "inet6":
-            cur_ipv6 = {'address': toks[1]}
-            cur_data['inet6'].append(cur_ipv6)
-
-        for i in range(0, len(toks)):
-            cur_tok = toks[i]
-            try:
-                next_tok = toks[i+1]
-            except IndexError:
-                next_tok = None
-            if cur_tok in fmap:
-                cur_data[fmap[cur_tok]] = next_tok
-            elif cur_tok in ('prefixlen', 'scopeid'):
-                cur_ipv6[cur_tok] = next_tok
-                cur_data['inet6'].append
-            elif cur_tok.startswith("flags="):
-                # flags=4163<UP,BROADCAST,RUNNING,MULTICAST>
-                flags = cur_tok[cur_tok.find("<") + 1:
-                                cur_tok.rfind(">")].split(",")
-                for flag in flags:
-                    if flag in boolmap:
-                        cur_data[boolmap[flag]] = True
-            elif cur_tok == "(Ethernet)":
-                cur_data['link_encap'] = 'Ethernet'
-
-        if 'mtu' in cur_data:
-            cur_data['mtu'] = int(cur_data['mtu'])
-
-    return ifaces
-
-
-def ifconfig_to_dict(ifconfig_a):
-    # if the first token of the first line ends in a ':' then assume yakkety
-    # parse ifconfig output and return a dictionary.
-    #
+def _parse_ip_a(ip_a):
+    """
+    2: interface0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1480 qdisc pfifo_fast\
+        state UP group default qlen 1000
+        link/ether 52:54:00:12:34:00 brd ff:ff:ff:ff:ff:ff
+        inet 192.168.1.2/24 brd 192.168.1.255 scope global interface0
+            valid_lft forever preferred_lft forever
+        inet6 2001:4800:78ff:1b:be76:4eff:fe06:1000/64 scope global
+            valid_lft forever preferred_lft forever
+        inet6 fe80::5054:ff:fe12:3400/64 scope link
+        valid_lft forever preferred_lft forever
+    """
+    ifaces = {}
+    combined_fields = {
+        'brd': 'broadcast',
+        'link/ether': 'mac_address',
+    }
+    interface_fields = [
+        'group',
+        'master',
+        'mtu',
+        'qdisc',
+        'qlen',
+        'state',
+    ]
+    inet_fields = [
+        'valid_lft',
+        'preferred_lft'
+    ]
+    boolmap = {
+        'BROADCAST': 'broadcast',
+        'LOOPBACK': 'loopback',
+        'LOWER_UP': 'lower_up',
+        'MULTICAST': 'multicast',
+        'RUNNING': 'running',
+        'UP': 'up',
+    }
+
+    for line in ip_a.splitlines():
+        if not line:
+            continue
+
+        toks = line.split()
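+        # unindented lines begin a new interface stanza,
+        # e.g. '2: interface0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1480'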
+        if not line.startswith("    "):
+            cur_iface = line.split()[1].rstrip(":")
+            cur_data = {
+                'inet4': [],
+                'inet6': [],
+                'interface': cur_iface
+            }
+            # vlan's get a fancy name <iface name>@<vlan_link>
+            if '@' in cur_iface:
+                cur_iface, vlan_link = cur_iface.split("@")
+                cur_data.update({'interface': cur_iface,
+                                 'vlan_link': vlan_link})
+            for t in boolmap.values():
+                # flags live in toks[2], e.g.
+                # <BROADCAST,MULTICAST,UP,LOWER_UP>
+                cur_data[t] = t.upper() in toks[2]
+            ifaces[cur_iface] = cur_data
+
+        for i in range(0, len(toks)):
+            cur_tok = toks[i]
+            try:
+                next_tok = toks[i+1]
+            except IndexError:
+                next_tok = None
+
+            # parse link/ether, brd
+            if cur_tok in combined_fields:
+                cur_data[combined_fields[cur_tok]] = next_tok
+            # mtu and other interface-line key/value pairs
+            elif cur_tok in interface_fields:
+                cur_data[cur_tok] = next_tok
+            elif cur_tok.startswith("inet"):
+                cidr = toks[1]
+                address, prefixlen = cidr.split("/")
+                cur_ip = {
+                    'address': address,
+                    'prefixlen': prefixlen,
+                }
+                if ":" in address:
+                    cur_ipv6 = cur_ip.copy()
+                    cur_ipv6.update({'scope': toks[3]})
+                    cur_data['inet6'].append(cur_ipv6)
+                else:
+                    cur_ipv4 = cur_ip.copy()
+                    if len(toks) > 5:
+                        cur_ipv4.update({'scope': toks[5]})
+                    else:
+                        cur_ipv4.update({'scope': toks[3]})
+                    cur_data['inet4'].append(cur_ipv4)
+
+                continue
+            elif cur_tok in inet_fields:
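+                # valid_lft/preferred_lft annotate the most recently
+                # parsed inet/inet6 address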
+                if ":" in address:
+                    cur_ipv6[cur_tok] = next_tok
+                else:
+                    cur_ipv4[cur_tok] = next_tok
+                continue
+
+    return ifaces
+
+
+def ip_a_to_dict(ip_a):
     # return a dictionary of network information like:
-    #  {'ens2': {'address': '10.5.0.78', 'broadcast': '10.5.255.255',
-    #         'broadcast_flag': True,
-    #         'inet6': [{'address': 'fe80::f816:3eff:fe05:9673',
-    #                    'prefixlen': '64', 'scopeid': '0x20<link>'},
-    #                   {'address': 'fe80::f816:3eff:fe05:9673',
-    #                    'prefixlen': '64', 'scopeid': '0x20<link>'}],
-    #         'interface': 'ens2', 'link_encap': 'Ethernet',
-    #         'mac_address': 'fa:16:3e:05:96:73', 'mtu': 1500,
-    #         'multicast': True, 'netmask': '255.255.0.0',
-    #         'running': True, 'up': True}}
-    line = ifconfig_a.lstrip().splitlines()[0]
-    if line.split()[0].endswith(":"):
-        return _parse_ifconfig_yakkety(ifconfig_a)
-    else:
-        return _parse_ifconfig_xenial(ifconfig_a)
+    # {'interface0': {'broadcast': '10.0.2.255',
+    #                 'group': 'default',
+    #                 'inet4': [{'address': '10.0.2.15',
+    #                            'preferred_lft': 'forever',
+    #                            'prefixlen': '24',
+    #                            'scope': 'global',
+    #                            'valid_lft': 'forever'}],
+    #                 'inet6': [{'address': 'fe80::5054:ff:fe12:3400',
+    #                            'preferred_lft': 'forever',
+    #                            'prefixlen': '64',
+    #                            'scope': 'link',
+    #                            'valid_lft': 'forever'}],
+    #                 'interface': 'interface0',
+    #                 'loopback': False,
+    #                 'lower_up': True,
+    #                 'mac_address': '52:54:00:12:34:00',
+    #                 'mtu': '1500',
+    #                 'multicast': True,
+    #                 'qdisc': 'pfifo_fast',
+    #                 'qlen': '1000',
+    #                 'running': False,
+    #                 'state': 'UP',
+    #                 'up': True},
+    # from iproute2 `ip a` command output
+    return _parse_ip_a(ip_a)

=== added file 'tests/vmtests/test_apt_config_cmd.py'
--- tests/vmtests/test_apt_config_cmd.py	1970-01-01 00:00:00 +0000
+++ tests/vmtests/test_apt_config_cmd.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,55 @@
+""" test_apt_config_cmd
+    Collection of tests for the apt configuration features when called via the
+    apt-config standalone command.
+"""
+import textwrap
+
+from . import VMBaseClass
+from .releases import base_vm_classes as relbase
+
+
+class TestAptConfigCMD(VMBaseClass):
+    """TestAptConfigCMD - test standalone command"""
+    conf_file = "examples/tests/apt_config_command.yaml"
+    interactive = False
+    extra_disks = []
+    fstab_expected = {}
+    disk_to_check = []
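+    # the collect script runs in the booted target; OUTPUT_COLLECT_D is
+    # a macro replaced by generate_user_data with the output mount point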
+    collect_scripts = [textwrap.dedent("""
+        cd OUTPUT_COLLECT_D
+        cat /etc/fstab > fstab
+        ls /dev/disk/by-dname > ls_dname
+        find /etc/network/interfaces.d > find_interfacesd
+        cp /etc/apt/sources.list.d/curtin-dev-ubuntu-test-archive-*.list .
+        cp /etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg .
+        apt-cache policy | grep proposed > proposed-enabled
+        """)]
+
+    def test_cmd_proposed_enabled(self):
+        """check if proposed was enabled"""
+        self.output_files_exist(["proposed-enabled"])
+        self.check_file_regex("proposed-enabled",
+                              r"500.*%s-proposed" % self.release)
+
+    def test_cmd_ppa_enabled(self):
+        """check if specified curtin-dev ppa was enabled"""
+        self.output_files_exist(
+            ["curtin-dev-ubuntu-test-archive-%s.list" % self.release])
+        self.check_file_regex("curtin-dev-ubuntu-test-archive-%s.list" %
+                              self.release,
+                              (r"http://ppa.launchpad.net/"
+                               r"curtin-dev/test-archive/ubuntu"
+                               r" %s main" % self.release))
+
+    def test_cmd_preserve_source(self):
+        """check if cloud-init was prevented from overwriting"""
+        self.output_files_exist(["curtin-preserve-sources.cfg"])
+        self.check_file_regex("curtin-preserve-sources.cfg",
+                              "apt_preserve_sources_list.*true")
+
+
+class XenialTestAptConfigCMD(relbase.xenial, TestAptConfigCMD):
+    """ XenialTestAptConfigCMD
+        apt feature Test for Xenial using the standalone command
+    """
+    __test__ = True

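For reference, test_cmd_proposed_enabled above matches against the pin-priority line that `apt-cache policy` emits for an enabled pocket. A self-contained illustration of the regex (the sample line and release value are assumptions made up for the example):

import re

release = "xenial"  # stands in for self.release
sample = ("500 http://archive.ubuntu.com/ubuntu "
          "xenial-proposed/main amd64 Packages")
# same pattern the test uses against the collected "proposed-enabled" file
assert re.search(r"500.*%s-proposed" % release, sample)
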
=== added file 'tests/vmtests/test_apt_source.py'
--- tests/vmtests/test_apt_source.py	1970-01-01 00:00:00 +0000
+++ tests/vmtests/test_apt_source.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,238 @@
+""" test_apt_source
+    Collection of tests for the apt configuration features
+"""
+import textwrap
+
+from . import VMBaseClass
+from .releases import base_vm_classes as relbase
+
+from unittest import SkipTest
+from curtin import util
+
+
+class TestAptSrcAbs(VMBaseClass):
+    """TestAptSrcAbs - Basic tests for apt features of curtin"""
+    interactive = False
+    extra_disks = []
+    fstab_expected = {}
+    disk_to_check = []
+    collect_scripts = [textwrap.dedent("""
+        cd OUTPUT_COLLECT_D
+        cat /etc/fstab > fstab
+        ls /dev/disk/by-dname > ls_dname
+        find /etc/network/interfaces.d > find_interfacesd
+        apt-key list "F430BBA5" > keyid-F430BBA5
+        apt-key list "0165013E" > keyppa-0165013E
+        apt-key list "F470A0AC" > keylongid-F470A0AC
+        apt-key list "8280B242" > keyraw-8280B242
+        ls -laF /etc/apt/sources.list.d/ > sources.list.d
+        cp /etc/apt/sources.list.d/curtin-dev-ppa.list .
+        cp /etc/apt/sources.list.d/my-repo2.list .
+        cp /etc/apt/sources.list.d/my-repo4.list .
+        cp /etc/apt/sources.list.d/curtin-dev-ubuntu-test-archive-*.list .
+        find /etc/apt/sources.list.d/ -maxdepth 1 -name "*ignore*" | wc -l > ic
+        apt-config dump | grep Retries > aptconf
+        cp /etc/apt/sources.list sources.list
+        cp /etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg .
+        """)]
+    mirror = "http://us.archive.ubuntu.com/ubuntu"
+    secmirror = "http://security.ubuntu.com/ubuntu"
+
+    def test_output_files_exist(self):
+        """test_output_files_exist - Check if all output files exist"""
+        self.output_files_exist(
+            ["fstab", "ic", "keyid-F430BBA5", "keylongid-F470A0AC",
+             "keyraw-8280B242", "keyppa-0165013E", "aptconf", "sources.list",
+             "curtin-dev-ppa.list", "my-repo2.list", "my-repo4.list"])
+        self.output_files_exist(
+            ["curtin-dev-ubuntu-test-archive-%s.list" % self.release])
+
+    def test_keys_imported(self):
+        """test_keys_imported - Check if all keys are imported correctly"""
+        self.check_file_regex("keyid-F430BBA5",
+                              r"Launchpad PPA for Ubuntu Screen Profile")
+        self.check_file_regex("keylongid-F470A0AC",
+                              r"Ryan Harper")
+        self.check_file_regex("keyppa-0165013E",
+                              r"Launchpad PPA for curtin developers")
+        self.check_file_regex("keyraw-8280B242",
+                              r"Christian Ehrhardt")
+
+    def test_preserve_source(self):
+        """test_preserve_source - no clobbering sources.list by cloud-init"""
+        self.output_files_exist(["curtin-preserve-sources.cfg"])
+        self.check_file_regex("curtin-preserve-sources.cfg",
+                              "apt_preserve_sources_list.*true")
+
+    def test_source_files(self):
+        """test_source_files - Check generated .lists for correct content"""
+        # hard coded deb lines
+        self.check_file_strippedline("curtin-dev-ppa.list",
+                                     ("deb http://ppa.launchpad.net/curtin-dev"
+                                      "/test-archive/ubuntu xenial main"))
+        self.check_file_strippedline("my-repo4.list",
+                                     ("deb http://ppa.launchpad.net/curtin-dev"
+                                      "/test-archive/ubuntu xenial main"))
+        # mirror and release replacement in deb line
+        self.check_file_strippedline("my-repo2.list", "deb %s %s multiverse" %
+                                     (self.mirror, self.release))
+        # auto creation by apt-add-repository
+        self.check_file_regex("curtin-dev-ubuntu-test-archive-%s.list" %
+                              self.release,
+                              (r"http://ppa.launchpad.net/"
+                               r"curtin-dev/test-archive/ubuntu"
+                               r" %s main" % self.release))
+
+    def test_ignore_count(self):
+        """test_ignore_count - Check for files that should not be created"""
+        self.check_file_strippedline("ic", "0")
+
+    def test_apt_conf(self):
+        """test_apt_conf - Check if the selected apt conf was set"""
+        self.check_file_strippedline("aptconf", 'Acquire::Retries "3";')
+
+
+class TestAptSrcCustom(TestAptSrcAbs):
+    """TestAptSrcNormal - tests valid in the custom sources.list case"""
+    conf_file = "examples/tests/apt_source_custom.yaml"
+
+    def test_custom_source_list(self):
+        """test_custom_source_list - Check custom sources with replacement"""
+        # check that all replacements happened
+        self.check_file_strippedline("sources.list",
+                                     "deb %s %s main restricted" %
+                                     (self.mirror, self.release))
+        self.check_file_strippedline("sources.list",
+                                     "deb-src %s %s main restricted" %
+                                     (self.mirror, self.release))
+        self.check_file_strippedline("sources.list",
+                                     "deb %s %s universe restricted" %
+                                     (self.mirror, self.release))
+        self.check_file_strippedline("sources.list",
+                                     "deb %s %s-security multiverse" %
+                                     (self.secmirror, self.release))
+        # check for a line that guarantees the file was written by our test
+        self.check_file_strippedline("sources.list",
+                                     "# nice line to check in test")
+
+
+class TestAptSrcPreserve(TestAptSrcAbs):
+    """TestAptSrcPreserve - tests valid in the preserved sources.list case"""
+    conf_file = "examples/tests/apt_source_preserve.yaml"
+    boot_cloudconf = None
+
+    def test_preserved_source_list(self):
+        """test_preserved_source_list - Check sources to be preserved as-is"""
+    # curtin didn't touch it, so we should still find cloud-init's default
+        self.check_file_regex("sources.list",
+                              r"this file is written by cloud-init")
+
+    # overwrite inherited check to match situation here
+    def test_preserve_source(self):
+        """test_preserve_source - check apt_preserve_sources_list not set"""
+        self.output_files_dont_exist(["curtin-preserve-sources.cfg"])
+
+
+class TestAptSrcModify(TestAptSrcAbs):
+    """TestAptSrcModify - tests modifying sources.list"""
+    conf_file = "examples/tests/apt_source_modify.yaml"
+
+    def test_modified_source_list(self):
+        """test_modified_source_list - Check sources with replacement"""
+        # we set us.archive, which is not the default; check for that.
+        # this also catches the case where a target image changes the
+        # expected defaults we have to replace when there is no custom
+        # template
+        self.check_file_regex("sources.list",
+                              r"us.archive.ubuntu.com")
+        self.check_file_regex("sources.list",
+                              r"security.ubuntu.com")
+
+
+class TestAptSrcDisablePockets(TestAptSrcAbs):
+    """TestAptSrcDisablePockets - tests disabling a suite in sources.list"""
+    conf_file = "examples/tests/apt_source_modify_disable_suite.yaml"
+
+    def test_disabled_suite(self):
+        """test_disabled_suite - Check if suites were disabled"""
+        # two suites remain enabled
+        self.check_file_regex("sources.list",
+                              r"deb.*us.archive.ubuntu.com")
+        self.check_file_regex("sources.list",
+                              r"deb.*security.ubuntu.com")
+        # updates disabled
+        self.check_file_regex("sources.list",
+                              r"# suite disabled by curtin:.*-updates")
+
+
+class TestAptSrcModifyArches(TestAptSrcModify):
+    """TestAptSrcModify - tests modifying sources.list with per arch mirror"""
+    # same test, just different yaml to specify the mirrors per arch
+    conf_file = "examples/tests/apt_source_modify_arches.yaml"
+
+
+class TestAptSrcSearch(TestAptSrcAbs):
+    """TestAptSrcSearch - tests checking a list of mirror options"""
+    conf_file = "examples/tests/apt_source_search.yaml"
+
+    def test_mirror_search(self):
+        """test_mirror_search
+           Check searching through a mirror list
+           This is checked in the test (late) intentionally.
+           No matter if resolution worked or failed it shouldn't fail
+           fatally (python error and trace).
+           We just can't rely on the content to be found in that case
+           so we skip the check then."""
+        res1 = util.is_resolvable_url("http://does.not.exist/ubuntu")
+        res2 = util.is_resolvable_url("http://does.also.not.exist/ubuntu")
+        res3 = util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu")
+        res4 = util.is_resolvable_url("http://security.ubuntu.com/ubuntu")
+        if res1 or res2 or not res3 or not res4:
+            raise SkipTest(("Name resolution not as required"
+                            "(%s, %s, %s, %s)" % (res1, res2, res3, res4)))
+
+        self.check_file_regex("sources.list",
+                              r"us.archive.ubuntu.com")
+        self.check_file_regex("sources.list",
+                              r"security.ubuntu.com")
+
+
+class XenialTestAptSrcCustom(relbase.xenial, TestAptSrcCustom):
+    """ XenialTestAptSrcCustom
+       apt feature Test for Xenial with a custom template
+    """
+    __test__ = True
+
+
+class XenialTestAptSrcPreserve(relbase.xenial, TestAptSrcPreserve):
+    """ XenialTestAptSrcPreserve
+       apt feature Test for Xenial with apt_preserve_sources_list enabled
+    """
+    __test__ = True
+
+
+class XenialTestAptSrcModify(relbase.xenial, TestAptSrcModify):
+    """ XenialTestAptSrcModify
+        apt feature Test for Xenial modifying the sources.list of the image
+    """
+    __test__ = True
+
+
+class XenialTestAptSrcSearch(relbase.xenial, TestAptSrcSearch):
+    """ XenialTestAptSrcModify
+        apt feature Test for Xenial searching for mirrors
+    """
+    __test__ = True
+
+
+class XenialTestAptSrcModifyArches(relbase.xenial, TestAptSrcModifyArches):
+    """ XenialTestAptSrcModifyArches
+        apt feature Test for Xenial checking per arch mirror specification
+    """
+    __test__ = True
+
+
+class XenialTestAptSrcDisablePockets(relbase.xenial, TestAptSrcDisablePockets):
+    """ XenialTestAptSrcDisablePockets
+        apt feature Test for Xenial disabling a suite
+    """
+    __test__ = True

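test_mirror_search above decides whether to skip itself based on util.is_resolvable_url. Its implementation is not part of this diff; the contract the test relies on is roughly "does the URL's hostname resolve". A minimal sketch under that assumption (sketch name is hypothetical):

import socket

try:
    from urllib.parse import urlparse   # python3
except ImportError:
    from urlparse import urlparse       # python2

def is_resolvable_url_sketch(url):
    # True iff the URL's hostname resolves via DNS/hosts
    try:
        socket.getaddrinfo(urlparse(url).hostname, None)
        return True
    except socket.error:
        return False
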
=== modified file 'tests/vmtests/test_basic.py'
--- tests/vmtests/test_basic.py	2016-10-03 18:00:41 +0000
+++ tests/vmtests/test_basic.py	2016-10-03 18:55:20 +0000
@@ -3,7 +3,6 @@
     get_apt_proxy)
 from .releases import base_vm_classes as relbase
 
-import os
 import re
 import textwrap
 
@@ -13,7 +12,8 @@
     conf_file = "examples/tests/basic.yaml"
     extra_disks = ['128G', '128G', '4G']
     nvme_disks = ['4G']
-    disk_to_check = [('main_disk', 1), ('main_disk', 2)]
+    disk_to_check = [('main_disk_with_in---valid--dname', 1),
+                     ('main_disk_with_in---valid--dname', 2)]
     collect_scripts = [textwrap.dedent("""
         cd OUTPUT_COLLECT_D
         blkid -o export /dev/vda > blkid_output_vda
@@ -46,21 +46,17 @@
     def test_partition_numbers(self):
         # vde should have partitions 1 and 10
         disk = "vde"
-        proc_partitions_path = os.path.join(self.td.collect,
-                                            'proc_partitions')
-        self.assertTrue(os.path.exists(proc_partitions_path))
         found = []
-        with open(proc_partitions_path, 'r') as fp:
-            for line in fp.readlines():
-                if disk in line:
-                    found.append(line.split()[3])
+        proc_partitions = self.load_collect_file('proc_partitions')
+        for line in proc_partitions.splitlines():
+            if disk in line:
+                found.append(line.split()[3])
         # /proc/partitions should have 3 lines with 'vde' in them.
         expected = [disk + s for s in ["", "1", "10"]]
         self.assertEqual(found, expected)
 
     def test_partitions(self):
-        with open(os.path.join(self.td.collect, "fstab")) as fp:
-            fstab_lines = fp.readlines()
+        fstab_lines = self.load_collect_file('fstab').splitlines()
         print("\n".join(fstab_lines))
         # Test that vda1 is on /
         blkid_info = self.get_blkid_data("blkid_output_vda1")
@@ -93,12 +89,8 @@
 
     def test_whole_disk_format(self):
         # confirm the whole disk format is the expected device
-        with open(os.path.join(self.td.collect,
-                  "btrfs_show_super_vdd"), "r") as fp:
-            btrfs_show_super = fp.read()
-
-        with open(os.path.join(self.td.collect, "ls_uuid"), "r") as fp:
-            ls_uuid = fp.read()
+        btrfs_show_super = self.load_collect_file('btrfs_show_super_vdd')
+        ls_uuid = self.load_collect_file("ls_uuid")
 
         # extract uuid from btrfs superblock
         btrfs_fsid = [line for line in btrfs_show_super.split('\n')
@@ -122,8 +114,7 @@
 
     def test_proxy_set(self):
         expected = get_apt_proxy()
-        with open(os.path.join(self.td.collect, "apt-proxy")) as fp:
-            apt_proxy_found = fp.read().rstrip()
+        apt_proxy_found = self.load_collect_file("apt-proxy").rstrip()
         if expected:
             # the proxy should have gotten set through
             self.assertIn(expected, apt_proxy_found)
@@ -156,12 +147,8 @@
 
     def test_whole_disk_format(self):
         # confirm the whole disk format is the expected device
-        with open(os.path.join(self.td.collect,
-                  "btrfs_show_super_vdd"), "r") as fp:
-            btrfs_show_super = fp.read()
-
-        with open(os.path.join(self.td.collect, "ls_uuid"), "r") as fp:
-            ls_uuid = fp.read()
+        btrfs_show_super = self.load_collect_file("btrfs_show_super_vdd")
+        ls_uuid = self.load_collect_file("ls_uuid")
 
         # extract uuid from btrfs superblock
         btrfs_fsid = re.findall('.*uuid:\ (.*)\n', btrfs_show_super)
@@ -224,7 +211,8 @@
 
 
 class WilyTestBasic(relbase.wily, TestBasicAbs):
-    __test__ = True
+    # EOL - 2016-07-28
+    __test__ = False
 
 
 class XenialTestBasic(relbase.xenial, TestBasicAbs):
@@ -273,21 +261,17 @@
     def test_partition_numbers(self):
         # sdd should have partitions 1 and 10
         disk = "sdd"
-        proc_partitions_path = os.path.join(self.td.collect,
-                                            'proc_partitions')
-        self.assertTrue(os.path.exists(proc_partitions_path))
         found = []
-        with open(proc_partitions_path, 'r') as fp:
-            for line in fp.readlines():
-                if disk in line:
-                    found.append(line.split()[3])
+        proc_partitions = self.load_collect_file('proc_partitions')
+        for line in proc_partitions.splitlines():
+            if disk in line:
+                found.append(line.split()[3])
         # /proc/partitions should have 3 lines with 'sdd' in them.
         expected = [disk + s for s in ["", "1", "10"]]
         self.assertEqual(found, expected)
 
     def test_partitions(self):
-        with open(os.path.join(self.td.collect, "fstab")) as fp:
-            fstab_lines = fp.readlines()
+        fstab_lines = self.load_collect_file('fstab').splitlines()
         print("\n".join(fstab_lines))
         # Test that vda1 is on /
         blkid_info = self.get_blkid_data("blkid_output_sda1")
@@ -320,12 +304,8 @@
 
     def test_whole_disk_format(self):
         # confirm the whole disk format is the expected device
-        with open(os.path.join(self.td.collect,
-                  "btrfs_show_super_sdc"), "r") as fp:
-            btrfs_show_super = fp.read()
-
-        with open(os.path.join(self.td.collect, "ls_uuid"), "r") as fp:
-            ls_uuid = fp.read()
+        btrfs_show_super = self.load_collect_file("btrfs_show_super_sdc")
+        ls_uuid = self.load_collect_file("ls_uuid")
 
         # extract uuid from btrfs superblock
         btrfs_fsid = [line for line in btrfs_show_super.split('\n')

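Most hunks in this file swap the open(os.path.join(self.td.collect, ...)) boilerplate for self.load_collect_file(), presumably added to VMBaseClass elsewhere in this branch. Its assumed shape, sketched for readers of this diff:

import os

def load_collect_file(self, filename, mode="r"):
    # sketch: return the contents of a file collected from the guest,
    # relative to the per-test collect directory (self.td.collect)
    with open(os.path.join(self.td.collect, filename), mode) as fp:
        return fp.read()
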
=== modified file 'tests/vmtests/test_bcache_basic.py'
--- tests/vmtests/test_bcache_basic.py	2016-10-03 18:00:41 +0000
+++ tests/vmtests/test_bcache_basic.py	2016-10-03 18:55:20 +0000
@@ -2,7 +2,6 @@
 from .releases import base_vm_classes as relbase
 
 import textwrap
-import os
 
 
 class TestBcacheBasic(VMBaseClass):
@@ -27,14 +26,12 @@
 
     def test_bcache_status(self):
         bcache_cset_uuid = None
-        fname = os.path.join(self.td.collect, "bcache_super_vda2")
-        with open(fname, "r") as fp:
-            for line in fp.read().splitlines():
-                if line != "" and line.split()[0] == "cset.uuid":
-                    bcache_cset_uuid = line.split()[-1].rstrip()
+        for line in self.load_collect_file("bcache_super_vda2").splitlines():
+            if line != "" and line.split()[0] == "cset.uuid":
+                bcache_cset_uuid = line.split()[-1].rstrip()
         self.assertIsNotNone(bcache_cset_uuid)
-        with open(os.path.join(self.td.collect, "bcache_ls"), "r") as fp:
-            self.assertTrue(bcache_cset_uuid in fp.read().splitlines())
+        self.assertTrue(bcache_cset_uuid in
+                        self.load_collect_file("bcache_ls").splitlines())
 
     def test_bcache_cachemode(self):
         self.check_file_regex("bcache_cache_mode", r"\[writeback\]")

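The cset.uuid scan in test_bcache_status is easy to exercise standalone; the superblock text below is fabricated sample data for illustration only:

# sample lines in the style of `bcache-super-show` output
sample = (
    "sb.magic\t\tok\n"
    "sb.version\t\t3 [cache device]\n"
    "cset.uuid\t\t8c1b2c55-42f4-4e6b-b6bf-4f17330f9bff\n"
)

bcache_cset_uuid = None
for line in sample.splitlines():
    if line != "" and line.split()[0] == "cset.uuid":
        bcache_cset_uuid = line.split()[-1].rstrip()
assert bcache_cset_uuid == "8c1b2c55-42f4-4e6b-b6bf-4f17330f9bff"
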
=== removed file 'tests/vmtests/test_bonding.py'
--- tests/vmtests/test_bonding.py	2016-10-03 18:00:41 +0000
+++ tests/vmtests/test_bonding.py	1970-01-01 00:00:00 +0000
@@ -1,204 +0,0 @@
-from . import VMBaseClass, logger, helpers
-from .releases import base_vm_classes as relbase
-
-import ipaddress
-import os
-import re
-import textwrap
-import yaml
-
-
-class TestNetworkAbs(VMBaseClass):
-    interactive = False
-    conf_file = "examples/tests/bonding_network.yaml"
-    extra_disks = []
-    extra_nics = []
-    collect_scripts = [textwrap.dedent("""
-        cd OUTPUT_COLLECT_D
-        ifconfig -a > ifconfig_a
-        cp -av /etc/network/interfaces .
-        cp -av /etc/udev/rules.d/70-persistent-net.rules .
-        ip -o route show > ip_route_show
-        route -n > route_n
-        dpkg-query -W -f '${Status}' ifenslave > ifenslave_installed
-        find /etc/network/interfaces.d > find_interfacesd
-        """)]
-
-    def test_output_files_exist(self):
-        self.output_files_exist(["ifconfig_a",
-                                 "interfaces",
-                                 "70-persistent-net.rules",
-                                 "ip_route_show",
-                                 "ifenslave_installed",
-                                 "route_n"])
-
-    def test_ifenslave_installed(self):
-        with open(os.path.join(self.td.collect, "ifenslave_installed")) as fp:
-            status = fp.read().strip()
-            logger.debug('ifenslave installed: {}'.format(status))
-            self.assertEqual('install ok installed', status)
-
-    def test_etc_network_interfaces(self):
-        with open(os.path.join(self.td.collect, "interfaces")) as fp:
-            eni = fp.read()
-            logger.debug('etc/network/interfaces:\n{}'.format(eni))
-
-        expected_eni = self.get_expected_etc_network_interfaces()
-        eni_lines = eni.split('\n')
-        for line in expected_eni.split('\n'):
-            self.assertTrue(line in eni_lines)
-
-    def test_ifconfig_output(self):
-        '''check ifconfig output with test input'''
-        network_state = self.get_network_state()
-        logger.debug('expected_network_state:\n{}'.format(
-            yaml.dump(network_state, default_flow_style=False, indent=4)))
-
-        with open(os.path.join(self.td.collect, "ifconfig_a")) as fp:
-            ifconfig_a = fp.read()
-            logger.debug('ifconfig -a:\n{}'.format(ifconfig_a))
-
-        ifconfig_dict = helpers.ifconfig_to_dict(ifconfig_a)
-        logger.debug('parsed ifcfg dict:\n{}'.format(
-            yaml.dump(ifconfig_dict, default_flow_style=False, indent=4)))
-
-        with open(os.path.join(self.td.collect, "ip_route_show")) as fp:
-            ip_route_show = fp.read()
-            logger.debug("ip route show:\n{}".format(ip_route_show))
-            for line in [line for line in ip_route_show.split('\n')
-                         if 'src' in line]:
-                m = re.search(r'^(?P<network>\S+)\sdev\s' +
-                              r'(?P<devname>\S+)\s+' +
-                              r'proto kernel\s+scope link' +
-                              r'\s+src\s(?P<src_ip>\S+)',
-                              line)
-                route_info = m.groupdict('')
-                logger.debug(route_info)
-
-        with open(os.path.join(self.td.collect, "route_n")) as fp:
-            route_n = fp.read()
-            logger.debug("route -n:\n{}".format(route_n))
-
-        interfaces = network_state.get('interfaces')
-        for iface in interfaces.values():
-            subnets = iface.get('subnets', {})
-            if subnets:
-                for index, subnet in zip(range(0, len(subnets)), subnets):
-                    iface['index'] = index
-                    if index == 0:
-                        ifname = "{name}".format(**iface)
-                    else:
-                        ifname = "{name}:{index}".format(**iface)
-
-                    self.check_interface(iface,
-                                         ifconfig_dict.get(ifname),
-                                         route_n)
-            else:
-                iface['index'] = 0
-                self.check_interface(iface,
-                                     ifconfig_dict.get(iface['name']),
-                                     route_n)
-
-    def check_interface(self, iface, ifconfig, route_n):
-        logger.debug(
-            'testing iface:\n{}\n\nifconfig:\n{}'.format(iface, ifconfig))
-        subnets = iface.get('subnets', {})
-        if subnets and iface['index'] != 0:
-            ifname = "{name}:{index}".format(**iface)
-        else:
-            ifname = "{name}".format(**iface)
-
-        # initial check, do we have the correct iface ?
-        logger.debug('ifname={}'.format(ifname))
-        logger.debug("ifconfig['interface']={}".format(ifconfig['interface']))
-        self.assertEqual(ifname, ifconfig['interface'])
-
-        # check physical interface attributes
-        # FIXME: can't check mac_addr under bonding since
-        # the bond might change slave mac addrs
-        for key in ['mtu']:
-            if key in iface and iface[key]:
-                self.assertEqual(iface[key],
-                                 ifconfig[key])
-
-        def __get_subnet(subnets, subidx):
-            for index, subnet in zip(range(0, len(subnets)), subnets):
-                if index == subidx:
-                    break
-            return subnet
-
-        # check subnet related attributes, and specifically only
-        # the subnet specified by iface['index']
-        subnets = iface.get('subnets', {})
-        if subnets:
-            subnet = __get_subnet(subnets, iface['index'])
-            if 'address' in subnet and subnet['address']:
-                if ':' in subnet['address']:
-                    inet_iface = ipaddress.IPv6Interface(
-                        subnet['address'])
-                else:
-                    inet_iface = ipaddress.IPv4Interface(
-                        subnet['address'])
-
-                # check ip addr
-                self.assertEqual(str(inet_iface.ip),
-                                 ifconfig['address'])
-
-                self.assertEqual(str(inet_iface.netmask),
-                                 ifconfig['netmask'])
-
-                self.assertEqual(
-                    str(inet_iface.network.broadcast_address),
-                    ifconfig['broadcast'])
-
-            # handle gateway by looking at routing table
-            if 'gateway' in subnet and subnet['gateway']:
-                gw_ip = subnet['gateway']
-                gateways = [line for line in route_n.split('\n')
-                            if 'UG' in line and gw_ip in line]
-                logger.debug('matching gateways:\n{}'.format(gateways))
-                self.assertEqual(len(gateways), 1)
-                [gateways] = gateways
-                (dest, gw, genmask, flags, metric, ref, use, iface) = \
-                    gateways.split()
-                logger.debug('expected gw:{} found gw:{}'.format(gw_ip, gw))
-                self.assertEqual(gw_ip, gw)
-
-
-class PreciseHWETTestBonding(relbase.precise_hwe_t, TestNetworkAbs):
-    __test__ = True
-    # package names on precise are different, need to check on ifenslave-2.6
-    collect_scripts = TestNetworkAbs.collect_scripts + [textwrap.dedent("""
-             cd OUTPUT_COLLECT_D
-             dpkg-query -W -f '${Status}' ifenslave-2.6 > ifenslave_installed
-             """)]
-
-
-class TrustyTestBonding(relbase.trusty, TestNetworkAbs):
-    __test__ = False
-
-
-class TrustyHWEUTestBonding(relbase.trusty_hwe_u, TrustyTestBonding):
-    __test__ = True
-
-
-class TrustyHWEVTestBonding(relbase.trusty_hwe_v, TrustyTestBonding):
-    # Working, but off by default to safe test suite runtime
-    # oldest/newest HWE-* covered above/below
-    __test__ = False
-
-
-class TrustyHWEWTestBonding(relbase.trusty_hwe_w, TrustyTestBonding):
-    __test__ = True
-
-
-class WilyTestBonding(relbase.wily, TestNetworkAbs):
-    __test__ = True
-
-
-class XenialTestBonding(relbase.xenial, TestNetworkAbs):
-    __test__ = True
-
-
-class YakketyTestBonding(relbase.yakkety, TestNetworkAbs):
-    __test__ = True

=== modified file 'tests/vmtests/test_lvm.py'
--- tests/vmtests/test_lvm.py	2016-10-03 18:00:41 +0000
+++ tests/vmtests/test_lvm.py	2016-10-03 18:55:20 +0000
@@ -64,7 +64,8 @@
 
 
 class WilyTestLvm(relbase.wily, TestLvmAbs):
-    __test__ = True
+    # EOL - 2016-07-28
+    __test__ = False
 
 
 class XenialTestLvm(relbase.xenial, TestLvmAbs):

=== modified file 'tests/vmtests/test_mdadm_bcache.py'
--- tests/vmtests/test_mdadm_bcache.py	2016-10-03 18:00:41 +0000
+++ tests/vmtests/test_mdadm_bcache.py	2016-10-03 18:55:20 +0000
@@ -2,7 +2,6 @@
 from .releases import base_vm_classes as relbase
 
 import textwrap
-import os
 
 
 class TestMdadmAbs(VMBaseClass):
@@ -82,17 +81,16 @@
         bcache_cset_uuid = None
         found = {}
         for bcache_super in bcache_supers:
-            with open(os.path.join(self.td.collect, bcache_super), "r") as fp:
-                for line in fp.read().splitlines():
-                    if line != "" and line.split()[0] == "cset.uuid":
-                        bcache_cset_uuid = line.split()[-1].rstrip()
-                        if bcache_cset_uuid in found:
-                            found[bcache_cset_uuid].append(bcache_super)
-                        else:
-                            found[bcache_cset_uuid] = [bcache_super]
+            for line in self.load_collect_file(bcache_super).splitlines():
+                if line != "" and line.split()[0] == "cset.uuid":
+                    bcache_cset_uuid = line.split()[-1].rstrip()
+                    if bcache_cset_uuid in found:
+                        found[bcache_cset_uuid].append(bcache_super)
+                    else:
+                        found[bcache_cset_uuid] = [bcache_super]
             self.assertIsNotNone(bcache_cset_uuid)
-            with open(os.path.join(self.td.collect, "bcache_ls"), "r") as fp:
-                self.assertTrue(bcache_cset_uuid in fp.read().splitlines())
+            self.assertTrue(bcache_cset_uuid in
+                            self.load_collect_file("bcache_ls").splitlines())
 
         # one cset.uuid for all devices
         self.assertEqual(len(found), 1)
@@ -131,7 +129,8 @@
 
 
 class WilyTestMdadmBcache(relbase.wily, TestMdadmBcacheAbs):
-    __test__ = True
+    # EOL - 2016-07-28
+    __test__ = False
 
 
 class XenialTestMdadmBcache(relbase.xenial, TestMdadmBcacheAbs):
@@ -171,7 +170,8 @@
 
 
 class WilyTestMirrorboot(relbase.wily, TestMirrorbootAbs):
-    __test__ = True
+    # EOL - 2016-07-28
+    __test__ = False
 
 
 class XenialTestMirrorboot(relbase.xenial, TestMirrorbootAbs):
@@ -212,7 +212,8 @@
 
 
 class WilyTestRaid5boot(relbase.wily, TestRaid5bootAbs):
-    __test__ = True
+    # EOL - 2016-07-28
+    __test__ = False
 
 
 class XenialTestRaid5boot(relbase.xenial, TestRaid5bootAbs):
@@ -265,7 +266,8 @@
 
 
 class WilyTestRaid6boot(relbase.wily, TestRaid6bootAbs):
-    __test__ = True
+    # EOL - 2016-07-28
+    __test__ = False
 
 
 class XenialTestRaid6boot(relbase.xenial, TestRaid6bootAbs):
@@ -306,7 +308,8 @@
 
 
 class WilyTestRaid10boot(relbase.wily, TestRaid10bootAbs):
-    __test__ = True
+    # EOL - 2016-07-28
+    __test__ = False
 
 
 class XenialTestRaid10boot(relbase.xenial, TestRaid10bootAbs):
@@ -404,7 +407,8 @@
 
 
 class WilyTestAllindata(relbase.wily, TestAllindataAbs):
-    __test__ = True
+    # EOL - 2016-07-28
+    __test__ = False
 
 
 class XenialTestAllindata(relbase.xenial, TestAllindataAbs):

=== modified file 'tests/vmtests/test_multipath.py'
--- tests/vmtests/test_multipath.py	2016-07-12 16:17:29 +0000
+++ tests/vmtests/test_multipath.py	2016-10-03 18:55:20 +0000
@@ -1,7 +1,6 @@
 from . import VMBaseClass
 from .releases import base_vm_classes as relbase
 
-import os
 import textwrap
 
 
@@ -29,7 +28,7 @@
         ls -al /dev/disk/by-uuid/ > ls_uuid
         ls -al /dev/disk/by-id/ > ls_disk_id
         readlink -f /sys/class/block/sda/holders/dm-0 > holders_sda
-        readlink /sys/class/block/sdb/holders/dm-0 > holders_sdb
+        readlink -f /sys/class/block/sdb/holders/dm-0 > holders_sdb
         cat /etc/fstab > fstab
         mkdir -p /dev/disk/by-dname
         ls /dev/disk/by-dname/ > ls_dname
@@ -37,17 +36,10 @@
         """)]
 
     def test_multipath_disks_match(self):
-        sda = os.path.join(self.td.collect, 'holders_sda')
-        sdb = os.path.join(self.td.collect, 'holders_sdb')
-        self.assertTrue(os.path.exists(sda))
-        self.assertTrue(os.path.exists(sdb))
-        with open(sda, 'r') as fp:
-            sda_data = fp.read()
-            print('sda holders:\n%s' % sda_data)
-        with open(sda, 'r') as fp:
-            sdb_data = fp.read()
-            print('sdb holders:\n%s' % sda_data)
-
+        sda_data = self.load_collect_file("holders_sda")
+        print('sda holders:\n%s' % sda_data)
+        sdb_data = self.load_collect_file("holders_sdb")
+        print('sdb holders:\n%s' % sdb_data)
         self.assertEqual(sda_data, sdb_data)
 
 

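The one-character collect-script fix above matters because plain readlink prints the raw (possibly relative) link target, while readlink -f canonicalizes it, so the sda/sdb holder paths only compare equal in the -f form. The same distinction expressed in Python (the path is just an example):

import os

link = "/sys/class/block/sdb/holders/dm-0"  # example sysfs symlink
# like `readlink`: raw target, only meaningful if the link exists
raw = os.readlink(link) if os.path.islink(link) else None
# like `readlink -f`: fully resolved, canonical path
canonical = os.path.realpath(link)
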
=== modified file 'tests/vmtests/test_network.py'
--- tests/vmtests/test_network.py	2016-10-03 18:00:41 +0000
+++ tests/vmtests/test_network.py	2016-10-03 18:55:20 +0000
@@ -4,36 +4,46 @@
 import ipaddress
 import os
 import re
-import subprocess
 import textwrap
 import yaml
 
 
-class TestNetworkAbs(VMBaseClass):
+class TestNetworkBaseTestsAbs(VMBaseClass):
     interactive = False
-    conf_file = "examples/tests/basic_network.yaml"
     extra_disks = []
     extra_nics = []
     collect_scripts = [textwrap.dedent("""
         cd OUTPUT_COLLECT_D
+        echo "waiting for ipv6 to settle" && sleep 5
         ifconfig -a > ifconfig_a
+        ip link show > ip_link_show
+        ip a > ip_a
+        find /etc/network/interfaces.d > find_interfacesd
         cp -av /etc/network/interfaces .
         cp -av /etc/network/interfaces.d .
-        find /etc/network/interfaces.d > find_interfacesd
         cp /etc/resolv.conf .
         cp -av /etc/udev/rules.d/70-persistent-net.rules .
         ip -o route show > ip_route_show
+        ip -6 -o route show > ip_6_route_show
         route -n > route_n
+        route -6 -n > route_6_n
         cp -av /run/network ./run_network
+        cp -av /var/log/upstart ./upstart ||:
+        sleep 10 && ip a > ip_a
         """)]
 
     def test_output_files_exist(self):
-        self.output_files_exist(["ifconfig_a",
-                                 "interfaces",
-                                 "resolv.conf",
-                                 "70-persistent-net.rules",
-                                 "ip_route_show",
-                                 "route_n"])
+        self.output_files_exist([
+            "70-persistent-net.rules",
+            "find_interfacesd",
+            "ifconfig_a",
+            "interfaces",
+            "ip_a",
+            "ip_route_show",
+            "resolv.conf",
+            "route_6_n",
+            "route_n",
+        ])
 
     def test_etc_network_interfaces(self):
         with open(os.path.join(self.td.collect, "interfaces")) as fp:
@@ -76,27 +86,44 @@
         '''
         expected_ifaces = self.get_expected_etc_resolvconf()
         logger.debug('parsed eni ifaces:\n{}'.format(expected_ifaces))
+
+        def _mk_dns_lines(dns_type, config):
+            """ nameservers get a line per ns
+                search is a space-separated list """
+            lines = []
+            if dns_type == 'nameservers':
+                if not isinstance(config, list):
+                    config = config.split()
+                for ns in config:
+                    lines.append("nameserver %s" % ns)
+            elif dns_type == 'search':
+                if isinstance(config, list):
+                    config = " ".join(config)
+                lines.append("search %s" % config)
+
+            return lines
+
         for ifname in expected_ifaces.keys():
             iface = expected_ifaces.get(ifname)
             for k, v in iface.get('dns', {}).items():
-                dns_line = '{} {}'.format(
-                    k.replace('nameservers', 'nameserver'), " ".join(v))
-                logger.debug('dns_line:{}'.format(dns_line))
-                self.assertTrue(dns_line in resolv_lines)
+                print('k=%s v=%s' % (k, v))
+                for dns_line in _mk_dns_lines(k, v):
+                    logger.debug('dns_line:%s', dns_line)
+                    self.assertTrue(dns_line in resolv_lines)
 
-    def test_ifconfig_output(self):
-        '''check ifconfig output with test input'''
+    def test_ip_output(self):
+        '''check iproute2 'ip a' output with test input'''
         network_state = self.get_network_state()
         logger.debug('expected_network_state:\n{}'.format(
             yaml.dump(network_state, default_flow_style=False, indent=4)))
 
-        with open(os.path.join(self.td.collect, "ifconfig_a")) as fp:
-            ifconfig_a = fp.read()
-            logger.debug('ifconfig -a:\n{}'.format(ifconfig_a))
+        with open(os.path.join(self.td.collect, "ip_a")) as fp:
+            ip_a = fp.read()
+            logger.debug('ip a:\n{}'.format(ip_a))
 
-        ifconfig_dict = helpers.ifconfig_to_dict(ifconfig_a)
-        logger.debug('parsed ifcfg dict:\n{}'.format(
-            yaml.dump(ifconfig_dict, default_flow_style=False, indent=4)))
+        ip_dict = helpers.ip_a_to_dict(ip_a)
+        print('parsed ip_a dict:\n{}'.format(
+            yaml.dump(ip_dict, default_flow_style=False, indent=4)))
 
         with open(os.path.join(self.td.collect, "ip_route_show")) as fp:
             ip_route_show = fp.read()
@@ -115,337 +142,167 @@
             route_n = fp.read()
             logger.debug("route -n:\n{}".format(route_n))
 
+        with open(os.path.join(self.td.collect, "route_6_n")) as fp:
+            route_6_n = fp.read()
+            logger.debug("route -6 -n:\n{}".format(route_6_n))
+
+        routes = {
+            '4': route_n,
+            '6': route_6_n,
+        }
         interfaces = network_state.get('interfaces')
         for iface in interfaces.values():
-            subnets = iface.get('subnets', {})
-            if subnets:
-                for index, subnet in zip(range(0, len(subnets)), subnets):
-                    iface['index'] = index
-                    if index == 0:
-                        ifname = "{name}".format(**iface)
-                    else:
-                        ifname = "{name}:{index}".format(**iface)
-
-                    self.check_interface(iface,
-                                         ifconfig_dict.get(ifname),
-                                         route_n)
-            else:
-                iface['index'] = 0
-                self.check_interface(iface,
-                                     ifconfig_dict.get(iface['name']),
-                                     route_n)
-
-    def check_interface(self, iface, ifconfig, route_n):
-        logger.debug(
-            'testing iface:\n{}\n\nifconfig:\n{}'.format(iface, ifconfig))
-        subnets = iface.get('subnets', {})
-        if subnets and iface['index'] != 0:
-            ifname = "{name}:{index}".format(**iface)
-        else:
-            ifname = "{name}".format(**iface)
-
+            print("\nnetwork_state iface: %s" % (
+                yaml.dump(iface, default_flow_style=False, indent=4)))
+            self.check_interface(iface['name'],
+                                 iface,
+                                 ip_dict.get(iface['name']),
+                                 routes)
+
+    def check_interface(self, ifname, iface, ipcfg, routes):
+        print('check_interface: testing '
+              'ifname:{}\niface:\n{}\n\nipcfg:\n{}'.format(ifname, iface,
+                                                           ipcfg))
+        # FIXME: remove check?
         # initial check, do we have the correct iface?
-        logger.debug('ifname={}'.format(ifname))
-        logger.debug("ifconfig['interface']={}".format(ifconfig['interface']))
-        self.assertEqual(ifname, ifconfig['interface'])
-
-        # check physical interface attributes
-        for key in ['mac_address', 'mtu']:
+        print('ifname={}'.format(ifname))
+        self.assertEqual(ifname, ipcfg['interface'])
+        print("ipcfg['interface']={}".format(ipcfg['interface']))
+
+        # check physical interface attributes (skip bond members, macs change)
+        if iface['type'] in ['physical'] and 'bond-master' not in iface:
+            for key in ['mac_address']:
+                print("checking mac on iface: %s" % iface['name'])
+                if key in iface and iface[key]:
+                    self.assertEqual(iface[key].lower(),
+                                     ipcfg[key].lower())
+
+        # we can check mtu on all interfaces
+        for key in ['mtu']:
             if key in iface and iface[key]:
-                self.assertEqual(iface[key],
-                                 ifconfig[key])
-
-        def __get_subnet(subnets, subidx):
-            for index, subnet in zip(range(0, len(subnets)), subnets):
-                if index == subidx:
-                    break
-            return subnet
-
-        # check subnet related attributes, and specifically only
-        # the subnet specified by iface['index']
-        subnets = iface.get('subnets', {})
-        if subnets:
-            subnet = __get_subnet(subnets, iface['index'])
+                print("checking mtu on iface: %s" % iface['name'])
+                self.assertEqual(int(iface[key]),
+                                 int(ipcfg[key]))
+
+        # check subnet related attributes
+        subnets = iface.get('subnets')
+        if subnets is None:
+            subnets = []
+        for subnet in subnets:
+            config_inet_iface = None
+            found_inet_iface = None
+            print('validating subnet:\n%s' % subnet)
             if 'address' in subnet and subnet['address']:
+                # we will create two ipaddress.IPvXInterface objects,
+                # one based on config, the other from collected data,
+                # and compare them.
+                config_ipstr = subnet['address']
+                if 'netmask' in subnet:
+                    config_ipstr += "/%s" % subnet['netmask']
+
+                # one more bit: construct the right IP version of the
+                # interface object by detecting ":" in the address
+                # (IPv6 vs. IPv4)
                 if ':' in subnet['address']:
-                    inet_iface = ipaddress.IPv6Interface(
-                        subnet['address'])
-                else:
-                    inet_iface = ipaddress.IPv4Interface(
-                        subnet['address'])
-
-                # check ip addr
-                self.assertEqual(str(inet_iface.ip),
-                                 ifconfig['address'])
-
-                self.assertEqual(str(inet_iface.netmask),
-                                 ifconfig['netmask'])
-
-                self.assertEqual(
-                    str(inet_iface.network.broadcast_address),
-                    ifconfig['broadcast'])
-
-            # handle gateway by looking at routing table
-            if 'gateway' in subnet and subnet['gateway']:
-                gw_ip = subnet['gateway']
-                gateways = [line for line in route_n.split('\n')
-                            if 'UG' in line and gw_ip in line]
-                logger.debug('matching gateways:\n{}'.format(gateways))
-                self.assertEqual(len(gateways), 1)
-                [gateways] = gateways
-                (dest, gw, genmask, flags, metric, ref, use, iface) = \
-                    gateways.split()
-                logger.debug('expected gw:{} found gw:{}'.format(gw_ip, gw))
-                self.assertEqual(gw_ip, gw)
-
-
-class TestNetworkStaticAbs(TestNetworkAbs):
-    conf_file = "examples/tests/basic_network_static.yaml"
-
-
-class TestNetworkVlanAbs(TestNetworkAbs):
-    conf_file = "examples/tests/vlan_network.yaml"
-    collect_scripts = TestNetworkAbs.collect_scripts + [textwrap.dedent("""
-             cd OUTPUT_COLLECT_D
-             dpkg-query -W -f '${Status}' vlan > vlan_installed
-             ip -d link show interface1.2667 > ip_link_show_interface1.2667
-             ip -d link show interface1.2668 > ip_link_show_interface1.2668
-             ip -d link show interface1.2669 > ip_link_show_interface1.2669
-             ip -d link show interface1.2670 > ip_link_show_interface1.2670
-             """)]
-
-    def get_vlans(self):
-        network_state = self.get_network_state()
-        logger.debug('get_vlans ns:\n{}'.format(
-            yaml.dump(network_state, default_flow_style=False, indent=4)))
-        interfaces = network_state.get('interfaces')
-        return [iface for iface in interfaces.values()
-                if iface['type'] == 'vlan']
-
-    def test_output_files_exist_vlan(self):
-        link_files = ["ip_link_show_{}".format(vlan['name'])
-                      for vlan in self.get_vlans()]
-        self.output_files_exist(["vlan_installed"] + link_files)
-
-    def test_vlan_installed(self):
-        with open(os.path.join(self.td.collect, "vlan_installed")) as fp:
-            status = fp.read().strip()
-            logger.debug('vlan installed?: {}'.format(status))
-            self.assertEqual('install ok installed', status)
-
-    def test_vlan_enabled(self):
-
-        # we must have at least one
-        self.assertGreaterEqual(len(self.get_vlans()), 1)
-
-        # did they get configured?
-        for vlan in self.get_vlans():
-            link_file = "ip_link_show_" + vlan['name']
-            vlan_msg = "vlan protocol 802.1Q id " + str(vlan['vlan_id'])
-            self.check_file_regex(link_file, vlan_msg)
-
-
-class TestNetworkENISource(TestNetworkAbs):
-    """ Curtin now emits a source /etc/network/interfaces.d/*.cfg
-        line.  This test exercises this feature by emitting additional
-        network configuration in /etc/network/interfaces.d/eth2.cfg
-
-        This relies on the network_config.yaml of the TestClass to
-        define a spare nic with no configuration.  This ensures that
-        a udev rule for eth2 is emitted so we can reference the interface
-        in our injected configuration.
-
-        Note, ifupdown allows multiple stanzas with the same iface name
-        and combines the options together during ifup.  We rely on this
-        feature allowing etc/network/interfaces to have an unconfigured
-        iface eth2 inet manual line, and then defer the configuration
-        to /etc/network/interfaces.d/eth2.cfg
-
-        This testcase then uses curtin.net.deb_parse_config method to
-        extract information about what curtin wrote and compare that
-        with what was actually configured (which we capture via ifconfig)
+                    # v6
+                    config_inet_iface = ipaddress.IPv6Interface(config_ipstr)
+                    ip_func = ipaddress.IPv6Interface
+                    addresses = ipcfg.get('inet6', [])
+                else:
+                    # v4
+                    config_inet_iface = ipaddress.IPv4Interface(config_ipstr)
+                    ip_func = ipaddress.IPv4Interface
+                    addresses = ipcfg.get('inet4', [])
+
+                # find a matching address in the collected data
+                print('found addresses: %s' % addresses)
+                for ip in addresses:
+                    print('cur ip=%s\nsubnet=%s' % (ip, subnet))
+                    # drop /CIDR if present for matching
+                    if (ip['address'].split("/")[0] ==
+                       subnet['address'].split("/")[0]):
+                        print('found a match!')
+                        found_ipstr = ip['address']
+                        if ('netmask' in subnet or '/' in subnet['address']):
+                            found_ipstr += "/%s" % ip.get('prefixlen')
+                        found_inet_iface = ip_func(found_ipstr)
+                        print('found matching inet iface')
+                        break
+
+                # check ipaddress interface matches (config vs. found)
+                self.assertIsNotNone(config_inet_iface)
+                self.assertIsNotNone(found_inet_iface)
+                self.assertEqual(config_inet_iface, found_inet_iface)
+
+            def __find_gw_config(subnet):
+                gateways = []
+                if 'gateway' in subnet:
+                    gateways.append(subnet.get('gateway'))
+                for route in subnet.get('routes', []):
+                    gateways += __find_gw_config(route)
+                return gateways
+
+            # handle gateways by looking at routing table
+            configured_gws = __find_gw_config(subnet)
+            print('iface:%s configured_gws: %s' % (ifname, configured_gws))
+            for gw_ip in configured_gws:
+                logger.debug('found a gateway in subnet config: %s', gw_ip)
+                if ":" in gw_ip:
+                    route_d = routes['6']
+                else:
+                    route_d = routes['4']
+
+                found_gws = [line for line in route_d.split('\n')
+                             if 'UG' in line and gw_ip in line]
+                logger.debug('found gateways in guest output:\n%s', found_gws)
+
+                print('found_gws: %s\nexpected: %s' % (found_gws,
+                                                       configured_gws))
+                self.assertEqual(len(found_gws), len(configured_gws))
+                for fgw in found_gws:
+                    if ":" in gw_ip:
+                        (dest, gw, flags, metric, ref, use, iface) = \
+                            fgw.split()
+                    else:
+                        (dest, gw, genmask, flags, metric, ref, use, iface) = \
+                            fgw.split()
+                    logger.debug('configured gw:%s found gw:%s', gw_ip, gw)
+                    self.assertEqual(gw_ip, gw)
+
+
+class TestNetworkBasicAbs(TestNetworkBaseTestsAbs):
+    """ Basic network testing with ipv4
     """
-
-    conf_file = "examples/tests/network_source.yaml"
-    collect_scripts = [textwrap.dedent("""
-        cd OUTPUT_COLLECT_D
-        ifconfig -a > ifconfig_a
-        cp -av /etc/network/interfaces .
-        cp -a /etc/network/interfaces.d .
-        find /etc/network/interfaces.d > find_interfacesd
-        cp /etc/resolv.conf .
-        cp -av /etc/udev/rules.d/70-persistent-net.rules .
-        ip -o route show > ip_route_show
-        route -n > route_n
-        """)]
-
-    def test_source_cfg_exists(self):
-        """Test that our curthooks wrote our injected config."""
-        self.output_files_exist(["interfaces.d/interface2.cfg"])
-
-    def test_etc_network_interfaces_source_cfg(self):
-        """ Compare injected configuration as parsed by curtin matches
-            how ifup configured the interface."""
-        # interfaces uses absolute paths, fix for test-case
-        interfaces = os.path.join(self.td.collect, "interfaces")
-        cmd = ['sed', '-i.orig', '-e', 's,/etc/network/,,g',
-               '{}'.format(interfaces)]
-        subprocess.check_call(cmd, stderr=subprocess.STDOUT)
-
-        curtin_ifaces = self.parse_deb_config(interfaces)
-        logger.debug('parsed eni dict:\n{}'.format(
-            yaml.dump(curtin_ifaces, default_flow_style=False, indent=4)))
-        print('parsed eni dict:\n{}'.format(
-            yaml.dump(curtin_ifaces, default_flow_style=False, indent=4)))
-
-        with open(os.path.join(self.td.collect, "ifconfig_a")) as fp:
-            ifconfig_a = fp.read()
-            logger.debug('ifconfig -a:\n{}'.format(ifconfig_a))
-
-        ifconfig_dict = helpers.ifconfig_to_dict(ifconfig_a)
-        logger.debug('parsed ifconfig dict:\n{}'.format(
-            yaml.dump(ifconfig_dict, default_flow_style=False, indent=4)))
-        print('parsed ifconfig dict:\n{}'.format(
-            yaml.dump(ifconfig_dict, default_flow_style=False, indent=4)))
-
-        iface = 'interface2'
-        self.assertTrue(iface in curtin_ifaces)
-
-        expected_address = curtin_ifaces[iface].get('address', None)
-        self.assertIsNotNone(expected_address)
-
-        # handle CIDR notation
-        def _nocidr(addr):
-            return addr.split("/")[0]
-        actual_address = ifconfig_dict[iface].get('address', "")
-        self.assertEqual(_nocidr(expected_address), _nocidr(actual_address))
-
-
-class PreciseHWETTestNetwork(relbase.precise_hwe_t, TestNetworkAbs):
-    # FIXME: off due to hang at test: Starting execute cloud user/final scripts
-    __test__ = False
-
-
-class PreciseHWETTestNetworkStatic(relbase.precise_hwe_t,
-                                   TestNetworkStaticAbs):
-    # FIXME: off due to hang at test: Starting execute cloud user/final scripts
-    __test__ = False
-
-
-class TrustyTestNetwork(relbase.trusty, TestNetworkAbs):
-    __test__ = True
-
-
-class TrustyTestNetworkStatic(relbase.trusty, TestNetworkStaticAbs):
-    __test__ = True
-
-
-class TrustyHWEUTestNetwork(relbase.trusty_hwe_u, TrustyTestNetwork):
-    # Working, off by default to safe test suite runtime, covered by bonding
-    __test__ = False
-
-
-class TrustyHWEUTestNetworkStatic(relbase.trusty_hwe_u,
-                                  TestNetworkStaticAbs):
-    # Working, off by default to safe test suite runtime, covered by bonding
-    __test__ = False
-
-
-class TrustyHWEVTestNetwork(relbase.trusty_hwe_v, TrustyTestNetwork):
-    # Working, off by default to safe test suite runtime, covered by bonding
-    __test__ = False
-
-
-class TrustyHWEVTestNetworkStatic(relbase.trusty_hwe_v,
-                                  TestNetworkStaticAbs):
-    # Working, off by default to safe test suite runtime, covered by bonding
-    __test__ = False
-
-
-class TrustyHWEWTestNetwork(relbase.trusty_hwe_w, TrustyTestNetwork):
-    # Working, off by default to safe test suite runtime, covered by bonding
-    __test__ = False
-
-
-class TrustyHWEWTestNetworkStatic(relbase.trusty_hwe_w,
-                                  TestNetworkStaticAbs):
-    # Working, off by default to safe test suite runtime, covered by bonding
-    __test__ = False
-
-
-class WilyTestNetwork(relbase.wily, TestNetworkAbs):
-    __test__ = True
-
-
-class WilyTestNetworkStatic(relbase.wily, TestNetworkStaticAbs):
-    __test__ = True
-
-
-class XenialTestNetwork(relbase.xenial, TestNetworkAbs):
-    __test__ = True
-
-
-class XenialTestNetworkStatic(relbase.xenial, TestNetworkStaticAbs):
-    __test__ = True
-
-
-class YakketyTestNetwork(relbase.yakkety, TestNetworkAbs):
-    __test__ = True
-
-
-class YakketyTestNetworkStatic(relbase.yakkety, TestNetworkStaticAbs):
-    __test__ = True
-
-
-class PreciseTestNetworkVlan(relbase.precise, TestNetworkVlanAbs):
-    __test__ = True
-
-    # precise ip -d link show output is different (of course)
-    def test_vlan_enabled(self):
-
-        # we must have at least one
-        self.assertGreaterEqual(len(self.get_vlans()), 1)
-
-        # did they get configured?
-        for vlan in self.get_vlans():
-            link_file = "ip_link_show_" + vlan['name']
-            vlan_msg = "vlan id " + str(vlan['vlan_id'])
-            self.check_file_regex(link_file, vlan_msg)
-
-
-class TrustyTestNetworkVlan(relbase.trusty, TestNetworkVlanAbs):
-    __test__ = True
-
-
-class WilyTestNetworkVlan(relbase.wily, TestNetworkVlanAbs):
-    __test__ = True
-
-
-class XenialTestNetworkVlan(relbase.xenial, TestNetworkVlanAbs):
-    __test__ = True
-
-
-class YakketyTestNetworkVlan(relbase.yakkety, TestNetworkVlanAbs):
-    __test__ = True
-
-
-class PreciseTestNetworkENISource(relbase.precise, TestNetworkENISource):
-    __test__ = False
-    # not working, still debugging though; possible older ifupdown doesn't
-    # like the multiple iface method.
-
-
-class TrustyTestNetworkENISource(relbase.trusty, TestNetworkENISource):
-    __test__ = True
-
-
-class WilyTestNetworkENISource(relbase.wily, TestNetworkENISource):
-    __test__ = True
-
-
-class XenialTestNetworkENISource(relbase.xenial, TestNetworkENISource):
-    __test__ = True
-
-
-class YakketyTestNetworkENISource(relbase.yakkety, TestNetworkENISource):
+    conf_file = "examples/tests/basic_network.yaml"
+
+
+class PreciseHWETTestNetworkBasic(relbase.precise_hwe_t, TestNetworkBasicAbs):
+    # FIXME: off due to hang at test: Starting execute cloud user/final scripts
+    __test__ = False
+
+
+class TrustyTestNetworkBasic(relbase.trusty, TestNetworkBasicAbs):
+    __test__ = True
+
+
+class TrustyHWEUTestNetworkBasic(relbase.trusty_hwe_u, TrustyTestNetworkBasic):
+    # Working, off by default to save test suite runtime, covered by bonding
+    __test__ = False
+
+
+class TrustyHWEVTestNetworkBasic(relbase.trusty_hwe_v, TrustyTestNetworkBasic):
+    # Working, off by default to save test suite runtime, covered by bonding
+    __test__ = False
+
+
+class TrustyHWEWTestNetworkBasic(relbase.trusty_hwe_w, TrustyTestNetworkBasic):
+    # Working, off by default to save test suite runtime, covered by bonding
+    __test__ = False
+
+
+class XenialTestNetworkBasic(relbase.xenial, TestNetworkBasicAbs):
+    __test__ = True
+
+
+class YakketyTestNetworkBasic(relbase.yakkety, TestNetworkBasicAbs):
     __test__ = True

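The rewritten check_interface compares configured and observed addresses as ipaddress interface objects, which makes netmask vs. prefix-length notation irrelevant. A self-contained example of that idea (the values are made up):

import ipaddress

config = {'address': u'10.0.2.15', 'netmask': u'255.255.255.0'}
found = {'address': u'10.0.2.15', 'prefixlen': u'24'}  # parsed from `ip a`

cfg = ipaddress.IPv4Interface(u"%s/%s" % (config['address'],
                                          config['netmask']))
got = ipaddress.IPv4Interface(u"%s/%s" % (found['address'],
                                          found['prefixlen']))
# equal address and network despite the different notations
assert cfg == got
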
=== added file 'tests/vmtests/test_network_alias.py'
--- tests/vmtests/test_network_alias.py	1970-01-01 00:00:00 +0000
+++ tests/vmtests/test_network_alias.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,40 @@
+from .releases import base_vm_classes as relbase
+from .test_network import TestNetworkBaseTestsAbs
+
+
+class TestNetworkAliasAbs(TestNetworkBaseTestsAbs):
+    """ Multi-ip address network testing
+    """
+    conf_file = "examples/tests/network_alias.yaml"
+
+
+class PreciseHWETTestNetworkAlias(relbase.precise_hwe_t, TestNetworkAliasAbs):
+    # FIXME: off due to hang at test: Starting execute cloud user/final scripts
+    __test__ = True
+
+
+class TrustyTestNetworkAlias(relbase.trusty, TestNetworkAliasAbs):
+    __test__ = True
+
+
+class TrustyHWEUTestNetworkAlias(relbase.trusty_hwe_u, TrustyTestNetworkAlias):
+    # Working, off by default to save test suite runtime, covered by bonding
+    __test__ = False
+
+
+class TrustyHWEVTestNetworkAlias(relbase.trusty_hwe_v, TrustyTestNetworkAlias):
+    # Working, off by default to save test suite runtime, covered by bonding
+    __test__ = False
+
+
+class TrustyHWEWTestNetworkAlias(relbase.trusty_hwe_w, TrustyTestNetworkAlias):
+    # Working, off by default to save test suite runtime, covered by bonding
+    __test__ = False
+
+
+class XenialTestNetworkAlias(relbase.xenial, TestNetworkAliasAbs):
+    __test__ = True
+
+
+class YakketyTestNetworkAlias(relbase.yakkety, TestNetworkAliasAbs):
+    __test__ = True

=== added file 'tests/vmtests/test_network_bonding.py'
--- tests/vmtests/test_network_bonding.py	1970-01-01 00:00:00 +0000
+++ tests/vmtests/test_network_bonding.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,63 @@
+from . import logger
+from .releases import base_vm_classes as relbase
+from .test_network import TestNetworkBaseTestsAbs
+
+import textwrap
+
+
+class TestNetworkBondingAbs(TestNetworkBaseTestsAbs):
+    conf_file = "examples/tests/bonding_network.yaml"
+    collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
+        textwrap.dedent("""
+        cd OUTPUT_COLLECT_D
+        dpkg-query -W -f '${Status}' ifenslave > ifenslave_installed
+        """)]
+
+    def test_output_files_exist_ifenslave(self):
+        self.output_files_exist(["ifenslave_installed"])
+
+    def test_ifenslave_installed(self):
+        status = self.load_collect_file("ifenslave_installed")
+        logger.debug('ifenslave installed: {}'.format(status))
+        self.assertEqual('install ok installed', status)
+
+
+class PreciseHWETTestBonding(relbase.precise_hwe_t, TestNetworkBondingAbs):
+    __test__ = True
+    # package names on precise are different, need to check on ifenslave-2.6
+    collect_scripts = TestNetworkBondingAbs.collect_scripts + [
+        textwrap.dedent("""
+        cd OUTPUT_COLLECT_D
+        dpkg-query -W -f '${Status}' ifenslave-2.6 > ifenslave_installed
+        """)]
+
+
+class TrustyTestBonding(relbase.trusty, TestNetworkBondingAbs):
+    __test__ = False
+
+
+class TrustyHWEUTestBonding(relbase.trusty_hwe_u, TrustyTestBonding):
+    __test__ = True
+
+
+class TrustyHWEVTestBonding(relbase.trusty_hwe_v, TrustyTestBonding):
+    # Working, but off by default to save test suite runtime
+    # oldest/newest HWE-* covered above/below
+    __test__ = False
+
+
+class TrustyHWEWTestBonding(relbase.trusty_hwe_w, TrustyTestBonding):
+    __test__ = True
+
+
+class WilyTestBonding(relbase.wily, TestNetworkBondingAbs):
+    # EOL - 2016-07-28
+    __test__ = False
+
+
+class XenialTestBonding(relbase.xenial, TestNetworkBondingAbs):
+    __test__ = True
+
+
+class YakketyTestBonding(relbase.yakkety, TestNetworkBondingAbs):
+    __test__ = True

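A note on the ifenslave assertion above: dpkg-query's Status field is a three-part value, and "install ok installed" is what a correctly installed package reports. A minimal sketch to reproduce the check by hand (package name as used in the test):

    # prints e.g. "install ok installed"; a removed-but-not-purged package
    # would report "deinstall ok config-files" and fail the assertEqual
    dpkg-query -W -f '${Status}' ifenslave
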
=== added file 'tests/vmtests/test_network_enisource.py'
--- tests/vmtests/test_network_enisource.py	1970-01-01 00:00:00 +0000
+++ tests/vmtests/test_network_enisource.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,91 @@
+from . import logger, helpers
+from .releases import base_vm_classes as relbase
+from .test_network import TestNetworkBaseTestsAbs
+
+import os
+import subprocess
+import yaml
+
+
+class TestNetworkENISource(TestNetworkBaseTestsAbs):
+    """ Curtin now emits a source /etc/network/interfaces.d/*.cfg
+        line.  This test exercises this feature by emitting additional
+        network configuration in /etc/network/interfaces.d/interface2.cfg
+
+        This relies on the network_config.yaml of the TestClass to
+        define a spare nic with no configuration.  This ensures that
+        a udev rule for interface2 is emitted so we can reference the interface
+        in our injected configuration.
+
+        Note, ifupdown allows multiple stanzas with the same iface name
+        and combines the options together during ifup.  We rely on this
+        feature, allowing /etc/network/interfaces to have an unconfigured
+        'iface interface2 inet manual' line, and then defer the configuration
+        to /etc/network/interfaces.d/interface2.cfg
+
+        This testcase then uses the curtin.net.parse_deb_config method to
+        extract information about what curtin wrote and compares that
+        with what was actually configured (which we capture via 'ip a')
+    """
+
+    conf_file = "examples/tests/network_source.yaml"
+
+    def test_source_cfg_exists(self):
+        """Test that our curthooks wrote our injected config."""
+        self.output_files_exist(["interfaces.d/interface2.cfg"])
+
+    def test_etc_network_interfaces_source_cfg(self):
+        """ Compare injected configuration as parsed by curtin matches
+            how ifup configured the interface."""
+        # interfaces uses absolute paths, fix for test-case
+        interfaces = os.path.join(self.td.collect, "interfaces")
+        cmd = ['sed', '-i.orig', '-e', 's,/etc/network/,,g',
+               '{}'.format(interfaces)]
+        subprocess.check_call(cmd, stderr=subprocess.STDOUT)
+
+        curtin_ifaces = self.parse_deb_config(interfaces)
+        logger.debug('parsed eni dict:\n{}'.format(
+            yaml.dump(curtin_ifaces, default_flow_style=False, indent=4)))
+        print('parsed eni dict:\n{}'.format(
+            yaml.dump(curtin_ifaces, default_flow_style=False, indent=4)))
+
+        ip_a = self.load_collect_file("ip_a")
+        logger.debug('ip a:\n{}'.format(ip_a))
+
+        ip_a_dict = helpers.ip_a_to_dict(ip_a)
+        logger.debug('parsed ip_a dict:\n{}'.format(
+            yaml.dump(ip_a_dict, default_flow_style=False, indent=4)))
+        print('parsed ip_a dict:\n{}'.format(
+            yaml.dump(ip_a_dict, default_flow_style=False, indent=4)))
+
+        iface = 'interface2'
+        self.assertIn(iface, curtin_ifaces)
+
+        expected_address = curtin_ifaces[iface].get('address', None)
+        self.assertIsNotNone(expected_address)
+
+        # handle CIDR notation
+        def _nocidr(addr):
+            return addr.split("/")[0]
+
+        [actual_address] = [ip.get('address') for ip in
+                            ip_a_dict[iface].get('inet4', [])]
+        self.assertEqual(_nocidr(expected_address), _nocidr(actual_address))
+
+
+class PreciseTestNetworkENISource(relbase.precise, TestNetworkENISource):
+    __test__ = False
+    # not working, still debugging; possibly an older ifupdown doesn't
+    # like the multiple iface method.
+
+
+class TrustyTestNetworkENISource(relbase.trusty, TestNetworkENISource):
+    __test__ = True
+
+
+class XenialTestNetworkENISource(relbase.xenial, TestNetworkENISource):
+    __test__ = True
+
+
+class YakketyTestNetworkENISource(relbase.yakkety, TestNetworkENISource):
+    __test__ = True

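For context on TestNetworkENISource: a quick way to eyeball the same data the test compares, on the installed system (interface2 comes from the test's network_config; this is a sketch, not part of the test itself):

    # what curtin wrote: the main file sources the fragment directory,
    # and the injected fragment carries the real configuration
    grep -n '^source' /etc/network/interfaces
    cat /etc/network/interfaces.d/interface2.cfg
    # what actually came up; the test parses the collected 'ip a' output
    ip -o -4 addr show dev interface2
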
=== added file 'tests/vmtests/test_network_ipv6.py'
--- tests/vmtests/test_network_ipv6.py	1970-01-01 00:00:00 +0000
+++ tests/vmtests/test_network_ipv6.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,53 @@
+from .releases import base_vm_classes as relbase
+from .test_network import TestNetworkBaseTestsAbs
+
+import textwrap
+
+
+class TestNetworkIPV6Abs(TestNetworkBaseTestsAbs):
+    """ IPV6 complex testing.  The configuration exercises
+        - ipv4 and ipv6 address on same interface
+        - bonding in LACP mode
+        - unconfigured subnets on bond
+        - vlans over bonds
+        - all IP is static
+    """
+    conf_file = "examples/network-ipv6-bond-vlan.yaml"
+    collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
+        textwrap.dedent("""
+        grep . -r /sys/class/net/bond0/ > sysfs_bond0 || :
+        grep . -r /sys/class/net/bond0.108/ > sysfs_bond0.108 || :
+        grep . -r /sys/class/net/bond0.208/ > sysfs_bond0.208 || :
+        """)]
+
+
+class PreciseHWETTestNetwork(relbase.precise_hwe_t, TestNetworkIPV6Abs):
+    # FIXME: off due to hang at test: Starting execute cloud user/final scripts
+    __test__ = False
+
+
+class TrustyTestNetworkIPV6(relbase.trusty, TestNetworkIPV6Abs):
+    __test__ = True
+
+
+class TrustyHWEUTestNetworkIPV6(relbase.trusty_hwe_u, TrustyTestNetworkIPV6):
+    # Working, off by default to save test suite runtime, covered by bonding
+    __test__ = False
+
+
+class TrustyHWEVTestNetworkIPV6(relbase.trusty_hwe_v, TrustyTestNetworkIPV6):
+    # Working, off by default to save test suite runtime, covered by bonding
+    __test__ = False
+
+
+class TrustyHWEWTestNetworkIPV6(relbase.trusty_hwe_w, TrustyTestNetworkIPV6):
+    # Working, off by default to save test suite runtime, covered by bonding
+    __test__ = False
+
+
+class XenialTestNetworkIPV6(relbase.xenial, TestNetworkIPV6Abs):
+    __test__ = True
+
+
+class YakketyTestNetworkIPV6(relbase.yakkety, TestNetworkIPV6Abs):
+    __test__ = True

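The sysfs dumps collected above make the bond state greppable after the fact. A hedged sketch of the kind of attributes involved (standard bonding sysfs paths; actual values depend on the yaml config):

    # an LACP bond reports mode "802.3ad"; the slaves and the vlans
    # stacked on top hang off the same /sys/class/net tree
    cat /sys/class/net/bond0/bonding/mode
    cat /sys/class/net/bond0/bonding/slaves
    cat /sys/class/net/bond0.108/mtu
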
=== added file 'tests/vmtests/test_network_ipv6_enisource.py'
--- tests/vmtests/test_network_ipv6_enisource.py	1970-01-01 00:00:00 +0000
+++ tests/vmtests/test_network_ipv6_enisource.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,26 @@
+from .releases import base_vm_classes as relbase
+from .test_network_enisource import TestNetworkENISource
+
+
+class TestNetworkIPV6ENISource(TestNetworkENISource):
+    conf_file = "examples/tests/network_source_ipv6.yaml"
+
+
+class PreciseTestNetworkIPV6ENISource(relbase.precise,
+                                      TestNetworkIPV6ENISource):
+    __test__ = False
+    # not working, still debugging; possibly an older ifupdown doesn't
+    # like the multiple iface method.
+
+
+class TrustyTestNetworkIPV6ENISource(relbase.trusty, TestNetworkIPV6ENISource):
+    __test__ = True
+
+
+class XenialTestNetworkIPV6ENISource(relbase.xenial, TestNetworkIPV6ENISource):
+    __test__ = True
+
+
+class YakketyTestNetworkIPV6ENISource(relbase.yakkety,
+                                      TestNetworkIPV6ENISource):
+    __test__ = True

=== added file 'tests/vmtests/test_network_ipv6_static.py'
--- tests/vmtests/test_network_ipv6_static.py	1970-01-01 00:00:00 +0000
+++ tests/vmtests/test_network_ipv6_static.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,42 @@
+from .releases import base_vm_classes as relbase
+from .test_network_static import TestNetworkStaticAbs
+
+
+# reuse basic network tests but with different config (static, no dhcp)
+class TestNetworkIPV6StaticAbs(TestNetworkStaticAbs):
+    conf_file = "examples/tests/basic_network_static_ipv6.yaml"
+
+
+class PreciseHWETTestNetworkIPV6Static(relbase.precise_hwe_t,
+                                       TestNetworkIPV6StaticAbs):
+    __test__ = True
+
+
+class TrustyTestNetworkIPV6Static(relbase.trusty, TestNetworkIPV6StaticAbs):
+    __test__ = True
+
+
+class TrustyHWEUTestNetworkIPV6Static(relbase.trusty_hwe_u,
+                                      TestNetworkIPV6StaticAbs):
+    # unsupported kernel, 2016-08
+    __test__ = False
+
+
+class TrustyHWEVTestNetworkIPV6Static(relbase.trusty_hwe_v,
+                                      TestNetworkIPV6StaticAbs):
+    # unsupported kernel, 2016-08
+    __test__ = False
+
+
+class TrustyHWEWTestNetworkIPV6Static(relbase.trusty_hwe_w,
+                                      TestNetworkIPV6StaticAbs):
+    # unsupported kernel, 2016-08
+    __test__ = False
+
+
+class XenialTestNetworkIPV6Static(relbase.xenial, TestNetworkIPV6StaticAbs):
+    __test__ = True
+
+
+class YakketyTestNetworkIPV6Static(relbase.yakkety, TestNetworkIPV6StaticAbs):
+    __test__ = True

=== added file 'tests/vmtests/test_network_ipv6_vlan.py'
--- tests/vmtests/test_network_ipv6_vlan.py	1970-01-01 00:00:00 +0000
+++ tests/vmtests/test_network_ipv6_vlan.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,34 @@
+from .releases import base_vm_classes as relbase
+from .test_network_vlan import TestNetworkVlanAbs
+
+
+class TestNetworkIPV6VlanAbs(TestNetworkVlanAbs):
+    conf_file = "examples/tests/vlan_network_ipv6.yaml"
+
+
+class PreciseTestNetworkIPV6Vlan(relbase.precise, TestNetworkIPV6VlanAbs):
+    __test__ = True
+
+    # precise ip -d link show output is different (of course)
+    def test_vlan_enabled(self):
+
+        # we must have at least one
+        self.assertGreaterEqual(len(self.get_vlans()), 1)
+
+        # did they get configured?
+        for vlan in self.get_vlans():
+            link_file = "ip_link_show_" + vlan['name']
+            vlan_msg = "vlan id " + str(vlan['vlan_id'])
+            self.check_file_regex(link_file, vlan_msg)
+
+
+class TrustyTestNetworkIPV6Vlan(relbase.trusty, TestNetworkIPV6VlanAbs):
+    __test__ = True
+
+
+class XenialTestNetworkIPV6Vlan(relbase.xenial, TestNetworkIPV6VlanAbs):
+    __test__ = True
+
+
+class YakketyTestNetworkIPV6Vlan(relbase.yakkety, TestNetworkIPV6VlanAbs):
+    __test__ = True

=== added file 'tests/vmtests/test_network_mtu.py'
--- tests/vmtests/test_network_mtu.py	1970-01-01 00:00:00 +0000
+++ tests/vmtests/test_network_mtu.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,155 @@
+from .releases import base_vm_classes as relbase
+from .test_network_ipv6 import TestNetworkIPV6Abs
+from curtin import util
+
+import os
+import textwrap
+
+
+class TestNetworkMtuAbs(TestNetworkIPV6Abs):
+    """ Test that the mtu of the ipv6 address is properly
+
+    1.  devices default MTU to 1500, test if mtu under
+        inet6 stanza can be set separately from device
+        mtu (works on Xenial and newer ifupdown), check
+        via sysctl.
+
+    2.  if ipv6 mtu is > than underlying device, this fails
+        and is unnoticed, ifupdown/hook should fix by changing
+        mtu of underlying device to the same size as the ipv6
+        mtu
+
+    3.  order of the v4 vs. v6 stanzas could affect final mtu
+        ipv6 first, then ipv4 with mtu.
+    """
+    conf_file = "examples/tests/network_mtu.yaml"
+    collect_scripts = TestNetworkIPV6Abs.collect_scripts + [textwrap.dedent("""
+        cd OUTPUT_COLLECT_D
+        proc_v6="/proc/sys/net/ipv6/conf"
+        for f in `seq 0 7`; do
+            cat /sys/class/net/interface${f}/mtu > interface${f}_dev_mtu;
+            cat $proc_v6/interface${f}/mtu > interface${f}_ipv6_mtu;
+        done
+        if [ -e /var/log/upstart ]; then
+          cp -a /var/log/upstart ./var_log_upstart
+        fi
+        """)]
+
+    def _load_mtu_data(self, ifname):
+        """ load mtu related files by interface name.
+            returns a dictionary with the following
+            keys: 'device' and 'ipv6'.  """
+
+        mtu_fn = {
+            'device': "%s_dev_mtu" % ifname,
+            'ipv6': "%s_ipv6_mtu" % ifname,
+        }
+        mtu_val = {}
+        for fnk in mtu_fn.keys():
+            fn = os.path.join(self.td.collect, mtu_fn[fnk])
+            mtu_val.update({fnk: int(util.load_file(fn))})
+
+        return mtu_val
+
+    def _check_subnet_mtu(self, subnet, iface):
+        mtu_data = self._load_mtu_data(iface['name'])
+        print('subnet:%s' % subnet)
+        print('mtu_data:%s' % mtu_data)
+        # ipv4 address mtu changes *device* mtu
+        if '.' in subnet['address']:
+            print('subnet_mtu=%s device_mtu=%s' % (int(subnet['mtu']),
+                                                   int(mtu_data['device'])))
+            self.assertEqual(int(subnet['mtu']),
+                             int(mtu_data['device']))
+        # ipv6 address mtu changes *protocol* mtu
+        elif ':' in subnet['address']:
+            print('subnet_mtu=%s ipv6_mtu=%s' % (int(subnet['mtu']),
+                                                 int(mtu_data['ipv6'])))
+            self.assertEqual(int(subnet['mtu']),
+                             int(mtu_data['ipv6']))
+
+    def _check_iface_subnets(self, ifname):
+        network_state = self.get_network_state()
+        interfaces = network_state.get('interfaces')
+
+        iface = interfaces.get(ifname)
+        subnets = iface.get('subnets')
+        print('iface=%s subnets=%s' % (iface['name'], subnets))
+        for subnet in subnets:
+            if 'mtu' in subnet:
+                self._check_subnet_mtu(subnet, iface)
+
+    def _disabled_ipv4_and_ipv6_mtu_all(self):
+        """ we don't pass all tests, skip for now """
+        network_state = self.get_network_state()
+        interfaces = network_state.get('interfaces')
+
+        for iface in interfaces.values():
+            subnets = iface.get('subnets', {})
+            if subnets:
+                for subnet in subnets:
+                    print("iface=%s subnet=%s" % (iface['name'], subnet))
+                    if 'mtu' in subnet:
+                        self._check_subnet_mtu(subnet, iface)
+
+    def test_ipv6_mtu_smaller_than_ipv4_non_default(self):
+        self._check_iface_subnets('interface0')
+
+    def test_ipv6_mtu_equal_ipv4_non_default(self):
+        self._check_iface_subnets('interface1')
+
+    def test_ipv6_mtu_higher_than_default_no_ipv4_mtu(self):
+        self._check_iface_subnets('interface2')
+
+    def test_ipv6_mtu_higher_than_default_no_ipv4_iface_up(self):
+        self._check_iface_subnets('interface3')
+
+    def test_ipv6_mtu_smaller_than_ipv4_v6_iface_first(self):
+        self._check_iface_subnets('interface4')
+
+    def test_ipv6_mtu_equal_ipv4_non_default_v6_iface_first(self):
+        self._check_iface_subnets('interface5')
+
+    def test_ipv6_mtu_higher_than_default_no_ipv4_mtu_v6_iface_first(self):
+        self._check_iface_subnets('interface6')
+
+    def test_ipv6_mtu_higher_than_default_no_ipv4_iface_v6_iface_first(self):
+        self._check_iface_subnets('interface7')
+
+
+class PreciseHWETTestNetworkMtu(relbase.precise_hwe_t, TestNetworkMtuAbs):
+    # FIXME: Precise mtu / ipv6 is buggy
+    __test__ = False
+
+
+class TrustyTestNetworkMtu(relbase.trusty, TestNetworkMtuAbs):
+    __test__ = True
+
+    # FIXME: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=809714
+    # fixed in newer ifupdown than is in trusty
+    def test_ipv6_mtu_smaller_than_ipv4_non_default(self):
+        # trusty ifupdown uses device mtu to change v6 mtu
+        pass
+
+
+class TrustyHWEUTestNetworkMtu(relbase.trusty_hwe_u, TrustyTestNetworkMtu):
+    # unsupported kernel, 2016-08
+    __test__ = False
+
+
+class TrustyHWEVTestNetworkMtu(relbase.trusty_hwe_v, TrustyTestNetworkMtu):
+    # unsupported kernel, 2016-08
+    __test__ = False
+
+
+class TrustyHWEWTestNetworkMtu(relbase.trusty_hwe_w, TrustyTestNetworkMtu):
+    # unsupported kernel, 2016-08
+    __test__ = False
+
+
+class XenialTestNetworkMtu(relbase.xenial, TestNetworkMtuAbs):
+    __test__ = True
+
+
+class YakketyTestNetworkMtu(relbase.yakkety, TestNetworkMtuAbs):
+    __test__ = True

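The device-versus-protocol MTU split that TestNetworkMtuAbs describes can be inspected directly; a sketch using interface0 as in the test config:

    # link (device) MTU, which an ipv4 'mtu' option changes
    cat /sys/class/net/interface0/mtu
    # per-protocol IPv6 MTU from the inet6 stanza; setting it larger than
    # the device MTU fails silently, which is what case 2 above is about
    cat /proc/sys/net/ipv6/conf/interface0/mtu
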
=== added file 'tests/vmtests/test_network_static.py'
--- tests/vmtests/test_network_static.py	1970-01-01 00:00:00 +0000
+++ tests/vmtests/test_network_static.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,44 @@
+from .releases import base_vm_classes as relbase
+from .test_network import TestNetworkBaseTestsAbs
+
+
+class TestNetworkStaticAbs(TestNetworkBaseTestsAbs):
+    """ Static network testing with ipv4
+    """
+    conf_file = "examples/tests/basic_network_static.yaml"
+
+
+class PreciseHWETTestNetworkStatic(relbase.precise_hwe_t,
+                                   TestNetworkStaticAbs):
+    # FIXME: off due to hang at test: Starting execute cloud user/final scripts
+    __test__ = False
+
+
+class TrustyTestNetworkStatic(relbase.trusty, TestNetworkStaticAbs):
+    __test__ = True
+
+
+class TrustyHWEUTestNetworkStatic(relbase.trusty_hwe_u,
+                                  TrustyTestNetworkStatic):
+    # Working, off by default to save test suite runtime, covered by bonding
+    __test__ = False
+
+
+class TrustyHWEVTestNetworkStatic(relbase.trusty_hwe_v,
+                                  TrustyTestNetworkStatic):
+    # Working, off by default to save test suite runtime, covered by bonding
+    __test__ = False
+
+
+class TrustyHWEWTestNetworkStatic(relbase.trusty_hwe_w,
+                                  TrustyTestNetworkStatic):
+    # Working, off by default to save test suite runtime, covered by bonding
+    __test__ = False
+
+
+class XenialTestNetworkStatic(relbase.xenial, TestNetworkStaticAbs):
+    __test__ = True
+
+
+class YakketyTestNetworkStatic(relbase.yakkety, TestNetworkStaticAbs):
+    __test__ = True

=== added file 'tests/vmtests/test_network_vlan.py'
--- tests/vmtests/test_network_vlan.py	1970-01-01 00:00:00 +0000
+++ tests/vmtests/test_network_vlan.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,77 @@
+from . import logger
+from .releases import base_vm_classes as relbase
+from .test_network import TestNetworkBaseTestsAbs
+
+import textwrap
+import yaml
+
+
+class TestNetworkVlanAbs(TestNetworkBaseTestsAbs):
+    conf_file = "examples/tests/vlan_network.yaml"
+    collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
+        textwrap.dedent("""
+             cd OUTPUT_COLLECT_D
+             dpkg-query -W -f '${Status}' vlan > vlan_installed
+             ip -d link show interface1.2667 > ip_link_show_interface1.2667
+             ip -d link show interface1.2668 > ip_link_show_interface1.2668
+             ip -d link show interface1.2669 > ip_link_show_interface1.2669
+             ip -d link show interface1.2670 > ip_link_show_interface1.2670
+             """)]
+
+    def get_vlans(self):
+        network_state = self.get_network_state()
+        logger.debug('get_vlans ns:\n%s', yaml.dump(network_state,
+                                                    default_flow_style=False,
+                                                    indent=4))
+        interfaces = network_state.get('interfaces')
+        return [iface for iface in interfaces.values()
+                if iface['type'] == 'vlan']
+
+    def test_output_files_exist_vlan(self):
+        link_files = ["ip_link_show_%s" % vlan['name']
+                      for vlan in self.get_vlans()]
+        self.output_files_exist(["vlan_installed"] + link_files)
+
+    def test_vlan_installed(self):
+        status = self.load_collect_file("vlan_installed").strip()
+        logger.debug('vlan installed?: %s', status)
+        self.assertEqual('install ok installed', status)
+
+    def test_vlan_enabled(self):
+
+        # we must have at least one
+        self.assertGreaterEqual(len(self.get_vlans()), 1)
+
+        # did they get configured?
+        for vlan in self.get_vlans():
+            link_file = "ip_link_show_" + vlan['name']
+            vlan_msg = "vlan protocol 802.1Q id " + str(vlan['vlan_id'])
+            self.check_file_regex(link_file, vlan_msg)
+
+
+class PreciseTestNetworkVlan(relbase.precise, TestNetworkVlanAbs):
+    __test__ = True
+
+    # precise ip -d link show output is different (of course)
+    def test_vlan_enabled(self):
+
+        # we must have at least one
+        self.assertGreaterEqual(len(self.get_vlans()), 1)
+
+        # did they get configured?
+        for vlan in self.get_vlans():
+            link_file = "ip_link_show_" + vlan['name']
+            vlan_msg = "vlan id " + str(vlan['vlan_id'])
+            self.check_file_regex(link_file, vlan_msg)
+
+
+class TrustyTestNetworkVlan(relbase.trusty, TestNetworkVlanAbs):
+    __test__ = True
+
+
+class XenialTestNetworkVlan(relbase.xenial, TestNetworkVlanAbs):
+    __test__ = True
+
+
+class YakketyTestNetworkVlan(relbase.yakkety, TestNetworkVlanAbs):
+    __test__ = True

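For reference, check_file_regex above matches against captured 'ip -d link show' output; on modern releases a configured vlan prints a details line containing "vlan protocol 802.1Q id <N>", while precise's older iproute2 omits the protocol field (hence the overridden test). A sketch:

    # the collected file ip_link_show_interface1.2667 holds this output
    ip -d link show interface1.2667
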
=== modified file 'tests/vmtests/test_nvme.py'
--- tests/vmtests/test_nvme.py	2016-10-03 18:00:41 +0000
+++ tests/vmtests/test_nvme.py	2016-10-03 18:55:20 +0000
@@ -11,8 +11,6 @@
     ]
     interactive = False
     conf_file = "examples/tests/nvme.yaml"
-    install_timeout = 600
-    boot_timeout = 120
     extra_disks = []
     nvme_disks = ['4G', '4G']
     disk_to_check = [('main_disk', 1), ('main_disk', 2), ('main_disk', 15),
@@ -75,7 +73,8 @@
 
 
 class WilyTestNvme(relbase.wily, TestNvmeAbs):
-    __test__ = True
+    # EOL - 2016-07-28
+    __test__ = False
 
 
 class XenialTestNvme(relbase.xenial, TestNvmeAbs):

=== added file 'tests/vmtests/test_old_apt_features.py'
--- tests/vmtests/test_old_apt_features.py	1970-01-01 00:00:00 +0000
+++ tests/vmtests/test_old_apt_features.py	2016-10-03 18:55:20 +0000
@@ -0,0 +1,89 @@
+""" testold_apt_features
+    Testing the former minimal apt features of curtin
+"""
+import re
+import textwrap
+
+from . import VMBaseClass
+from .releases import base_vm_classes as relbase
+
+from curtin import util
+
+
+class TestOldAptAbs(VMBaseClass):
+    """TestOldAptAbs - Basic tests for old apt features of curtin"""
+    interactive = False
+    extra_disks = []
+    fstab_expected = {}
+    disk_to_check = []
+    collect_scripts = [textwrap.dedent("""
+        cd OUTPUT_COLLECT_D
+        cat /etc/fstab > fstab
+        ls /dev/disk/by-dname > ls_dname
+        find /etc/network/interfaces.d > find_interfacesd
+        grep -A 3 "Name: debconf/priority" /var/cache/debconf/config.dat > debc
+        apt-config dump > aptconf
+        cp /etc/apt/apt.conf.d/90curtin-aptproxy .
+        cp /etc/apt/sources.list .
+        cp /etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg .
+        cp /etc/cloud/cloud.cfg.d/90_dpkg.cfg .
+        """)]
+    arch = util.get_architecture()
+    if arch in ['amd64', 'i386']:
+        conf_file = "examples/tests/test_old_apt_features.yaml"
+        exp_mirror = "http://us.archive.ubuntu.com/ubuntu"
+        exp_secmirror = "http://archive.ubuntu.com/ubuntu"
+    if arch in ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el']:
+        conf_file = "examples/tests/test_old_apt_features_ports.yaml"
+        exp_mirror = "http://ports.ubuntu.com/ubuntu-ports"
+        exp_secmirror = "http://ports.ubuntu.com/ubuntu-ports"
+
+    def test_output_files_exist(self):
+        """test_output_files_exist - Check if all output files exist"""
+        self.output_files_exist(
+            ["debc", "aptconf", "sources.list", "90curtin-aptproxy",
+             "curtin-preserve-sources.cfg", "90_dpkg.cfg"])
+
+    def test_preserve_source(self):
+        """test_preserve_source - no clobbering sources.list by cloud-init"""
+        self.check_file_regex("curtin-preserve-sources.cfg",
+                              "apt_preserve_sources_list.*true")
+
+    def test_debconf(self):
+        """test_debconf - Check if debconf is in place"""
+        self.check_file_strippedline("debc", "Value: low")
+
+    def test_aptconf(self):
+        """test_aptconf - Check if apt conf for proxy is in place"""
+        # this gets configured by tools/launch and get_apt_proxy in
+        # tests/vmtests/__init__.py, so compare with those
+        rproxy = r"Acquire::http::Proxy \"" + re.escape(self.proxy) + r"\";"
+        self.check_file_regex("aptconf", rproxy)
+        self.check_file_regex("90curtin-aptproxy", rproxy)
+
+    def test_mirrors(self):
+        """test_mirrors - Check for mirrors placed in source.list"""
+
+        self.check_file_strippedline("sources.list",
+                                     "deb %s %s" %
+                                     (self.exp_mirror, self.release) +
+                                     " main restricted universe multiverse")
+        self.check_file_strippedline("sources.list",
+                                     "deb %s %s-security" %
+                                     (self.exp_secmirror, self.release) +
+                                     " main restricted universe multiverse")
+
+    def test_cloudinit_seeded(self):
+        content = self.load_collect_file("90_dpkg.cfg")
+        # not the greatest test, but we seeded NoCloud as the only datasource
+        # in examples/tests/test_old_apt_features.yaml.  Just verify that
+        # there are no others there.
+        self.assertIn("nocloud", content.lower())
+        self.assertNotIn("maas", content.lower())
+
+
+class XenialTestOldApt(relbase.xenial, TestOldAptAbs):
+    """ XenialTestOldApt
+       Old apt features for Xenial
+    """
+    __test__ = True

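To reproduce the proxy assertion from TestOldAptAbs by hand on an installed system (the actual proxy URL is whatever get_apt_proxy selected; the value below is made up):

    # the merged apt configuration and curtin's drop-in should agree
    apt-config dump | grep -i 'Acquire::http::Proxy'
    cat /etc/apt/apt.conf.d/90curtin-aptproxy
    # expected form: Acquire::http::Proxy "http://squid.example.com:3128";
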
=== modified file 'tests/vmtests/test_raid5_bcache.py'
--- tests/vmtests/test_raid5_bcache.py	2016-10-03 18:00:41 +0000
+++ tests/vmtests/test_raid5_bcache.py	2016-10-03 18:55:20 +0000
@@ -2,7 +2,6 @@
 from .releases import base_vm_classes as relbase
 
 import textwrap
-import os
 
 
 class TestMdadmAbs(VMBaseClass):
@@ -55,14 +54,12 @@
 
     def test_bcache_status(self):
         bcache_cset_uuid = None
-        fname = os.path.join(self.td.collect, "bcache_super_vda2")
-        with open(fname, "r") as fp:
-            for line in fp.read().splitlines():
-                if line != "" and line.split()[0] == "cset.uuid":
-                    bcache_cset_uuid = line.split()[-1].rstrip()
+        for line in self.load_collect_file("bcache_super_vda2").splitlines():
+            if line != "" and line.split()[0] == "cset.uuid":
+                bcache_cset_uuid = line.split()[-1].rstrip()
         self.assertIsNotNone(bcache_cset_uuid)
-        with open(os.path.join(self.td.collect, "bcache_ls"), "r") as fp:
-            self.assertTrue(bcache_cset_uuid in fp.read().splitlines())
+        self.assertIn(bcache_cset_uuid,
+                      self.load_collect_file("bcache_ls").splitlines())
 
     def test_bcache_cachemode(self):
         self.check_file_regex("bcache_cache_mode", r"\[writeback\]")

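The refactored test_bcache_status is equivalent to this shell pass over the collected files (run from the collect directory; file names per the class collect_scripts):

    # pull the cache-set uuid from the bcache-super-show dump, then confirm
    # the kernel registered that set (bcache_ls lists /sys/fs/bcache)
    cset=$(awk '$1 == "cset.uuid" { print $NF }' bcache_super_vda2)
    grep -x "$cset" bcache_ls
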
=== modified file 'tests/vmtests/test_uefi_basic.py'
--- tests/vmtests/test_uefi_basic.py	2016-10-03 18:00:41 +0000
+++ tests/vmtests/test_uefi_basic.py	2016-10-03 18:55:20 +0000
@@ -2,7 +2,6 @@
 
 from .releases import base_vm_classes as relbase
 
-import os
 import textwrap
 
 
@@ -12,7 +11,7 @@
     conf_file = "examples/tests/uefi_basic.yaml"
     extra_disks = []
     uefi = True
-    disk_to_check = [('main_disk', 1), ('main_disk', 2)]
+    disk_to_check = [('main_disk', 1), ('main_disk', 2), ('main_disk', 3)]
     collect_scripts = [textwrap.dedent("""
         cd OUTPUT_COLLECT_D
         blkid -o export /dev/vda > blkid_output_vda
@@ -40,7 +39,7 @@
              "proc_partitions"])
 
     def test_sys_firmware_efi(self):
-        sys_efi_expected = [
+        sys_efi_possible = [
             'config_table',
             'efivars',
             'fw_platform_size',
@@ -50,22 +49,20 @@
             'systab',
             'vars',
         ]
-        sys_efi = self.td.collect + "ls_sys_firmware_efi"
-        if (os.path.exists(sys_efi)):
-            with open(sys_efi) as fp:
-                efi_lines = fp.read().strip().split('\n')
-                self.assertEqual(sorted(sys_efi_expected),
-                                 sorted(efi_lines))
+        efi_lines = self.load_collect_file(
+            "ls_sys_firmware_efi").strip().split('\n')
+
+        # sys/firmware/efi contents differ based on kernel and configuration
+        for efi_line in efi_lines:
+            self.assertIn(efi_line, sys_efi_possible)
 
     def test_disk_block_sizes(self):
         """ Test disk logical and physical block size are match
             the class block size.
         """
         for bs in ['lbs', 'pbs']:
-            with open(os.path.join(self.td.collect,
-                      'vda_' + bs), 'r') as fp:
-                size = int(fp.read())
-                self.assertEqual(self.disk_block_size, size)
+            size = int(self.load_collect_file('vda_' + bs))
+            self.assertEqual(self.disk_block_size, size)
 
     def test_disk_block_size_with_blockdev(self):
         """ validate maas setting
@@ -75,10 +72,8 @@
         --getbsz                  get blocksize
         """
         for syscall in ['getss', 'getpbsz']:
-            with open(os.path.join(self.td.collect,
-                      'vda_blockdev_' + syscall), 'r') as fp:
-                size = int(fp.read())
-                self.assertEqual(self.disk_block_size, size)
+            size = int(self.load_collect_file('vda_blockdev_' + syscall))
+            self.assertEqual(self.disk_block_size, size)
 
 
 class PreciseUefiTestBasic(relbase.precise, TestBasicAbs):
@@ -105,7 +100,8 @@
 
 
 class WilyUefiTestBasic(relbase.wily, TestBasicAbs):
-    __test__ = True
+    # EOL - 2016-07-28
+    __test__ = False
 
 
 class XenialUefiTestBasic(relbase.xenial, TestBasicAbs):
@@ -125,6 +121,8 @@
 
 
 class WilyUefiTestBasic4k(WilyUefiTestBasic):
+    # EOL - 2016-07-28
+    __test__ = False
     disk_block_size = 4096
 
 

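The block-size assertions mirror what MAAS itself checks. By hand, against the install disk of the test VM (device name per the collect scripts):

    blockdev --getss /dev/vda     # logical sector size ('getss')
    blockdev --getpbsz /dev/vda   # physical sector size ('getpbsz')
    ls /sys/firmware/efi          # populated only when booted via UEFI
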
=== modified file 'tools/jenkins-runner'
--- tools/jenkins-runner	2016-10-03 18:00:41 +0000
+++ tools/jenkins-runner	2016-10-03 18:55:20 +0000
@@ -1,12 +1,13 @@
 #!/bin/bash
 
-topdir=${CURTIN_VMTEST_TOPDIR:-"${WORKSPACE:-$PWD}/output"}
+topdir="${CURTIN_VMTEST_TOPDIR:-${WORKSPACE:-$PWD}/output}"
 pkeep=${CURTIN_VMTEST_KEEP_DATA_PASS:-logs,collect}
 fkeep=${CURTIN_VMTEST_KEEP_DATA_FAIL:-logs,collect}
 export CURTIN_VMTEST_KEEP_DATA_PASS=$pkeep
 export CURTIN_VMTEST_KEEP_DATA_FAIL=$fkeep
 export CURTIN_VMTEST_TOPDIR="$topdir"
-export CURTIN_VMTEST_LOG=${CURTIN_VMTEST_LOG:-"$topdir/debug.log"}
+export CURTIN_VMTEST_LOG="${CURTIN_VMTEST_LOG:-$topdir/debug.log}"
+export CURTIN_VMTEST_PARALLEL=${CURTIN_VMTEST_PARALLEL:-0}
 export IMAGE_DIR=${IMAGE_DIR:-/srv/images}
 
 fail() { echo "$@" 1>&2; exit 1; }
@@ -16,16 +17,41 @@
 fi
 mkdir -p "$topdir" || fail "failed mkdir $topdir"
 
-if [ $# -eq 0 ]; then
+start_s=$(date +%s)
+parallel=${CURTIN_VMTEST_PARALLEL}
+ntargs=( )
+while [ $# -ne 0 ]; do
+    case "$1" in
+        -p|--parallel) parallel="$2"; shift;;
+        --parallel=*) parallel=${1#*=};;
+        -p[0-9]|-p-1|-p[0-9][0-9]) parallel=${1#-p};;
+        --)
+            shift
+            break
+            ;;
+        *) ntargs[${#ntargs[@]}]="$1";;
+    esac
+    shift;
+done
+
+CURTIN_VMTEST_PARALLEL=$parallel
+
+if [ ${#ntargs[@]} -eq 0 ]; then
    set -- -vv --nologcapture tests/vmtests/
 fi
 
-start_s=$(date +%s)
 # dump CURTIN* variables just for info
 for v in ${!CURTIN_*}; do
    echo "$v=${!v}"
 done
 
+ntargs=( "${ntargs[@]}" "$@" )
+
+pargs=( )
+if [ -n "$parallel" -a "$parallel" != "0" -a "$parallel" != "1" ]; then
+    pargs=( --process-timeout=86400 "--processes=$parallel" )
+fi
+
 # avoid LOG info by running python3 tests/vmtests/image_sync.py
 # rather than python3 -m tests.vmtests.image_sync (LP: #1594465)
 echo "Working with images in $IMAGE_DIR"
@@ -34,11 +60,11 @@
     --output-format="$fmt" "$IMAGE_DIR" ftype=root-image.gz ||
     { echo "WARNING: error querying images in $IMAGE_DIR" 1>&2; }
 
-echo "$(date -R): vmtest start: nosetests3 $*"
-nosetests3 "$@"
+echo "$(date -R): vmtest start: nosetests3 ${pargs[*]} ${ntargs[*]}"
+nosetests3 "${pargs[@]}" "${ntargs[@]}"
 ret=$?
 end_s=$(date +%s)
 echo "$(date -R): vmtest end [$ret] in $(($end_s-$start_s))s"
 exit $ret
 
-# vi: ts=4 expandtab
+# vi: ts=4 expandtab syntax=sh

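Given the option parsing added above, a parallel run can be requested in any of these equivalent ways (the test selection is only an example); arguments after '--' are passed to nosetests untouched:

    ./tools/jenkins-runner -p 4 tests/vmtests/test_network.py
    ./tools/jenkins-runner --parallel=4 tests/vmtests/test_network.py
    CURTIN_VMTEST_PARALLEL=4 ./tools/jenkins-runner tests/vmtests/test_network.py
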
=== modified file 'tools/launch'
--- tools/launch	2016-10-03 18:00:41 +0000
+++ tools/launch	2016-10-03 18:55:20 +0000
@@ -34,7 +34,6 @@
                         elements. Examine qemu-kvm -device scsi-hd,? for
                         details.
            --vnc D      use -vnc D (mutually exclusive with --silent)
-           --uefi   N   enable uefi boot method, store nvram at N
       -h | --help       show this message
       -i | --initrd F   use initramfs F
       -k | --kernel F   use kernel K
@@ -52,6 +51,10 @@
            --no-install-deps  do not insert '--install-deps'
                               on curtin command invocations
 
+    the following are passed through to xkvm:
+        --uefi-nvram
+        --bios
+
    use of --kernel/--initrd will seed cloud-init via cmdline
    rather than the local datasource
 
@@ -249,7 +252,7 @@
 
 main() {
     local short_opts="a:A:d:h:i:k:n:p:v"
-    local long_opts="add:,append:,arch:,bios:,disk:,dowait,help,initrd:,kernel:,mem:,netdev:,no-dowait,power:,publish:,root-arg:,silent,serial-log:,uefi:,verbose,vnc:"
+    local long_opts="add:,append:,arch:,bios:,disk:,dowait,help,initrd:,kernel:,mem:,netdev:,no-dowait,power:,publish:,root-arg:,silent,serial-log:,uefi-nvram:,verbose,vnc:"
     local getopt_out=""
     getopt_out=$(getopt --name "${0##*/}" \
         --options "${short_opts}" --long "${long_opts}" -- "$@") &&
@@ -262,7 +265,7 @@
     local tmp="" top_d
     local initrd="" kernel="" uappend="" iargs="" disk_args=""
     local pubs="" disks="" pstate="null"
-    local uefi="" bios="" bsize="512"
+    local bsize="512"
     local netdevs="" install_deps="--install-deps"
     local arch_hint=""
     local video="-curses -vga std" serial_log="serial.log"
@@ -290,7 +293,6 @@
                --add) addfiles[${#addfiles[@]}]="$next"; shift;;
             -a|--append) uappend="$next"; shift;;
             -A|--arch) arch_hint="$next"; shift;;
-               --bios) bios="$2"; shift;;
             -d|--disk) disks[${#disks[@]}]="$next"; shift;;
                --dowait) pt[${#pt[@]}]="$cur"; dowait=true;;
             -h|--help) Usage ; exit 0;;
@@ -312,7 +314,9 @@
                --root-arg) root_arg="$next";;
                --serial-log) serial_log="$next"; shift;;
                --silent) video="-nographic";;
-               --uefi) uefi="$2"; shift;;
+            --uefi-nvram|--bios)
+                # handle all --opt=* pass through here.
+                pt[${#pt[@]}]="$cur=$next";;
             -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));;
                --vnc)
                     video="-vnc $next"
@@ -340,48 +344,6 @@
     trap cleanup EXIT
     register_signal_handlers
 
-    local bios_opts=""
-
-    bios_opts=( )
-    if [ -n "$bios" ]; then
-        bios_opts=(  -drive "if=pflash,format=raw,file=$bios" )
-    elif [ -n "$uefi" ]; then
-        case `lsb_release -sc` in
-            precise|trusty|vivid)
-                # for non-split UEFI firmware, the code and
-                # var space are in the same file.  We must
-                # make a copy so we can retain modifications.
-                local ovmf_code="/usr/share/ovmf/OVMF.fd"
-                local ovmf_var=$ovmf_code
-                ;;
-            *)
-                # anything newer than vivid has split UEFI firmware
-                local ovmf_code="/usr/share/OVMF/OVMF_CODE.fd"
-                local ovmf_var="/usr/share/OVMF/OVMF_VARS.fd"
-                ;;
-        esac
-        [ -f "$ovmf_code" ] || {
-            error "no --uefi requires ovmf bios: apt-get install ovmf"
-            return 1;
-        }
-        # user specified where to write nvram data in --uefi param
-        # pre-populate it with the OVMF_VARS.fd template
-        local nvram=${uefi}
-        cp -a "${ovmf_var}" "${nvram}" || {
-            error "failed to create OVMF nvram file: '$nvram'"
-            return 1;
-        }
-        # default to the rw copy of UEFI code
-        local uefi_opts="-drive file=$nvram,if=pflash,format=raw"
-        # if firmware is split, use readonly-code section
-        if [ "$ovmf_code" != "$ovmf_var" ]; then
-            # to ensure bootability, re-order firmware, code then variables
-            uefi_opts="-drive file=$ovmf_code,if=pflash,format=raw,readonly $uefi_opts"
-        fi
-
-        bios_opts=( $uefi_opts )
-    fi
-
     if [ "${#disks[@]}" -eq 0 ]; then
         disks=( "${TEMP_D}/disk1.img" )
     fi
@@ -624,7 +586,6 @@
     # -monitor stdio
     cmd=(
         xkvm "${pt[@]}" "${netargs[@]}" --
-        "${bios_opts[@]}"
         -m ${mem} ${serial_args} ${video}
         -drive "file=$bootimg,if=none,cache=unsafe,format=qcow2,id=boot,index=0"
         -device "virtio-blk,drive=boot"

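With the OVMF handling moved into xkvm, launch now only forwards --bios and --uefi-nvram. An illustrative invocation (image and nvram paths are placeholders, not from this changeset):

    # boot the target image under OVMF, persisting EFI variables in nvram.img
    ./tools/launch --uefi-nvram nvram.img my-boot-image.img
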
=== modified file 'tools/xkvm'
--- tools/xkvm	2016-10-03 18:00:41 +0000
+++ tools/xkvm	2016-10-03 18:55:20 +0000
@@ -75,6 +75,10 @@
       -d | --disk  DISK.img   attach DISK.img as a disk (via virtio)
            --dry-run          only report what would be done
 
+           --uefi             boot with efi
+           --uefi-nvram=FILE  boot with efi, using nvram settings in FILE
+                              if FILE not present, copy from defaults.
+
    NETDEV:
     Above, 'NETDEV' is a comma delimited string
     The first field must be
@@ -239,9 +243,78 @@
     echo "$vline"
 }
 
+get_bios_opts() {
+    # get_bios_opts(bios, uefi, nvram)
+    # bios is an explicit bios image to boot.
+    # uefi is a boolean indicating uefi boot.
+    # nvram is optional and indicates that ovmf vars should be copied
+    # to that file if it does not exist. if it exists, use it.
+    local bios="$1" uefi="${2:-false}" nvram="$3"
+    local ovmf_dir="/usr/share/OVMF"
+    local bios_opts="" pflash_common="if=pflash,format=raw"
+    unset _RET
+    _RET=( )
+    if [ -n "$bios" ]; then
+        _RET=( -drive "${pflash_common},file=$bios" )
+        return 0
+    elif ! $uefi; then
+        return 0
+    fi
+
+    # ovmf in older releases (14.04) shipped only a single file
+    #   /usr/share/ovmf/OVMF.fd
+    # newer ovmf ships split files
+    #   /usr/share/OVMF/OVMF_CODE.fd
+    #   /usr/share/OVMF/OVMF_VARS.fd
+    # with single file, pass only one file and read-write
+    # with split, pass code as readonly and vars as read-write
+    local joined="/usr/share/ovmf/OVMF.fd"
+    local code="/usr/share/OVMF/OVMF_CODE.fd"
+    local vars="/usr/share/OVMF/OVMF_VARS.fd"
+    local split="" nvram_src=""
+    if [ -e "$code" -o -e "$vars" ]; then
+        split=true
+        nvram_src="$vars"
+    elif [ -e "$joined" ]; then
+        split=false
+        nvram_src="$joined"
+    elif [ -n "$nvram" -a -e "$nvram" ]; then
+        error "WARN: nvram given, but did not find expected ovmf files."
+        error "      assuming this is code and vars (OVMF.fd)"
+        split=false
+    else
+        error "uefi support requires ovmf bios: apt-get install -qy ovmf"
+        return 1
+    fi
+
+    if [ -n "$nvram" ]; then
+        if [ ! -f "$nvram" ]; then
+            cp "$nvram_src" "$nvram" || 
+                { error "failed copy $nvram_src to $nvram"; return 1; }
+            debug 1 "copied $nvram_src to $nvram"
+        fi
+    else
+        debug 1 "uefi without --uefi-nvram storage." \
+            "nvram settings likely will not persist."
+        nvram="${nvram_src}"
+    fi
+
+    local nvram_ro=""
+    if [ ! -w "$nvram" ]; then
+        debug 1 "nvram file ${nvram} is readonly"
+        nvram_ro="readonly"
+    fi
+
+    if $split; then
+        # to ensure bootability firmware must be first, then variables
+        _RET=( -drive "${pflash_common},file=$code,readonly" )
+    fi
+    _RET=( "${_RET[@]}"
+           -drive "${pflash_common},file=$nvram${nvram_ro:+,${nvram_ro}}" )
+}
+
 main() {
     local short_opts="hd:n:v"
-    local long_opts="help,dowait,disk:,dry-run,kvm:,no-dowait,netdev:,verbose"
+    local long_opts="bios:,help,dowait,disk:,dry-run,kvm:,no-dowait,netdev:,uefi,uefi-nvram:,verbose"
     local getopt_out=""
     getopt_out=$(getopt --name "${0##*/}" \
         --options "${short_opts}" --long "${long_opts}" -- "$@") &&
@@ -252,6 +325,7 @@
     local kvm="" kvmcmd="" archopts=""
     local def_disk_driver=${DEF_DISK_DRIVER:-"virtio-blk"}
     local def_netmodel=${DEF_NETMODEL:-"virtio-net-pci"}
+    local bios="" uefi=false uefi_nvram=""
 
     archopts=( )
     kvmcmd=( )
@@ -282,6 +356,9 @@
             -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));;
             --dowait) dowait=true;;
             --no-dowait) dowait=false;;
+            --bios) bios="$next"; shift;;
+            --uefi) uefi=true;;
+            --uefi-nvram) uefi=true; uefi_nvram="$next"; shift;;
             --) shift; break;;
         esac
         shift;
@@ -321,6 +398,15 @@
     KVM="$kvm"
     kvmcmd=( $kvm -enable-kvm )
 
+    local bios_opts=""
+    if [ -n "$bios" ] && $uefi; then
+        error "--uefi (or --uefi-nvram) is incompatible with --bios"
+        return 1
+    fi
+    get_bios_opts "$bios" "$uefi" "$uefi_nvram" ||
+        { error "failed to get bios opts"; return 1; }
+    bios_opts=( "${_RET[@]}" )
+
     local out="" fmt="" bus="" unit="" index="" serial="" driver="" devopts=""
     local busorindex="" driveopts="" cur="" val="" file=""
     for((i=0;i<${#diskdevs[@]};i++)); do
@@ -538,7 +624,9 @@
 
     local bus_devices
     bus_devices=( -device "$virtio_scsi_bus,id=virtio-scsi-xkvm" )
-    cmd=( "${kvmcmd[@]}" "${archopts[@]}" "${bus_devices[@]}"
+    cmd=( "${kvmcmd[@]}" "${archopts[@]}" 
+          "${bios_opts[@]}"
+          "${bus_devices[@]}"
           "${netargs[@]}"
           "${diskargs[@]}" "${pt[@]}" )
     local pcmd=$(quote_cmd "${cmd[@]}")

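Concretely, for split firmware get_bios_opts returns a read-only pflash drive for the code and a writable one for the variables. A hedged example of the invocation and the qemu arguments it implies:

    ./tools/xkvm --uefi-nvram=nvram.img -- -m 1024
    # expands, on a split-firmware release, to roughly:
    #   -drive if=pflash,format=raw,file=/usr/share/OVMF/OVMF_CODE.fd,readonly
    #   -drive if=pflash,format=raw,file=nvram.img
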
=== modified file 'tox.ini'
--- tox.ini	2016-05-10 16:13:29 +0000
+++ tox.ini	2016-10-03 18:55:20 +0000
@@ -1,7 +1,7 @@
 [tox]
 minversion = 1.6
 skipsdist = True
-envlist = py27, py3, py3-flake8, py3-pylint, py27-pylint, trusty-check, coverage27, coverage3
+envlist = py27, py3, py3-flake8, py3-pylint, py27-pylint, trusty-check, trusty-py27, trusty-py3
 
 [tox:jenkins]
 downloadcache = ~/cache/pip
@@ -13,11 +13,16 @@
     LC_ALL = en_US.utf-8
 deps = -r{toxinidir}/test-requirements.txt
     -r{toxinidir}/requirements.txt
-commands = {envpython} {toxinidir}/tools/noproxy nosetests {posargs} tests/unittests
+commands = {envpython} {toxinidir}/tools/noproxy nosetests \
+    {posargs:--with-coverage --cover-erase --cover-branches \
+       --cover-package=curtin --cover-inclusive tests/unittests}
 
 [testenv:py3]
 basepython = python3
 
+[testenv:py27]
+basepython = python2.7
+
 # tox uses '--pre' by default to pip install.  We don't want that, and
 # 'pip_pre=False' isn't available until tox version 1.9.
 install_command = pip install {opts} {packages}
@@ -32,7 +37,7 @@
 basepython = python3
 deps = {[testenv]deps}
     flake8
-commands = {envpython} -m flake8 {posargs:curtin tests/vmtests}
+commands = {envpython} -m flake8 {posargs:curtin tests/}
 
 [testenv:py3-pylint]
 # set basepython because tox 1.6 (trusty) does not support generated environments
@@ -49,32 +54,44 @@
     pylint==1.5.4
 commands = {envpython} -m pylint --errors-only {posargs:curtin}
 
-[testenv:coverage3]
-envdir = {toxworkdir}/py34
-commands = {envpython} {toxinidir}/tools/noproxy nosetests --with-coverage --cover-erase --cover-branches --cover-package=curtin --cover-inclusive {posargs} tests/unittests
-
-[testenv:coverage27]
-envdir = {toxworkdir}/py27
-commands = {envpython} {toxinidir}/tools/noproxy nosetests --with-coverage --cover-erase --cover-branches --cover-package=curtin --cover-inclusive {posargs} tests/unittests
-
 [testenv:docs]
 deps = {[testenv]deps}
     sphinx
+    sphinx-rtd-theme
 commands =
     sphinx-build -b html -d doc/_build/doctrees doc/ doc/_build/html
 
-[testenv:trusty-check]
+[testenv:trusty]
 # this environment provides roughly a trusty build environment where
 # 'make check' is run during package build.  This protects against
 # package build errors on trusty, where pep8 and pyflakes have subtly
 # different behavior.  Note, we only run pyflakes3, though.
-basepython = python3
 deps = pyflakes==0.8.1
    pep8==1.4.6
+   mock==1.0.1
+   nose==1.3.1
+   pyyaml==3.10
+   oauthlib==0.6.1
+
+[testenv:trusty-check]
+deps = {[testenv:trusty]deps}
+basepython = python3
 commands =
    {toxinidir}/tools/run-pyflakes3 {posargs}
    {toxinidir}/tools/run-pep8 {posargs}
 
+[testenv:trusty-py27]
+deps = {[testenv:trusty]deps}
+basepython = python2.7
+commands = {envpython} {toxinidir}/tools/noproxy nosetests \
+    {posargs:tests/unittests}
+
+[testenv:trusty-py3]
+deps = {[testenv:trusty]deps}
+basepython = python3
+commands = {envpython} {toxinidir}/tools/noproxy nosetests \
+    {posargs:tests/unittests}
+
 [flake8]
 builtins = _
 exclude = .venv,.bzr,.tox,dist,doc,*lib/python*,*egg,build

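With coverage folded into the default posargs of the base nose command, the old coverage27/coverage3 environments become redundant. Typical invocations (environment names per the envlist above):

    tox -e py3                         # unit tests with coverage by default
    tox -e py27 -- tests/unittests     # override posargs: plain run, no coverage
    tox -e trusty-py27,trusty-py3      # pinned trusty dependency set
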
