Initial commit
commit
6dbc137b3a
|
@ -0,0 +1,4 @@
|
|||
/dist/
|
||||
*.egg-info/
|
||||
__pycache__/
|
||||
*.py[co]
|
|
@ -0,0 +1,18 @@
|
|||
from setuptools import find_packages, setup


# Package metadata, collected in one place and splatted into setup().
PACKAGE_INFO = dict(
    name='mkvm',
    version='0.1',
    description='Virtual Machine Image Creator',
    author='Dustin C. Hatch',
    author_email='dustin@hatch.name',
    url='http://bitbucket.org/AdmiralNemo/imgmaker',
    license='GPL-3+',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    entry_points={
        'console_scripts': [
            'mkvm=mkvm.mkvm:main',
        ],
    },
)

setup(**PACKAGE_INFO)
|
|
@ -0,0 +1,99 @@
|
|||
import glob
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Directories searched for commands by custom scripts run inside the
# guest chroot (standard sbin/bin locations, local first).
_SCRIPT_PATH_DIRS = (
    '/usr/local/sbin',
    '/usr/sbin',
    '/sbin',
    '/usr/local/bin',
    '/usr/bin',
    '/bin',
)

# Minimal environment handed to custom scripts.
CUSTOM_SCRIPT_ENV = {'PATH': os.pathsep.join(_SCRIPT_PATH_DIRS)}

# Files inside the image that must become symlinks to runtime-managed
# locations: (path in guest, symlink target).
FIX_LINKS = [
    ('/etc/mtab', '/proc/self/mounts'),
    ('/etc/resolv.conf', '/run/resolv.conf'),
    ('/etc/ntp.conf', '/run/ntp.conf'),
]
|
||||
|
||||
|
||||
def fix_runtime_symlinks(mountpoint):
    """Replace files in the image with symlinks to runtime locations.

    For every (link, target) pair in FIX_LINKS, the existing file under
    *mountpoint* (if any) is removed and replaced with a symlink.
    """
    for link, target in FIX_LINKS:
        dest = os.path.join(mountpoint, link[1:])
        if os.path.exists(dest):
            os.unlink(dest)
        os.symlink(target, dest)
|
||||
|
||||
|
||||
def set_timezone(mountpoint, timezone):
    """Point etc/localtime inside *mountpoint* at the given zoneinfo entry."""
    localtime = os.path.join(mountpoint, 'etc/localtime')
    if os.path.exists(localtime):
        os.unlink(localtime)
    zone_path = os.path.join('/usr/share/zoneinfo', timezone)
    os.symlink(zone_path, localtime)
|
||||
|
||||
|
||||
def set_hostname(mountpoint, hostname):
    """Write the short host name into etc/conf.d/hostname in the image.

    Only the first label of *hostname* (up to the first dot) is used.
    Lines other than the ``hostname=`` assignment are preserved.
    """
    short_name = hostname.split('.')[0]
    conf = os.path.join(mountpoint, 'etc/conf.d/hostname')
    with open(conf) as f:
        original = f.readlines()
    with open(conf, 'w') as f:
        for line in original:
            if line.lstrip().startswith('hostname='):
                line = 'hostname="{}"\n'.format(short_name)
            f.write(line)
|
||||
|
||||
|
||||
def run_custom_script(mountpoint, script):
    """Run *script* with /bin/sh inside a chroot of *mountpoint*.

    Standard output is logged at INFO level, standard error at ERROR;
    a non-zero exit status is logged but not raised.
    """
    chroot_cmd = ['chroot', mountpoint, '/bin/sh']
    proc = subprocess.Popen(
        chroot_cmd,
        env=CUSTOM_SCRIPT_ENV.copy(),
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, stderr = proc.communicate(script.encode())
    for text, emit in ((stdout, log.info), (stderr, log.error)):
        for line in text.decode().splitlines():
            emit(line.strip())
    if proc.returncode != 0:
        log.error('Custom script exited with return '
                  'code {}'.format(proc.returncode))
|
||||
|
||||
|
||||
def configure(vm):
    """Apply all post-install configuration steps to a mounted VM image."""
    fix_runtime_symlinks(vm.mountpoint)
    # Optional steps, applied in order only when the VM defines them.
    for value, action in (
            (vm.timezone, set_timezone),
            (vm.fqdn, set_hostname),
            (vm.customize, run_custom_script)):
        if value:
            action(vm.mountpoint, value)
|
||||
|
||||
|
||||
def inject_ssh_keys(filename, *keys):
    """Append the given SSH public key files to *filename*.

    Each element of *keys* is the path of a public key file.  A value of
    ``None`` (produced by ``--inject-ssh-key`` with no argument) is a
    request to use the caller's default keys: every ``~/.ssh/id_*.pub``
    file is added in its place.
    """
    log.debug('Injecting SSH keys into {}'.format(filename))
    keys = set(keys)
    try:
        keys.remove(None)
    except KeyError:
        # None was never requested; use only the explicit key files
        pass
    else:
        # None was present: substitute the user's default public keys
        keys.update(glob.glob(os.path.expanduser('~/.ssh/id_*.pub')))

    # authorized_keys usually lives in root/.ssh, which a fresh stage3
    # does not contain
    dirname = os.path.dirname(filename)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)

    with open(filename, 'a') as f:
        for key in keys:
            log.info('Adding SSH key {}'.format(key))
            with open(key) as k:
                f.write(k.read())
|
|
@ -0,0 +1,129 @@
|
|||
#!/usr/bin/env python3
|
||||
from . import configure, stage3, storage, virt, vmdesc
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
log = logging.getLogger('mkvm')
|
||||
|
||||
|
||||
def parse_args():
    """Build the mkvm command-line parser and parse sys.argv."""
    parser = argparse.ArgumentParser()

    stage_group = parser.add_argument_group('stage3 tarball options')
    stage_group.add_argument(
        '--arch',
        help='Architecture to use when fetching a stage3 tarball')
    stage_group.add_argument(
        '--subtype',
        help='stage3 subtype/profile (e.g. nomultilib)')
    stage_source = stage_group.add_mutually_exclusive_group(required=True)
    stage_source.add_argument(
        '--latest', action='store_true', default=False,
        help='Fetch the latest stage3 tarball, optionally selecting an '
             'alternate flavor such as nomultilib or hardened')
    stage_source.add_argument(
        '--stage', '-s', metavar='FILENAME',
        help='stage3 tarball to install')

    download_group = parser.add_argument_group('download options')
    download_group.add_argument(
        '--mirror', metavar='URI',
        help='Base URI of download mirror')
    download_group.add_argument(
        '--cache-dir', metavar='PATH',
        help='Cache location for fetched stage3 tarballs')

    libvirt_group = parser.add_argument_group('libvirt options')
    libvirt_group.add_argument(
        '--connect', '-c', metavar='URI', dest='uri',
        help='libvirt connection URI')
    libvirt_group.add_argument(
        '--pool', '-p', default='default',
        help='storage pool for the new disk')

    parser.add_argument(
        '--verbose', '-v', action='store_true', default=False,
        help='Print additional messages')
    parser.add_argument(
        '--inject-ssh-key', '-i', action='append', nargs='?',
        metavar='FILENAME',
        help='Authorize SSH public key in the VM')
    parser.add_argument(
        '--no-autostart', action='store_false', default=True,
        dest='autostart',
        help='Do not automatically start the new VM')
    parser.add_argument(
        'description',
        help='Path to VM description document')
    return parser.parse_args()
|
||||
|
||||
|
||||
def main():
    """Entry point for the ``mkvm`` console script.

    Parses the command line, obtains a stage3 tarball (latest from a
    mirror, or a local file), creates/partitions/formats the VM disk,
    installs and configures the guest, then defines (and optionally
    starts) the libvirt domain.  Errors exit with ``os.EX_*`` codes.
    """
    args = parse_args()

    # prevent libvirt from writing error messages to stderr:
    # swap the process's fd 2 for /dev/null at the descriptor level,
    # keeping a private duplicate so our own messages still appear
    sys.stderr.flush()
    newstderr = os.dup(sys.stderr.fileno())
    devnull = os.open(os.devnull, os.O_WRONLY)
    os.dup2(devnull, sys.stderr.fileno())
    os.close(devnull)
    sys.stderr = os.fdopen(newstderr, 'w')

    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)

    vm = vmdesc.VirtualMachine.from_yaml(args.description)
    v = virt.Virt(args.uri)

    if args.latest:
        try:
            # NOTE(review): args.mirror is parsed but never passed here,
            # so --mirror currently has no effect — confirm intent.
            fetcher = stage3.Fetcher(args.cache_dir)
            stagetbz = fetcher.fetch_stage(args.arch, args.subtype)
            fetcher.verify(stagetbz + '.DIGESTS.asc')
        except KeyboardInterrupt:
            print('')
            raise SystemExit(os.EX_TEMPFAIL)
        except (OSError, stage3.FetchError, stage3.VerifyError) as e:
            log.error('Failed to fetch stage3 tarball: {}'.format(e))
            raise SystemExit(os.EX_UNAVAILABLE)
    else:
        try:
            # cheap existence/readability probe before doing any work
            os.stat(args.stage)
        except OSError as e:
            log.error('Cannot read stage3 tarball: {}'.format(e))
            raise SystemExit(os.EX_OSFILE)
        stagetbz = args.stage

    try:
        pool = v.get_pool(args.pool)
        vm.create_disk(pool)
        vm.partition_disk()
        vm.format_filesystems()
    except ValueError as e:
        # bad sizes / unsupported disk label etc. are configuration errors
        log.error(e)
        raise SystemExit(os.EX_CONFIG)
    except (OSError, virt.VirtError, storage.DiskError) as e:
        log.error(e)
        raise SystemExit(os.EX_SOFTWARE)

    try:
        vm.mount_filesystems()
    except ValueError as e:
        # no root filesystem defined in the description
        log.error(e)
        raise SystemExit(os.EX_CONFIG)
    except storage.MountError as e:
        log.error(e)
        raise SystemExit(os.EX_OSERR)

    try:
        stage3.extract_stage(stagetbz, vm.mountpoint)
    except (OSError, stage3.ExtractError) as e:
        log.error(e)
        raise SystemExit(os.EX_SOFTWARE)

    vm.write_fstab()
    configure.configure(vm)
    if args.inject_ssh_key:
        filename = os.path.join(vm.mountpoint, 'root/.ssh/authorized_keys')
        configure.inject_ssh_keys(filename, *args.inject_ssh_key)

    vm.unmount_filesystems()

    dom = v.create_domain(vm)
    if args.autostart:
        dom.create()


if __name__ == '__main__':
    main()
|
|
@ -0,0 +1,204 @@
|
|||
import codecs
import hashlib
import io
import logging
import os
import shutil
import subprocess
import time
import urllib.error
import urllib.parse
import urllib.request

import gpgme
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Base cache directory, honoring the XDG environment override.
XDG_CACHE_DIR = os.environ.get('XDG_CACHE_DIR', '~/.cache')

# Map kernel machine names (os.uname().machine) to the directory names
# used on Gentoo mirrors.
ARCH_NAMES = {}
for _ia32 in ('i386', 'i486', 'i586', 'i686'):
    ARCH_NAMES[_ia32] = 'x86'
for _x64 in ('x86_64', 'em64t'):
    ARCH_NAMES[_x64] = 'amd64'
del _ia32, _x64
|
||||
|
||||
|
||||
class FetchError(Exception):
    """Raised when a stage3 tarball or mirror listing cannot be fetched."""
|
||||
|
||||
|
||||
class VerifyError(Exception):
    """Raised when a PGP signature or checksum fails verification."""
|
||||
|
||||
|
||||
class ExtractError(Exception):
    """Raised when extracting a stage3 tarball fails."""
|
||||
|
||||
|
||||
class Fetcher(object):
    """Fetches, caches, and verifies Gentoo stage3 tarballs."""

    DEFAULT_MIRROR = 'http://distfiles.gentoo.org/'
    # Seconds a cached latest-stage3 list remains valid (one day)
    LIST_CACHE_LIFE = 86400

    log = log.getChild('fetch')

    def __init__(self, cache_dir=None, mirror=None):
        """Create a fetcher.

        :param cache_dir: directory for downloaded files; defaults to
            ``$XDG_CACHE_DIR/stage3s``
        :param mirror: base URI of the Gentoo mirror; ``None`` (the
            default) selects :data:`DEFAULT_MIRROR`
        """
        if cache_dir is None:
            self.cache_dir = os.path.join(
                os.path.expanduser(XDG_CACHE_DIR),
                'stage3s',
            )
        else:
            self.cache_dir = cache_dir
        if not os.path.isdir(self.cache_dir):
            os.makedirs(self.cache_dir)
        # Accept None so callers can pass an unset --mirror option through
        self.mirror = mirror if mirror is not None else self.DEFAULT_MIRROR
        if not self.mirror.endswith('/'):
            self.mirror += '/'

    @staticmethod
    def verify(filename):
        """Verify the PGP signature on a DIGESTS file and then the
        SHA512 checksums of the files it lists.

        :param filename: path to a clear-signed ``.DIGESTS.asc`` file
        :raises VerifyError: on a bad signature or checksum mismatch
        """
        log.debug('Verifying PGP signature for {}'.format(filename))
        ctx = gpgme.Context()
        plaintext = io.BytesIO()
        with open(filename, 'rb') as f:
            sigs = ctx.verify(f, None, plaintext)
        for sig in sigs:
            if sig.status:
                raise VerifyError(sig.status.args[2])
            if sig.wrong_key_usage:
                raise VerifyError('wrong key usage')
        log.info('Successfully verified PGP signature')
        plaintext.seek(0)
        buf = codecs.getreader('utf-8')(plaintext)
        dirname = os.path.dirname(filename)
        for line in buf:
            # Each digest entry is announced by a comment line naming
            # the hash algorithm; the digest itself is on the next line.
            if not line.lstrip().startswith('#'):
                continue
            if 'SHA512' in line:
                h = hashlib.sha512()
            else:
                continue
            line = buf.readline()
            try:
                digest, digest_fname = line.split()
            except ValueError:
                # BUG FIX: previously fell through with stale/shadowed
                # values after a malformed line; skip it instead.
                log.warning('Malformed digest line: {!r}'.format(line))
                continue
            path = os.path.join(dirname, digest_fname)
            log.debug('Verifying checksum of {}'.format(path))
            with open(path, 'rb') as f:
                for data in iter(lambda: f.read(4096), b''):
                    h.update(data)
            if h.hexdigest() != digest.lower():
                raise VerifyError(
                    '{} checksum mismatch: {}'.format(h.name, digest_fname))
            log.info('Verified checksum of {}'.format(digest_fname))

    def wget(self, *uris):
        """Download *uris* into the cache directory with ``wget -c``.

        :raises FetchError: if wget cannot be run or exits non-zero
        """
        cmd = ['wget', '--continue']
        cmd += uris
        log.debug('Running command: {}'.format(' '.join(cmd)))
        try:
            p = subprocess.Popen(cmd, cwd=self.cache_dir)
        except OSError as e:
            raise FetchError('Failed to run wget: {}'.format(e))
        if p.wait() != 0:
            raise FetchError('wget returned status {}'.format(p.returncode))

    def fetch_stage(self, arch=None, subtype=None):
        """Download the newest stage3 tarball for *arch*/*subtype*.

        Returns the local path of the (possibly cached) tarball; its
        ``.CONTENTS`` and ``.DIGESTS.asc`` companions are fetched
        alongside it.

        :raises FetchError: if no matching tarball is listed
        """
        if not arch:
            arch = os.uname().machine
        try:
            # normalize kernel arch names to mirror directory names
            arch = ARCH_NAMES[arch]
        except KeyError:
            pass

        want = 'stage3-{}-'.format(subtype if subtype else arch)
        with self._get_latest_list(arch) as latest_list:
            for line in latest_list:
                line = line.split('#')[0]
                if not line:
                    continue
                try:
                    path, size = line.split(None, 1)
                except ValueError:
                    log.warning('Unexpected value: {}'.format(line))
                    continue
                filename = os.path.basename(path)
                if filename.startswith(want):
                    break
            else:
                raise FetchError(
                    'No stage3 tarballs for {}'.format(subtype or arch))
        log.info('Found latest stage3 tarball: {}'.format(filename))
        full_path = 'releases/{}/autobuilds/{}'.format(arch, path)
        uri = urllib.parse.urljoin(self.mirror, full_path)
        local_path = os.path.join(self.cache_dir, filename)
        to_fetch = [
            uri,
            uri + '.CONTENTS',
            uri + '.DIGESTS.asc',
        ]
        try:
            st = os.stat(local_path)
        except OSError:
            pass
        else:
            # a cached tarball whose size matches the listing is complete
            if st.st_size == int(size):
                log.info('Cached copy of {} is complete'.format(filename))
                to_fetch.remove(uri)
        # The last two entries are always the small companion files;
        # skip any that already exist (non-empty) in the cache.
        for fn in to_fetch[-2:]:
            c_fn = os.path.join(self.cache_dir, os.path.basename(fn))
            try:
                st = os.stat(c_fn)
            except OSError:
                pass
            else:
                if st.st_size > 0:
                    to_fetch.remove(fn)
        if to_fetch:
            self.wget(*to_fetch)
        return local_path

    def _get_latest_list(self, arch):
        """Return an open file containing the latest-stage3 list.

        A cached copy newer than :data:`LIST_CACHE_LIFE` is reused;
        otherwise the list is fetched from the mirror and cached.

        :raises FetchError: if the list cannot be downloaded
        """
        cache_fname = os.path.join(
            self.cache_dir,
            'latest-stage3-{}.txt'.format(arch),
        )
        try:
            st = os.stat(cache_fname)
        except OSError:
            pass
        else:
            if st.st_mtime > time.time() - self.LIST_CACHE_LIFE:
                return open(cache_fname)

        path = 'releases/{}/autobuilds/latest-stage3.txt'.format(arch)
        url = urllib.parse.urljoin(self.mirror, path)
        log.debug('Fetching URL: {}'.format(url))
        try:
            response = urllib.request.urlopen(url)
        except urllib.error.HTTPError as e:
            log.error('Failed to fetch latest stage3 list: {}'.format(e))
            raise FetchError('Could not fetch latest stage3 list')
        with open(cache_fname, 'wb') as f:
            for line in response:
                f.write(line)
        return open(cache_fname)
|
||||
|
||||
|
||||
def extract_stage(tbz, directory):
    """Unpack a stage3 tarball into *directory*.

    Numeric ownership is preserved, and any stale /run/openrc state
    shipped in the tarball is removed afterwards.
    """
    log.debug('Extracting {} to {}'.format(tbz, directory))
    tar_cmd = ['tar', '-xaf', tbz, '--numeric-owner', '-C', directory]
    try:
        subprocess.check_output(tar_cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        msg = e.output.decode().strip() if e.output else e
        raise ExtractError('Failed to extract stage tarball: {}'.format(msg))
    openrc_run = os.path.join(directory, 'run/openrc')
    if os.path.isdir(openrc_run):
        shutil.rmtree(openrc_run)
    log.info('Successfully extracted {}'.format(os.path.basename(tbz)))
|
|
@ -0,0 +1,246 @@
|
|||
import logging
|
||||
import os
|
||||
import pyudev
|
||||
import subprocess
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
udev = pyudev.Context()
|
||||
|
||||
|
||||
class MountError(Exception):
    """Raised when a mount(8) or umount(8) invocation fails."""
|
||||
|
||||
|
||||
class DiskError(Exception):
    """Raised when a disk-management command (losetup, sgdisk, LVM, ...) fails."""
|
||||
|
||||
|
||||
def mount(source, target=None, fstype=None, options=None):
    """Mount *source* (optionally at *target*) via the mount(8) command.

    :param fstype: filesystem type passed with ``-t``
    :param options: iterable of mount options joined for ``-o``
    :raises MountError: with mount's combined output on failure
    """
    cmd = ['mount']
    if fstype:
        cmd.extend(('-t', fstype))
    if options:
        cmd.extend(('-o', ','.join(options)))
    cmd.append(source)
    if target:
        cmd.append(target)
    try:
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        raise MountError(e.output.decode().strip() if e.output else e)
    log.info('Mounted {} at {}'.format(source, target))
|
||||
|
||||
|
||||
def umount(target):
    """Unmount *target* via umount(8); raise MountError on failure."""
    log.debug('Unmounting {}'.format(target))
    try:
        subprocess.check_output(['umount', target], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        raise MountError(e.output.decode().strip() if e.output else e)
    log.info('Successfully unmounted {}'.format(target))
|
||||
|
||||
|
||||
def find_next_nbd():
    """Return the name (e.g. ``'nbd1'``) of the next unused nbd device.

    In-use devices are discovered by pgrep'ing for the kernel threads
    named ``nbdN``; the result is one past the highest number found, or
    ``'nbd0'`` when none are in use.
    """
    cmd = ['pgrep', '-a', '^nbd[0-9]+$']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    nbds = p.communicate()[0]

    numbers = []
    for line in nbds.splitlines():
        name = line.split()[1]
        numbers.append(int(name[3:]))
    log.debug('Found {} nbd nodes in use'.format(len(numbers)))

    if not numbers:
        return 'nbd0'
    # BUG FIX: the previous lexicographic sort ordered b'nbd10' before
    # b'nbd2', so the "highest" device was wrong once ten or more
    # devices were in use; compare numerically instead.
    return 'nbd' + str(max(numbers) + 1)
|
||||
|
||||
|
||||
def setup_loop(filename):
    """Attach *filename* to the next free loop device.

    Returns the loop device node; raises DiskError on failure.
    """
    try:
        out = subprocess.check_output(
            ['losetup', '-f', '--show', filename], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        msg = e.output.decode().strip() if e.output else e
        raise DiskError('Failed to set up loopback: {}'.format(msg))
    device = out.decode().strip()
    log.info('Connected {} to {}'.format(filename, device))
    return device
|
||||
|
||||
|
||||
def detach_loop(dev):
    """Detach loop device *dev*; raise DiskError on failure."""
    try:
        out = subprocess.check_output(
            ['losetup', '-d', dev], stderr=subprocess.STDOUT)
        if out:
            log.info(out.decode().strip())
    except subprocess.CalledProcessError as e:
        msg = e.output.decode().strip() if e.output else e
        raise DiskError('Failed to detach loopback: {}'.format(msg))
    log.info('Disconnected {}'.format(dev))
|
||||
|
||||
|
||||
def setup_nbd(filename):
    """Connect *filename* to the next free /dev/nbdN via qemu-nbd.

    Returns the nbd device node; raises DiskError on failure.
    """
    dev = os.path.join('/dev', find_next_nbd())
    try:
        subprocess.check_output(
            ['qemu-nbd', '-c', dev, filename], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        msg = e.output.decode().strip() if e.output else e
        raise DiskError('Failed to set up nbd: {}'.format(msg))
    log.info('Connected {} to {}'.format(filename, dev))
    return dev
|
||||
|
||||
|
||||
def detach_nbd(dev):
    """Disconnect nbd device *dev* via qemu-nbd; raise DiskError on failure."""
    try:
        out = subprocess.check_output(
            ['qemu-nbd', '-d', dev], stderr=subprocess.STDOUT)
        if out:
            log.info(out.decode().strip())
    except subprocess.CalledProcessError as e:
        msg = e.output.decode().strip() if e.output else e
        raise DiskError('Failed to detach nbd: {}'.format(msg))
|
||||
|
||||
|
||||
def get_partitions(blockdev):
    """Yield the device nodes of *blockdev*'s partitions, via udev."""
    parent = pyudev.Device.from_device_file(udev, blockdev)
    for child in parent.children:
        yield child.device_node
|
||||
|
||||
|
||||
def partition_gpt(blockdev, partitions):
    """Write a fresh GPT label on *blockdev* with the given partitions.

    Each entry of *partitions* is a mapping that may contain ``size``,
    ``type`` (numeric sgdisk code), and ``name`` keys.  Raises
    DiskError when sgdisk fails.
    """
    log.debug('Partitioning {} with GPT'.format(blockdev))
    cmd = ['sgdisk', '-a', '4096', '-Z', '-g']
    for partnum, part in enumerate(partitions, start=1):
        cmd.extend(('-n', '{}::{}'.format(partnum, part.get('size', ''))))
        if 'type' in part:
            cmd.extend(('-t', '{}:{:X}'.format(partnum, part['type'])))
        if 'name' in part:
            cmd.extend(('-c', '{}:{}'.format(partnum, part['name'])))
    cmd.append(blockdev)
    try:
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        msg = e.output.decode().strip() if e.output else e
        raise DiskError('Failed to partition disk: {}'.format(msg))
    for line in out.decode().rstrip().splitlines():
        log.info(line)
|
||||
|
||||
|
||||
def pvscan():
    """Run pvscan(8) so LVM notices newly created physical volumes."""
    try:
        out = subprocess.check_output(['pvscan'], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        msg = e.output.decode().strip() if e.output else e
        raise DiskError('pvscan failed: {}'.format(msg))
    if out:
        log.debug('pvscan:\n{}'.format(out.decode().rstrip()))
|
||||
|
||||
|
||||
def create_lvm_pv(*pvs):
    """Initialize each given block device as an LVM physical volume."""
    try:
        out = subprocess.check_output(
            ['pvcreate'] + list(pvs), stderr=subprocess.STDOUT)
        if out:
            log.info(out.decode().strip())
    except subprocess.CalledProcessError as e:
        msg = e.output.decode().strip() if e.output else e
        raise DiskError('Failed to create lvm pv: {}'.format(msg))
|
||||
|
||||
|
||||
def create_lvm_vg(name, *pvs):
    """Create volume group *name* from the given physical volumes."""
    try:
        out = subprocess.check_output(
            ['vgcreate', name] + list(pvs), stderr=subprocess.STDOUT)
        log.info(out.decode().strip())
    except subprocess.CalledProcessError as e:
        msg = e.output.decode().strip() if e.output else e
        raise DiskError('Failed to create lvm vg: {}'.format(msg))
|
||||
|
||||
|
||||
def create_lvm_lv(vg_name, name, size):
    """Create logical volume *name* of *size* in volume group *vg_name*.

    A size containing ``%`` is passed as extents (``-l``); anything
    else is passed as an absolute size (``-L``).
    """
    size = str(size)
    size_flag = '-l' if '%' in size else '-L'
    cmd = ['lvcreate', '-n', name, size_flag, size, vg_name]
    try:
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        log.info(out.decode().strip())
    except subprocess.CalledProcessError as e:
        msg = e.output.decode().strip() if e.output else e
        raise DiskError('Failed to create lvm lv: {}'.format(msg))
|
||||
|
||||
|
||||
def get_lv_blockdev(vg_name, lv_name):
    """Return the /dev node backing the given logical volume.

    Resolves the LV's kernel major:minor via lvs(8), then asks udev for
    the matching block device node.
    """
    lv_ref = '/'.join((vg_name, lv_name))
    cmd = ['lvs', '-o', 'lv_name,lv_kernel_major,lv_kernel_minor',
           '--noheadings', lv_ref]
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        msg = e.output.decode().strip() if e.output else e
        raise DiskError('Could not get info for lv: {}'.format(msg))
    major, minor = output.split()[1:]
    devnum = os.makedev(int(major), int(minor))
    dev = pyudev.Device.from_device_number(udev, 'block', devnum)
    return dev.device_node
|
||||
|
||||
|
||||
def mkfs(fstype, device, label=None):
    """Create a filesystem (or swap area) of *fstype* on *device*.

    ``fstype == 'swap'`` runs mkswap; anything else runs the matching
    mkfs.* helper.  Raises DiskError on failure.
    """
    log.debug(
        'Creating {fstype} filesystem on {device} '
        'with label "{label}"'.format(
            fstype=fstype,
            device=device,
            label=label,
        )
    )
    if fstype == 'swap':
        cmd = ['mkswap']
    else:
        # -q suppresses mkfs chatter; mkswap has no equivalent flag
        cmd = ['mkfs.{}'.format(fstype), '-q']
    if label:
        cmd.extend(('-L', label))
    cmd.append(device)
    try:
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        if out:
            log.info(out.decode().strip())
    except subprocess.CalledProcessError as e:
        msg = e.output.decode().strip() if e.output else e
        raise DiskError(
            'Failed to create filesystem: {}'.format(msg))
    log.info('Successfully created {} filesystem on {}'.format(fstype, device))
|
||||
|
||||
|
||||
def get_fs_uuid(blockdev):
    """Return the filesystem UUID of *blockdev*, as reported by blkid."""
    cmd = ['blkid', '-c', os.devnull, '-o', 'value', '-s', 'UUID', blockdev]
    try:
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        msg = e.output.decode().strip() if e.output else e
        raise DiskError('Could not get fs uuid: {}'.format(msg))
    return out.decode().strip()
|
||||
|
||||
|
||||
def deactivate_vg(name):
    """Deactivate every logical volume in volume group *name*."""
    try:
        out = subprocess.check_output(
            ['vgchange', '-an', name], stderr=subprocess.STDOUT)
        if out:
            log.info(out.decode().strip())
    except subprocess.CalledProcessError as e:
        msg = e.output.decode().strip() if e.output else e
        raise DiskError('Failed to deactivate vg: {}'.format(msg))
|
|
@ -0,0 +1,130 @@
|
|||
from . import storage
|
||||
from xml.etree import ElementTree as etree
|
||||
import libvirt
|
||||
import logging
|
||||
import os
|
||||
import stat
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
VirtError = libvirt.libvirtError
|
||||
|
||||
|
||||
class Virt(object):
    """Thin wrapper around a libvirt connection."""

    def __init__(self, uri=None):
        # open() with uri=None selects the default hypervisor connection
        self.conn = libvirt.open(uri)
        log.debug('Successfully connected to {}'.format(self.conn.getURI()))

    def get_pool(self, name):
        """Return the named storage pool, wrapped in a VirtPool."""
        pool = self.conn.storagePoolLookupByName(name)
        return VirtPool.from_virpool(pool)

    def create_domain(self, vm):
        """Define (but do not start) a libvirt domain for *vm*."""
        return self.conn.defineXML(vm.to_xml())
|
||||
|
||||
|
||||
class VirtPool(object):
    """Wrapper around a libvirt storage pool.

    Knows how to derive volume names for the pool type and to create
    new volumes in it.
    """

    DEFAULT_EXTENSION = '.img'
    EXTENSIONS = {
        'qcow2': '.qcow2',
        'vdi': '.vdi',
        'vmdk': '.vmdk',
    }

    def __init__(self, name=None):
        self._pool = None  # underlying libvirt pool, set by from_virpool
        self.name = name
        self.path = None   # pool target path on the host
        self.type = None   # libvirt pool type, e.g. 'dir' or 'logical'

    @classmethod
    def from_virpool(cls, pool):
        """Build a VirtPool from a libvirt storage-pool object."""
        root = etree.fromstring(pool.XMLDesc())
        self = cls(pool.name())
        self._pool = pool
        self.type = root.attrib['type']
        self.path = root.find('target').find('path').text
        return self

    def create_volume(self, name, size, fmt=None):
        """Create a volume of *size* bytes named after *name*.

        For 'logical' (LVM) pools the name is lower-cased and *fmt* is
        ignored; otherwise an extension matching *fmt* is appended.
        Returns the new VirtVolume.

        :raises RuntimeError: if this pool has no libvirt backing
        """
        # BUG FIX: was a bare assert, which disappears under `python -O`;
        # fail explicitly when no libvirt pool is attached.
        if self._pool is None:
            raise RuntimeError('not connected to a libvirt pool')
        if self.type == 'logical':
            name = name.lower()
            fmt = None
        else:
            name += self.EXTENSIONS.get(fmt, self.DEFAULT_EXTENSION)

        vol = VirtVolume()
        vol.name = name
        vol.capacity = size
        vol.path = os.path.join(self.path, name)
        vol.format = fmt
        self._pool.createXML(vol.to_xml())
        log.info('Created volume {} ({} bytes)'.format(vol.name, vol.capacity))
        return vol
|
||||
|
||||
|
||||
class VirtVolume(object):
    """A libvirt storage volume plus helpers for host-side access."""

    def __init__(self):
        self.name = None
        self.capacity = None  # size in bytes
        self.path = None
        self.format = None    # image format ('raw', 'qcow2', ...) or None

    @property
    def blockdev(self):
        """Host block device exposing this volume, attached on demand.

        Block-backed volumes are used directly; raw images go through a
        loop device and anything else through qemu-nbd.  The result is
        cached for the life of the object.
        """
        try:
            return self.__blockdev
        except AttributeError:
            pass

        if self.disk_type == 'block':
            self.__blockdev = self.path
        elif self.format == 'raw':
            self.__blockdev = storage.setup_loop(self.path)
        else:
            self.__blockdev = storage.setup_nbd(self.path)
        return self.__blockdev

    @property
    def disk_type(self):
        """'block' if the volume path is a block device, else 'file'."""
        try:
            return self.__disk_type
        except AttributeError:
            st = os.stat(self.path)
            self.__disk_type = 'block' if stat.S_ISBLK(st.st_mode) else 'file'
        return self.__disk_type

    @property
    def partitions(self):
        """Device nodes of the partitions on the attached block device."""
        return list(storage.get_partitions(self.blockdev))

    def to_xml(self):
        """Serialize this volume as libvirt ``<volume/>`` XML."""
        root = etree.Element('volume')
        etree.SubElement(root, 'name').text = self.name
        capacity = etree.SubElement(root, 'capacity')
        capacity.attrib['unit'] = 'bytes'
        capacity.text = str(self.capacity)
        target = etree.SubElement(root, 'target')
        etree.SubElement(target, 'path').text = self.path
        if self.format:
            fmt_elm = etree.SubElement(target, 'format')
            fmt_elm.attrib['type'] = self.format
        return etree.tostring(root, encoding='unicode')

    def deactivate(self):
        """Detach the host device (nbd or loop) backing this volume."""
        if 'nbd' in self.blockdev:
            storage.detach_nbd(self.blockdev)
        elif 'loop' in self.blockdev:
            storage.detach_loop(self.blockdev)
|
|
@ -0,0 +1,388 @@
|
|||
from . import storage
|
||||
from xml.etree import ElementTree as etree
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import tempfile
|
||||
import yaml
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Size-suffix multipliers for parse_size(), generated for every SI
# prefix from kilo through yotta: a bare prefix ('k') and the '*ib'
# forms are binary (powers of 1024); the '*b' forms are decimal.
UNITS = {}
for _exp, _prefix in enumerate('kmgtpezy', start=1):
    UNITS[_prefix] = 2 ** (10 * _exp)
    UNITS[_prefix + 'b'] = 10 ** (3 * _exp)
    UNITS[_prefix + 'ib'] = 2 ** (10 * _exp)
del _exp, _prefix

# A size is an integer followed by an optional unit word.
SIZE_RE = re.compile(
    r'^(?P<value>[0-9]+)\s*'
    r'(?P<unit>\w+)?\s*$'
)
|
||||
|
||||
|
||||
# Prefer the C-accelerated YAML loader when PyYAML was built with
# libyaml; fall back to the pure-Python loader otherwise.
try:
    YamlLoader = yaml.CLoader
except AttributeError:
    YamlLoader = yaml.Loader
|
||||
|
||||
|
||||
def parse_size(size):
|
||||
if isinstance(size, int):
|
||||
return size
|
||||
|
||||
m = SIZE_RE.match(size.lower())
|
||||
if not m:
|
||||
raise ValueError('Invalid size: {}'.format(size))
|
||||
parts = m.groupdict()
|
||||
if parts['unit'] in (None, 'b', 'byte', 'bytes'):
|
||||
factor = 1
|
||||
else:
|
||||
try:
|
||||
factor = UNITS[parts['unit']]
|
||||
except KeyError:
|
||||
raise ValueError('Invalid size : {}'.format(size))
|
||||
return int(parts['value']) * factor
|
||||
|
||||
|
||||
class VirtualMachine(object):
|
||||
|
||||
__slots__ = (
|
||||
'name',
|
||||
'fqdn',
|
||||
'ram',
|
||||
'vcpus',
|
||||
'net_ifaces',
|
||||
'console',
|
||||
'graphics',
|
||||
'video',
|
||||
'guest_agent',
|
||||
'image_format',
|
||||
'disk_size',
|
||||
'disk_label',
|
||||
'partitions',
|
||||
'volumes',
|
||||
'tmpfs_tmp',
|
||||
'install_grub',
|
||||
'kernel',
|
||||
'initrd',
|
||||
'kcmdline',
|
||||
'boot',
|
||||
'timezone',
|
||||
'customize',
|
||||
'_disk',
|
||||
'_mountpoint',
|
||||
)
|
||||
|
||||
    def __init__(self, name=None):
        """Initialize a VM description with defaults for every setting."""
        self.name = name
        self.fqdn = None                # fully-qualified domain name
        self.ram = 256 * 2 ** 20        # bytes
        self.vcpus = 1
        self.net_ifaces = []
        self.console = 'virtio'
        self.graphics = 'spice'
        self.video = 'qxl'
        self.guest_agent = True
        self.image_format = 'raw'
        self.disk_size = 3 * 2 ** 30    # bytes
        self.disk_label = 'gpt'
        self.partitions = []            # partition description dicts
        self.volumes = []               # LVM volume description dicts
        self.tmpfs_tmp = True           # mount /tmp as tmpfs in the guest
        self.install_grub = False
        self.kernel = None
        self.initrd = None
        self.kcmdline = None            # kernel command line
        self.boot = None
        self.timezone = 'UTC'
        self.customize = None           # shell script run inside the guest
        self._disk = None               # VirtVolume once create_disk runs
        self._mountpoint = None         # host dir where the image is mounted
|
||||
|
||||
@property
|
||||
def mountpoint(self):
|
||||
return self._mountpoint
|
||||
|
||||
@property
|
||||
def vg_name(self):
|
||||
return self.name.lower()
|
||||
|
||||
    @classmethod
    def from_yaml(cls, filename):
        """Load a VirtualMachine from a YAML description file.

        Size-like keys ('disk_size', 'ram') may be human-readable
        strings (e.g. '2 GiB') and are normalized to bytes.  Unknown
        keys are silently ignored: setattr on this __slots__ class
        raises AttributeError for them.

        NOTE(review): yaml.load with the full Loader can construct
        arbitrary objects from a hostile file; consider safe_load.
        """
        with open(filename) as f:
            data = yaml.load(f, Loader=YamlLoader)
        self = cls()
        for key, value in data.items():
            if key in ('disk_size', 'ram'):
                value = parse_size(value)
            try:
                setattr(self, key, value)
            except AttributeError:
                # not a declared slot; ignore the key
                pass
        return self
|
||||
|
||||
def create_disk(self, pool):
|
||||
self._disk = pool.create_volume(
|
||||
name=self.name,
|
||||
size=self.disk_size,
|
||||
fmt=self.image_format,
|
||||
)
|
||||
|
||||
    def partition_disk(self):
        """Write a partition table and/or LVM layout onto the VM disk.

        :raises ValueError: when partitions are requested without a disk
            label, or with an unsupported label type
        """
        if self.partitions:
            if not self.disk_label:
                raise ValueError('Cannot partition without disk label')
            elif self.disk_label == 'gpt':
                self._partition_gpt()
            # TODO: support msdos disk labels
            else:
                raise ValueError(
                    'Unsupported disk label {}'.format(self.disk_label))
        if self.volumes:
            self._partition_lvm()
|
||||
|
||||
    def format_filesystems(self):
        """Create filesystems on every partition and LVM volume that
        declares an 'fstype'; entries without one are left untouched."""
        def lv_block(name):
            # resolve an LVM volume name to its /dev node
            return storage.get_lv_blockdev(self.vg_name, name)

        partitions = self._disk.partitions
        for idx, part in enumerate(self.partitions):
            try:
                storage.mkfs(part['fstype'], partitions[idx],
                             part.get('label'))
            except KeyError:
                # no 'fstype' key: skip this partition
                pass

        for vol in self.volumes:
            try:
                # the volume name doubles as the filesystem label
                storage.mkfs(vol['fstype'], lv_block(vol['name']), vol['name'])
            except KeyError:
                pass
|
||||
|
||||
    def mount_filesystems(self):
        """Mount the VM's filesystems under a fresh temporary directory.

        The root filesystem is mounted first, then the remaining mounts
        in sorted path order (so parents mount before children).

        :raises ValueError: if no '/' mountpoint is defined
        """
        mountpoint = tempfile.mkdtemp(prefix='mkvm-')
        partitions = self._disk.partitions

        # map guest mountpoint -> host block device
        mounts = {}
        for idx, part in enumerate(self.partitions):
            try:
                mounts[part['mountpoint']] = partitions[idx]
            except KeyError:
                # unmountable entry (e.g. swap, bios boot): skip
                pass
        for vol in self.volumes:
            try:
                mounts[vol['mountpoint']] = storage.get_lv_blockdev(
                    self.vg_name, vol['name'])
            except KeyError:
                pass

        try:
            storage.mount(mounts.pop('/'), mountpoint)
        except KeyError:
            raise ValueError('No root filesystem defined')

        # sorted() guarantees e.g. /var is mounted before /var/log
        for dirname, device in sorted(mounts.items()):
            target = os.path.join(mountpoint, dirname[1:])
            if not os.path.isdir(target):
                os.makedirs(target)
            storage.mount(device, target)
        self._mountpoint = mountpoint
|
||||
|
||||
    def write_fstab(self):
        """Generate etc/fstab inside the mounted guest image."""
        def get_kwargs(mountpoint, fstype):
            # base fstab fields; the fsck pass number depends on where
            # (and whether) the filesystem is mounted
            kwargs = {
                'fsopts': 'sw' if fstype == 'swap' else 'defaults',
                'mountpoint': 'none',
            }
            if not mountpoint:
                kwargs['passno'] = 0
            elif mountpoint == '/':
                kwargs['passno'] = 1
            else:
                kwargs['passno'] = 2
            return kwargs

        tmpl = '{source}\t{mountpoint}\t{fstype}\t{fsopts}\t0 {passno}\n'
        with open(os.path.join(self.mountpoint, 'etc', 'fstab'), 'w') as f:
            partitions = self._disk.partitions
            for idx, part in enumerate(self.partitions):
                if part.get('fstype'):
                    kwargs = get_kwargs(part.get('mountpoint'), part['fstype'])
                    # prefer a LABEL= source; fall back to the fs UUID
                    if part.get('label'):
                        kwargs['source'] = 'LABEL={}'.format(part['label'])
                    else:
                        fs_uuid = storage.get_fs_uuid(partitions[idx])
                        kwargs['source'] = 'UUID={}'.format(fs_uuid)
                    # the description dict overrides any computed field
                    kwargs.update(part)
                    f.write(tmpl.format(**kwargs))
            for vol in self.volumes:
                if vol.get('fstype'):
                    kwargs = get_kwargs(vol.get('mountpoint'), vol['fstype'])
                    kwargs['source'] = os.path.join('/dev', self.vg_name,
                                                    vol['name'])
                    kwargs.update(vol)
                    f.write(tmpl.format(**kwargs))
            if self.tmpfs_tmp:
                f.write(tmpl.format(
                    source='tmpfs',
                    mountpoint='/tmp',
                    fstype='tmpfs',
                    fsopts='defaults',
                    passno=0,
                ))
            log.info('Successfully wrote fstab to {}'.format(f.name))
|
||||
|
||||
def unmount_filesystems(self):
    """Unmount everything under the build mountpoint and release storage.

    Mounts are discovered from /proc/self/mountinfo and unmounted in
    reverse mount order so children come down before their parents.
    Afterwards the temporary mountpoint directory is removed, the LVM
    volume group is deactivated (when volumes were created), and the
    disk volume is deactivated.
    """
    mounts = []
    with open('/proc/self/mountinfo') as f:
        for line in f:
            # Field 5 (index 4) of a mountinfo record is the mount point.
            mount_point = line.split()[4]
            # Match the mountpoint itself or paths strictly below it.
            # A bare startswith() would also match unrelated siblings,
            # e.g. '/tmp/mkvm-abc2' when ours is '/tmp/mkvm-abc'.
            if (mount_point == self.mountpoint or
                    mount_point.startswith(self.mountpoint + '/')):
                mounts.append(mount_point)
    for mount in reversed(mounts):
        storage.umount(mount)
    os.rmdir(self.mountpoint)
    if self.volumes:
        storage.deactivate_vg(self.vg_name)
    self._disk.deactivate()
def to_xml(self):
    """Render this VM as a libvirt KVM domain XML document.

    Returns:
        str: the serialized ``<domain type='kvm'>`` definition with
        memory/CPU sizing, boot configuration, network interfaces,
        the disk (when one has been created), console, guest-agent
        channel, graphics, and video devices.
    """
    domain = etree.Element('domain', type='kvm')
    etree.SubElement(domain, 'name').text = self.name

    # RAM is held in bytes; libvirt expects KiB for this element.
    memory = etree.SubElement(domain, 'memory', unit='KiB')
    memory.text = str(self.ram // 1024)
    vcpu = etree.SubElement(domain, 'vcpu', placement='static')
    vcpu.text = str(self.vcpus)

    os_elm = etree.SubElement(domain, 'os')
    os_type = etree.SubElement(os_elm, 'type', arch='x86_64')
    os_type.text = 'hvm'
    if self.boot:
        if 'kernel' in self.boot:
            # Direct kernel boot: hand libvirt the kernel, initrd,
            # and command line instead of a boot device order.
            etree.SubElement(os_elm, 'kernel').text = self.kernel
            etree.SubElement(os_elm, 'initrd').text = self.initrd
            etree.SubElement(os_elm, 'cmdline').text = self.kcmdline
        else:
            for dev in self.boot:
                etree.SubElement(os_elm, 'boot', dev=dev)

    features = etree.SubElement(domain, 'features')
    for feature in ('acpi', 'apic', 'pae'):
        etree.SubElement(features, feature)

    devices = etree.SubElement(domain, 'devices')

    for iface in self.net_ifaces:
        nic = etree.Element('interface')
        if 'bridge' in iface:
            nic.set('type', 'bridge')
            etree.SubElement(nic, 'source').set('bridge', iface['bridge'])
        if 'mac' in iface:
            etree.SubElement(nic, 'mac').set('address', iface['mac'])
        etree.SubElement(nic, 'model').set(
            'type', iface.get('model', 'virtio'))
        devices.insert(0, nic)

    if self._disk:
        disk = etree.Element('disk')
        disk.set('type', self._disk.disk_type)
        disk.set('device', 'disk')
        driver = etree.SubElement(disk, 'driver')
        driver.set('name', 'qemu')
        driver.set('type', self._disk.format)
        source = etree.SubElement(disk, 'source')
        # Block devices are referenced by 'dev'; image files by 'file'.
        source_attr = 'dev' if self._disk.disk_type == 'block' else 'file'
        source.set(source_attr, self._disk.path)
        target = etree.SubElement(disk, 'target')
        target.set('dev', 'vda')
        target.set('bus', 'virtio')
        devices.insert(0, disk)

    if self.console:
        console = etree.SubElement(devices, 'console', type='pty')
        etree.SubElement(console, 'target').set('type', self.console)

    if self.guest_agent:
        channel = etree.SubElement(devices, 'channel')
        channel.set('type', 'unix')
        target = etree.SubElement(channel, 'target')
        target.set('type', 'virtio')
        target.set('name', 'org.qemu.guest_agent.0')

    if self.graphics == 'spice':
        # SPICE wants an agent channel alongside the display device.
        channel = etree.SubElement(devices, 'channel')
        channel.set('type', 'spicevmc')
        target = etree.SubElement(channel, 'target')
        target.set('type', 'virtio')
        target.set('name', 'com.redhat.spice.0')
    if self.graphics:
        graphics = etree.SubElement(devices, 'graphics')
        graphics.set('type', self.graphics)
        graphics.set('autoport', 'yes')

    if self.video == 'qxl':
        video = etree.SubElement(devices, 'video')
        etree.SubElement(video, 'model').attrib.update(
            type=self.video,
            ram='65536',
            vram='65536',
            heads='1',
        )
    elif self.video:
        video = etree.SubElement(devices, 'video')
        etree.SubElement(video, 'model').attrib.update(
            type=self.video,
            vram='9216',
            heads='1',
        )

    return etree.tostring(domain, encoding='unicode')
def _partition_gpt(self):
    """Apply the configured partition layout to the disk as a GPT label."""
    device = self._disk.blockdev
    storage.partition_gpt(device, self.partitions)
def _partition_lvm(self):
    """Create the LVM physical volume(s), volume group, and logical volumes.

    When the disk is partitioned, every partition typed 0x8e00
    (Linux LVM) becomes a physical volume; otherwise the whole disk
    is used as a single PV.
    """
    blockdev = self._disk.blockdev
    storage.pvscan()
    if self.partitions:
        blockdevs = self._disk.partitions
        pvs = [blockdevs[idx]
               for idx, part in enumerate(self.partitions)
               if part.get('type') == 0x8e00]
    else:
        pvs = [blockdev]
    storage.create_lvm_pv(*pvs)
    storage.create_lvm_vg(self.vg_name, *pvs)
    for vol in self.volumes:
        storage.create_lvm_lv(self.vg_name, vol['name'], vol['size'])
Loading…
Reference in New Issue