Pretty solid start

This commit is contained in:
Disassembler 2020-02-06 19:00:41 +01:00
commit 88b8520ff8
No known key found for this signature in database
GPG Key ID: 524BD33A0EE29499
21 changed files with 1386 additions and 0 deletions

21
APKBUILD Normal file
View File

@ -0,0 +1,21 @@
# Contributor: Disassembler <disassembler@dasm.cz>
# Maintainer: Disassembler <disassembler@dasm.cz>
pkgname=spoc
pkgver=0.0.1
pkgrel=0
# Typo fix: "conatiner" -> "container"
pkgdesc="SPOC application, container, and image manager"
url="https://spotter.vm/"
arch="noarch"
license="GPL"
depends="lxc python3 py3-bcrypt py3-cffi py3-cryptography py3-requests"
options="!check !strip"

build() {
	# Nothing to compile - pure python package
	return 0
}

package() {
	# Install the prepared directory trees verbatim
	mkdir -p ${pkgdir}
	cp -rp etc ${pkgdir}
	cp -rp usr ${pkgdir}
}

122
usr/bin/spoc-app Normal file
View File

@ -0,0 +1,122 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import os
from spoc import publisher
from spoc.utils import readable_size
# Numeric identifiers for the CLI subcommands (stored via parser defaults)
ACTION_LIST = 1
ACTION_INSTALL = 2
ACTION_UPGRADE = 3
ACTION_UNINSTALL = 4
ACTION_START = 5
ACTION_STOP = 6
ACTION_STATUS = 7
ACTION_PUBLISH = 8
ACTION_UNPUBLISH = 9

# BUG FIX for all stubs below: "NotImplementedException" does not exist in
# Python, so raising it produced a NameError instead of the intended signal.
# The stubs also now accept the arguments their call sites already pass.

def listing(repo_type):
    """List applications (installed/online/upgrades/published). Not implemented yet."""
    raise NotImplementedError()

def install(app_name):
    """Install an application. Not implemented yet."""
    raise NotImplementedError()

def upgrade(app_name):
    """Upgrade an installed application. Not implemented yet."""
    raise NotImplementedError()

def uninstall(app_name):
    """Uninstall an application. Not implemented yet."""
    raise NotImplementedError()

def start(app_name):
    """Start an application's containers. Not implemented yet."""
    raise NotImplementedError()

def stop(app_name):
    """Stop an application's containers. Not implemented yet."""
    raise NotImplementedError()

def status(app_name):
    """Show an application's status. Not implemented yet."""
    raise NotImplementedError()
def publish(composefile, do_publish):
    """Publish the application defined by *composefile* unless already published.

    When *do_publish* is False, publishing still happens if the application
    is not yet present in the publish repository.
    """
    # BUG FIX: the original referenced the undefined name "filepath";
    # the parameter is called "composefile".
    # The application name is the name of the directory holding the compose file.
    app_name = os.path.basename(os.path.dirname(composefile))
    if not do_publish:
        try:
            publisher.get_app(app_name)
        except KeyError:
            # Not published yet - publish it now
            do_publish = True
    if do_publish:
        print(f'Publishing application "{app_name}"')
        package = publisher.publish_app(app_name)['package']
        print(f'Application "{app_name}" compressed from {readable_size(package["size"])} to {readable_size(package["dlsize"])}')
    else:
        print(f'Application "{app_name}" already published, skipping publishing')
def unpublish(app_name):
    """Withdraw a published application. Not implemented yet."""
    # BUG FIX: NotImplementedException does not exist (NameError);
    # the dispatcher also passes args.app, so accept it.
    raise NotImplementedError()
# Command line interface - build the parser, then dispatch on the chosen action
parser = argparse.ArgumentParser(description='SPOC application manager')
parser.set_defaults(action=None)
subparsers = parser.add_subparsers()

parser_list = subparsers.add_parser('list')
parser_list.set_defaults(action=ACTION_LIST)
parser_list.add_argument('type', choices=('installed', 'online', 'upgrades', 'published'), default='installed', const='installed', nargs='?')

# All of these subcommands take a single positional application name
for subcommand, action_id in (('install', ACTION_INSTALL),
                              ('upgrade', ACTION_UPGRADE),
                              ('uninstall', ACTION_UNINSTALL),
                              ('start', ACTION_START),
                              ('stop', ACTION_STOP),
                              ('status', ACTION_STATUS)):
    subparser = subparsers.add_parser(subcommand)
    subparser.set_defaults(action=action_id)
    subparser.add_argument('app')

parser_publish = subparsers.add_parser('publish')
parser_publish.set_defaults(action=ACTION_PUBLISH)
parser_publish.add_argument('-f', '--force', action='store_true', help='Force republish already published application')
parser_publish.add_argument('file')

parser_unpublish = subparsers.add_parser('unpublish')
parser_unpublish.set_defaults(action=ACTION_UNPUBLISH)
parser_unpublish.add_argument('app')

args = parser.parse_args()
if args.action == ACTION_LIST:
    listing(args.type)
elif args.action == ACTION_INSTALL:
    install(args.app)
elif args.action == ACTION_UPGRADE:
    upgrade(args.app)
elif args.action == ACTION_UNINSTALL:
    uninstall(args.app)
elif args.action == ACTION_START:
    start(args.app)
elif args.action == ACTION_STOP:
    stop(args.app)
elif args.action == ACTION_STATUS:
    status(args.app)
elif args.action == ACTION_PUBLISH:
    publish(args.file, args.force)
elif args.action == ACTION_UNPUBLISH:
    unpublish(args.app)
else:
    # No subcommand given
    parser.print_usage()

169
usr/bin/spoc-container Normal file
View File

@ -0,0 +1,169 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import shlex
import os
from spoc.container import Container
from spoc.image import Image
from spoc.paths import VOLUME_DIR
# Numeric identifiers for the CLI subcommands (stored via parser defaults)
ACTION_CREATE = 1
ACTION_MODIFY = 2
ACTION_DESTROY = 3
ACTION_START = 4
ACTION_STOP = 5
ACTION_STATUS = 6
ACTION_EXEC = 7

def modify_depend(container, depend):
    """Add a start dependency to the container, or remove it when prefixed with '!'."""
    if depend.startswith('!'):
        # BUG FIX: container.depends is a list, so a missing element raises
        # ValueError - the original caught KeyError, which list.remove()
        # never raises, so removals of unknown dependencies crashed.
        try:
            container.depends.remove(depend[1:])
        except ValueError:
            pass
    else:
        # Add the dependency and remove duplicates
        container.depends.append(depend)
        container.depends = list(set(container.depends))
def modify_mount(container, mount):
    """Add a volume mount ('volume:mountpoint') to the container.

    An empty mountpoint removes the mount for that volume instead.
    """
    volume, mountpoint = mount.split(':', 1)
    mountpoint = mountpoint.lstrip('/')
    if not mountpoint:
        # Removal request - unknown volumes are silently ignored
        container.mounts.pop(volume, None)
        return
    # If the volume doesn't exist yet, assume it will be a directory
    is_dir = not os.path.isfile(os.path.join(VOLUME_DIR, volume))
    container.mounts[volume] = (mountpoint, is_dir)
def modify_env(container, env):
    """Set an environment variable ('KEY=value') on the container.

    An empty value removes the variable instead.
    """
    key, value = env.split('=', 1)
    if not value:
        # Removal request - unknown keys are silently ignored
        container.env.pop(key, None)
    else:
        container.env[key] = value
def modify_container(container, depends, mounts, envs, uid, gid, cmd, cwd, ready, halt, autostart):
    """Apply CLI-supplied modifications to a container definition.

    List-valued options (depends/mounts/envs) are applied item by item;
    scalar options overwrite the existing value only when truthy, so
    omitted CLI options leave the definition untouched.
    """
    for depend in depends:
        modify_depend(container, depend)
    for mount in mounts:
        modify_mount(container, mount)
    for env in envs:
        modify_env(container, env)
    # IMPROVEMENT: the original captured locals() and indexed it by member
    # name, which is fragile (breaks on any rename and hides the data flow).
    # An explicit mapping keeps the same truthiness semantics.
    scalars = {'uid': uid, 'gid': gid, 'cmd': cmd, 'cwd': cwd,
               'ready': ready, 'halt': halt, 'autostart': autostart == 'on'}
    for member, value in scalars.items():
        if value:
            setattr(container, member, value)
def create(container_name, image_name, depends, mounts, env, uid, gid, cmd, cwd, ready, halt, autostart):
    """Create a new container from *image_name*, apply CLI overrides and register it."""
    new_container = Image(image_name, True).get_container(container_name)
    modify_container(new_container, depends, mounts, env, uid, gid, cmd, cwd, ready, halt, autostart)
    new_container.create()
def modify(container_name, depends, mounts, env, uid, gid, cmd, cwd, ready, halt, autostart):
    """Load an existing container definition, apply CLI overrides and re-register it."""
    existing = Container(container_name, True)
    modify_container(existing, depends, mounts, env, uid, gid, cmd, cwd, ready, halt, autostart)
    existing.create()
def destroy(container_name):
    """Unregister the container and remove its files and IP lease."""
    Container(container_name).destroy()
def start(container_name):
    """Start the container (definition loaded from the local repository)."""
    Container(container_name, True).start()
def stop(container_name):
    """Stop the container and wait for it to reach the STOPPED state."""
    Container(container_name, True).stop()
def status(container_name):
    """Print the container's current LXC state (e.g. RUNNING, STOPPED)."""
    container = Container(container_name, True)
    # BUG FIX: Container.get_state() returns a plain decoded string
    # (state.stdout.strip().decode()), so reading ".value" on it raised
    # AttributeError.
    print(container.get_state())
def execute(container_name, command):
    """Run *command* inside the container via lxc-attach/lxc-execute."""
    Container(container_name, True).execute(command)
# Command line interface
parser = argparse.ArgumentParser(description='SPOC container manager')
parser.set_defaults(action=None)
subparsers = parser.add_subparsers()
parser_create = subparsers.add_parser('create')
parser_create.set_defaults(action=ACTION_CREATE)
parser_create.add_argument('-d', '--depends', action='append', default=[], help='Add another container as a start dependency')
parser_create.add_argument('-m', '--mount', action='append', default=[], help='Add mount to the container - format volume:mountpoint')
parser_create.add_argument('-e', '--env', action='append', default=[], help='Add environment variable for the container - format KEY=value')
parser_create.add_argument('-u', '--uid', help='Sets the container init UID')
parser_create.add_argument('-g', '--gid', help='Sets the container init GID')
parser_create.add_argument('-c', '--cmd', help='Sets the container init command')
parser_create.add_argument('-w', '--workdir', help='Sets the container init working directory')
parser_create.add_argument('-r', '--ready', help='Sets the container ready command')
parser_create.add_argument('-s', '--stopsig', help='Sets the signal to be sent to init on container shutdown')
parser_create.add_argument('-a', '--autostart', choices=('on', 'off'), help='Sets the container to be automatically started after the host boots up')
parser_create.add_argument('container')
parser_create.add_argument('image')
parser_modify = subparsers.add_parser('modify')
parser_modify.set_defaults(action=ACTION_MODIFY)
parser_modify.add_argument('-d', '--depends', action='append', default=[], help='Add another container as a start dependency - prepend the name with ! to remove the dependency')
parser_modify.add_argument('-m', '--mount', action='append', default=[], help='Add mount to the container - format volume:mountpoint - specify empty mountpoint to remove the mount')
parser_modify.add_argument('-e', '--env', action='append', default=[], help='Add environment variable for the container - format KEY=value - specify empty value to remove the env')
parser_modify.add_argument('-u', '--uid', help='Sets the container init UID')
parser_modify.add_argument('-g', '--gid', help='Sets the container init GID')
parser_modify.add_argument('-c', '--cmd', help='Sets the container init command')
parser_modify.add_argument('-w', '--workdir', help='Sets the container init working directory')
parser_modify.add_argument('-r', '--ready', help='Sets the container ready command')
parser_modify.add_argument('-s', '--stopsig', help='Sets the signal to be sent to init on container shutdown')
parser_modify.add_argument('-a', '--autostart', choices=('on', 'off'), help='Sets the container to be automatically started after the host boots up')
parser_modify.add_argument('container')
parser_destroy = subparsers.add_parser('destroy')
parser_destroy.set_defaults(action=ACTION_DESTROY)
parser_destroy.add_argument('container')
parser_start = subparsers.add_parser('start')
parser_start.set_defaults(action=ACTION_START)
parser_start.add_argument('container')
# BUG FIX: the stop subcommand was registered under the name 'uninstall'
parser_stop = subparsers.add_parser('stop')
parser_stop.set_defaults(action=ACTION_STOP)
parser_stop.add_argument('container')
# BUG FIX: the status subcommand was registered as a second 'start' parser,
# which shadowed the real start subcommand and left no way to query status
parser_status = subparsers.add_parser('status')
parser_status.set_defaults(action=ACTION_STATUS)
parser_status.add_argument('container')
parser_exec = subparsers.add_parser('exec')
parser_exec.set_defaults(action=ACTION_EXEC)
parser_exec.add_argument('container')
parser_exec.add_argument('command', nargs=argparse.REMAINDER)
args = parser.parse_args()
if args.action == ACTION_CREATE:
    create(args.container, args.image, args.depends, args.mount, args.env, args.uid, args.gid, args.cmd, args.workdir, args.ready, args.stopsig, args.autostart)
elif args.action == ACTION_MODIFY:
    modify(args.container, args.depends, args.mount, args.env, args.uid, args.gid, args.cmd, args.workdir, args.ready, args.stopsig, args.autostart)
elif args.action == ACTION_DESTROY:
    destroy(args.container)
elif args.action == ACTION_START:
    start(args.container)
elif args.action == ACTION_STOP:
    stop(args.container)
elif args.action == ACTION_STATUS:
    status(args.container)
elif args.action == ACTION_EXEC:
    execute(args.container, args.command)
else:
    parser.print_usage()

16
usr/bin/spoc-hook Normal file
View File

@ -0,0 +1,16 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
from spoc.container import Container
def _pre_start(container):
    # Clean leftovers from a possible unclean shutdown before mounting
    container.clean_ephemeral_layer()
    container.mount_rootfs()

def _post_stop(container):
    # Tear down the rootfs mount, then clean the ephemeral layer
    container.unmount_rootfs()
    container.clean_ephemeral_layer()

if __name__ == '__main__':
    # LXC passes the hook context via environment variables
    handlers = {'pre-start': _pre_start, 'post-stop': _post_stop}
    handler = handlers.get(os.environ['LXC_HOOK_TYPE'])
    if handler:
        handler(Container(os.environ['LXC_NAME'], True))

132
usr/bin/spoc-image Normal file
View File

@ -0,0 +1,132 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse

from spoc import repo_local
from spoc import repo_online
from spoc import repo_publish
from spoc.image import Image
from spoc.utils import readable_size
# Numeric identifiers for the CLI subcommands (stored via parser defaults)
ACTION_LIST = 1
ACTION_DOWNLOAD = 2
ACTION_DELETE = 3
ACTION_BUILD = 4
ACTION_PUBLISH = 5
ACTION_UNPUBLISH = 6
ACTION_EXTRACT = 7

def get_image_name(filepath):
    """Return the name declared by the first 'IMAGE <name>' line of an image file.

    Returns None when no such line exists.
    """
    with open(filepath) as image_file:
        names = (line.split()[1] for line in image_file if line.startswith('IMAGE '))
        return next(names, None)
def listing(repo_type):
    """Print the names of images in the selected repository.

    *repo_type* is one of 'installed', 'online', 'published'
    (enforced by the argparse choices).
    """
    # BUG FIX: repo_online was referenced but never imported, so the
    # 'online' branch raised NameError (the import is added at file top).
    sources = {
        'installed': repo_local.get_images,
        'online': repo_online.get_images,
        'published': repo_publish.get_images,
    }
    for image in sources[repo_type]():
        print(image)
def download(image_name):
    """Download an image from the online repository. Not implemented yet."""
    # BUG FIX: NotImplementedException does not exist in Python (NameError)
    raise NotImplementedError()  # TODO
def delete(image_name):
    """Unregister the image from the local repository and remove its layer."""
    Image(image_name).delete()
def build(filename, force, do_publish):
    """Build an image from image file *filename* unless it is already built.

    When *do_publish* is set, the image is published afterwards; a fresh
    build forces re-publishing of the resulting image.
    """
    image_name = get_image_name(filename)
    if image_name is None:
        # ROBUSTNESS: fail early with a clear message instead of constructing
        # Image(None) and crashing deeper in the stack
        raise ValueError(f'No IMAGE directive found in {filename}')
    image = Image(image_name)
    if force or image.name not in repo_local.get_images():
        image.delete()
        # BUG FIX: the message printed the literal text "(unknown)" instead
        # of the actual file name
        print(f'Building image {image_name} from file {filename}')
        image.build(filename)
        print(f'Image {image_name} built successfully')
        # If publishing was requested, force publish after successful build
        force = True
    else:
        print(f'Image {image_name} already built, skipping build task')
    if do_publish:
        publish(image_name, force)
def publish(image_name, force):
    """Compress and publish the image unless it is already published."""
    image = Image(image_name, True)
    if not force and image.name in repo_publish.get_images():
        print(f'Image {image_name} already published, skipping publish task')
        return
    # Drop any stale published archive before re-publishing
    image.unpublish()
    print(f'Publishing image {image_name}')
    image.publish()
    print(f'Image {image_name} compressed from {readable_size(image.size)} to {readable_size(image.dlsize)} and published successfully')
def unpublish(image_name):
    """Remove the image from the publish repository and delete its archive."""
    Image(image_name).unpublish()
def extract(image_name, source, destination):
    """Extract files from a published image archive. Not implemented yet."""
    # BUG FIX: NotImplementedException does not exist in Python (NameError)
    raise NotImplementedError()  # TODO
# Command line interface
parser = argparse.ArgumentParser(description='SPOC image manager')
parser.set_defaults(action=None)
subparsers = parser.add_subparsers()
parser_list = subparsers.add_parser('list')
parser_list.set_defaults(action=ACTION_LIST)
parser_list.add_argument('type', choices=('installed', 'online', 'published'), default='installed', const='installed', nargs='?')
parser_download = subparsers.add_parser('download')
parser_download.set_defaults(action=ACTION_DOWNLOAD)
parser_download.add_argument('image')
parser_delete = subparsers.add_parser('delete')
parser_delete.set_defaults(action=ACTION_DELETE)
parser_delete.add_argument('image')
parser_build = subparsers.add_parser('build')
parser_build.set_defaults(action=ACTION_BUILD)
parser_build.add_argument('-f', '--force', action='store_true', help='Force rebuild already existing image')
parser_build.add_argument('-p', '--publish', action='store_true', help='Publish the image after successful build')
parser_build.add_argument('file')
parser_publish = subparsers.add_parser('publish')
parser_publish.set_defaults(action=ACTION_PUBLISH)
parser_publish.add_argument('-f', '--force', action='store_true', help='Force republish already published image')
parser_publish.add_argument('image')
parser_unpublish = subparsers.add_parser('unpublish')
parser_unpublish.set_defaults(action=ACTION_UNPUBLISH)
parser_unpublish.add_argument('image')
parser_extract = subparsers.add_parser('extract')
parser_extract.set_defaults(action=ACTION_EXTRACT)
parser_extract.add_argument('image')
parser_extract.add_argument('source')
parser_extract.add_argument('destination')
args = parser.parse_args()
if args.action == ACTION_LIST:
    listing(args.type)
elif args.action == ACTION_DOWNLOAD:
    download(args.image)
elif args.action == ACTION_DELETE:
    delete(args.image)
elif args.action == ACTION_BUILD:
    build(args.file, args.force, args.publish)
elif args.action == ACTION_PUBLISH:
    publish(args.image, args.force)
elif args.action == ACTION_UNPUBLISH:
    # BUG FIX: the original called publishreop.unpublish_image(), an
    # undefined name; dispatch to the local unpublish() helper instead
    unpublish(args.image)
elif args.action == ACTION_EXTRACT:
    extract(args.image, args.source, args.destination)
else:
    parser.print_usage()

View File

@ -0,0 +1 @@
# -*- coding: utf-8 -*-

View File

@ -0,0 +1,6 @@
class App:
    """In-memory representation of an application definition."""

    def __init__(self):
        # BUG FIX: the original read "self.name - None" (a subtraction
        # expression, not an assignment), which raised AttributeError at
        # instantiation and never set the attribute.
        self.name = None
        self.version = None
        self.meta = {}        # arbitrary application metadata
        self.containers = []  # containers belonging to this application

View File

@ -0,0 +1,190 @@
# -*- coding: utf-8 -*-
import os
import shlex
import shutil
import subprocess
import time
from concurrent.futures import ThreadPoolExecutor

from . import network
from . import repo_local
from .exceptions import InvalidContainerStateError
from .paths import CONTAINERS_DIR, LAYERS_DIR, LOG_DIR, HOSTS_FILE, VOLUME_DIR
from .templates import LXC_CONTAINER_TEMPLATE
# States taken from https://github.com/lxc/lxc/blob/master/src/lxc/state.h
STATE_STOPPED = 'STOPPED'
STATE_STARTING = 'STARTING'
STATE_RUNNING = 'RUNNING'
STATE_STOPPING = 'STOPPING'
STATE_ABORTING = 'ABORTING'
STATE_FREEZING = 'FREEZING'
STATE_FROZEN = 'FROZEN'
STATE_THAWED = 'THAWED'

# Keys persisted in the local repository for each container
DEFINITION_MEMBERS = ('build', 'depends', 'layers', 'mounts', 'env', 'uid', 'gid', 'cmd', 'cwd', 'ready', 'halt', 'autostart')

class Container:
    """A single LXC container: its definition, filesystem layout and lifecycle."""

    def __init__(self, name, load_from_repo=False):
        self.name = name
        self.build = False      # True while the container is used for image building
        self.depends = []       # names of containers which must be started first
        self.layers = []        # image layers composing the rootfs (bottom to top)
        self.mounts = {}        # volume name -> (mountpoint, is_dir)
        self.env = {}           # extra environment variables for init
        self.uid = None
        self.gid = None
        self.cmd = None         # init command (default /sbin/init, see create())
        self.cwd = None
        self.ready = None       # shell command probing application readiness
        self.halt = None        # signal sent to init on shutdown
        self.autostart = False
        self.container_path = os.path.join(CONTAINERS_DIR, name)
        self.config_path = os.path.join(self.container_path, 'config')
        self.rootfs_path = os.path.join(self.container_path, 'rootfs')
        self.olwork_path = os.path.join(self.container_path, 'olwork')
        self.ephemeral_layer_path = os.path.join(self.container_path, 'ephemeral')
        self.log_path = os.path.join(LOG_DIR, f'{name}.log')
        if load_from_repo:
            self.set_definition(repo_local.get_container(name))

    def set_definition(self, definition):
        """Populate attributes from a definition dict; unknown keys are ignored."""
        for key in DEFINITION_MEMBERS:
            if key in definition:
                setattr(self, key, definition[key])

    def get_definition(self):
        """Return a dict of all truthy definition members (falsy values are defaults)."""
        definition = {}
        for key in DEFINITION_MEMBERS:
            value = getattr(self, key)
            if value:
                definition[key] = value
        return definition

    def get_state(self):
        # Get current state of the container, uses LXC monitor socket accessible only in container's namespace
        state = subprocess.run(['lxc-info', '-sH', '-P', CONTAINERS_DIR, self.name], capture_output=True, check=True)
        return state.stdout.strip().decode()

    def await_state(self, awaited_state, timeout=30):
        # Block execution until the container reaches the desired state or until timeout
        # BUG FIX: subprocess argv entries must be strings - the numeric
        # timeout is converted explicitly
        subprocess.run(['lxc-wait', '-P', CONTAINERS_DIR, '-s', awaited_state, '-t', str(timeout), self.name], check=True)

    def mount_rootfs(self):
        # Prepares container rootfs
        # Called in lxc.hook.pre-start as the standard mount options are insufficient for rootless containers (see notes for overlayfs below)
        layers = [os.path.join(LAYERS_DIR, layer) for layer in self.layers]
        if len(layers) > 1:
            # Multiple layers require overlayfs, however non-root users don't normally have capability to create overlayfs mounts - https://www.spinics.net/lists/linux-fsdevel/msg105877.html
            # Standard linux kernels currently doesn't support overlay mounts in user namespaces (lxc.hook.pre-mount)
            # The exception is Ubuntu or custom patches such as https://salsa.debian.org/kernel-team/linux/blob/master/debian/patches/debian/overlayfs-permit-mounts-in-userns.patch
            # Possible alternative is fuse-overlayfs, which doesn't work well on Alpine (and it's FUSE anyway, so it needs an extra service and a process for each mount)
            # Another alternative would be to mount in the namespace via -N option, but LXC doesn't expose PID or namespaces of the process during container setup
            overlay_opts = f'upperdir={layers[-1]},lowerdir={":".join(reversed(layers[:-1]))},workdir={self.olwork_path}'
            subprocess.run(['mount', '-t', 'overlay', '-o', overlay_opts, 'none', self.rootfs_path])
        else:
            # We only have a single layer, no overlay needed
            subprocess.run(['mount', '--bind', layers[0], self.rootfs_path])

    def unmount_rootfs(self):
        # Recursively unmounts container rootfs
        # Called in lxc.hook.post-stop
        # For unprivileged containers it could theoretically be called already in lxc.hook.start-host, as the user namespace clones the mounts,
        # so they are not needed in the parent namespace anymore, but removing rootfs on container stop seems more intuitive
        subprocess.run(['umount', '-R', self.rootfs_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

    def clean_ephemeral_layer(self):
        # Cleans container ephemeral layer. Called in lxc.hook.post-stop and lxc.hook.pre-start in case of unclean shutdown
        # This is done early in the container start process, so the inode of the ephemeral directory must remain unchanged
        for item in os.scandir(self.ephemeral_layer_path):
            shutil.rmtree(item.path) if item.is_dir() else os.unlink(item.path)

    def create(self):
        """Create the container's directories and LXC config, then register it."""
        # Create container directories
        os.makedirs(self.rootfs_path, 0o755, True)
        os.makedirs(self.olwork_path, 0o755, True)
        os.makedirs(self.ephemeral_layer_path, 0o755, True)
        os.makedirs(LOG_DIR, 0o750, True)
        # Change UID/GID of the ephemeral layer directory
        # Chown is possible only when the process is running as root, for user namespaces, see https://linuxcontainers.org/lxc/manpages/man1/lxc-usernsexec.1.html
        os.chown(self.ephemeral_layer_path, 100000, 100000)
        # Create container configuration file based on the container definition
        layers = [os.path.join(LAYERS_DIR, layer) for layer in self.layers]
        if not self.build:
            # Add ephemeral layer if the container is not created as part of build process
            layers.append(self.ephemeral_layer_path)
        # NOTE(review): the joined layers string is not passed to the template
        # below - verify whether LXC_CONTAINER_TEMPLATE needs a {layers} field
        layers = ','.join(layers)
        mounts = '\n'.join([f'lxc.mount.entry = {os.path.join(VOLUME_DIR, v)} {m[0]} none bind,create={"dir" if m[1] else "file"} 0 0' for v,m in self.mounts.items()])
        env = '\n'.join([f'lxc.environment = {k}={v}' for k,v in self.env.items()])
        uid = self.uid if self.uid else 0
        gid = self.gid if self.gid else 0
        cmd = self.cmd if self.cmd else '/sbin/init'
        cwd = self.cwd if self.cwd else '/'
        halt = self.halt if self.halt else 'SIGINT'
        ip_address, ip_netmask, ip_gateway = network.request_ip(self.name)
        # Write LXC configuration file
        with open(self.config_path, 'w') as f:
            f.write(LXC_CONTAINER_TEMPLATE.format(name=self.name, ip_address=ip_address, ip_netmask=ip_netmask, ip_gateway=ip_gateway,
                                                  rootfs=self.rootfs_path, hosts=HOSTS_FILE, mounts=mounts, env=env,
                                                  uid=uid, gid=gid, cmd=cmd, cwd=cwd, halt=halt, log=self.log_path))
        repo_local.register_container(self.name, self.get_definition())

    def destroy(self):
        """Unregister the container and remove its files, log and IP lease."""
        repo_local.unregister_container(self.name)
        self.unmount_rootfs()
        try:
            shutil.rmtree(self.container_path)
        except FileNotFoundError:
            pass
        try:
            os.unlink(self.log_path)
        except FileNotFoundError:
            pass
        # Release the IP address from global hosts configuration
        network.release_ip(self.name)

    def start(self):
        # Start the container, wait until it is reported as started and execute application readiness check
        subprocess.Popen(['lxc-start', '-P', CONTAINERS_DIR, self.name])
        self.await_state(STATE_RUNNING)
        # Launch the readiness check in a separate thread, so it can be reliably cancelled after timeout
        with ThreadPoolExecutor(max_workers=1) as pool:
            # Create anonymous object to pass the task cancellation information
            guard = type('', (object,), {'cancel': False})()
            future = pool.submit(self.check_readiness, guard)
            try:
                future.result(timeout=30)
            finally:
                # BUG FIX: cancel the guard even when the readiness check times
                # out or raises - otherwise the worker thread kept polling and
                # the executor shutdown blocked indefinitely
                guard.cancel = True

    def check_readiness(self, guard):
        # Run spoc.init.ready until it returns return code 0 or the guard cancels the loop
        ready_cmd = shlex.split(self.ready) if self.ready else ['/bin/true']
        while not guard.cancel:
            state = self.get_state()
            if state != STATE_RUNNING:
                raise InvalidContainerStateError(self.name, state)
            check = subprocess.run(['lxc-attach', '-P', CONTAINERS_DIR, '--clear-env', self.name, '--']+ready_cmd, timeout=30)
            if check.returncode == 0:
                break
            time.sleep(0.25)

    def stop(self):
        # Stop the container and wait until it stops completely
        subprocess.Popen(['lxc-stop', '-P', CONTAINERS_DIR, self.name])
        self.await_state(STATE_STOPPED)

    def execute(self, cmd, check=False):
        # TODO: Allow to pass UID/GID
        # If the container is starting or stopping, wait until the operation is finished
        state = self.get_state()
        if state == STATE_STARTING:
            self.await_state(STATE_RUNNING)
            state = self.get_state()
        elif state == STATE_STOPPING:
            self.await_state(STATE_STOPPED)
            state = self.get_state()
        # If the container is stopped, use lxc-execute, otherwise use lxc-attach
        if state == STATE_STOPPED:
            return subprocess.run(['lxc-execute', '-P', CONTAINERS_DIR, self.name, '--']+cmd, check=check)
        elif state == STATE_RUNNING:
            return subprocess.run(['lxc-attach', '-P', CONTAINERS_DIR, '--clear-env', self.name, '--']+cmd, check=check)
        else:
            raise InvalidContainerStateError(self.name, state)

View File

@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
import hashlib
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import load_pem_private_key
def sign_file(private_key_path, input_path):
    """Generate an ECDSA-SHA512 signature of *input_path*.

    The EC private key is loaded from the PEM file at *private_key_path*.
    """
    with open(private_key_path, 'rb') as key_file:
        key = load_pem_private_key(key_file.read(), None, default_backend())
    with open(input_path, 'rb') as payload:
        return key.sign(payload.read(), ec.ECDSA(hashes.SHA512()))
def hash_file(file_path):
    """Return the hex SHA512 digest of a file, read in 64 KiB chunks."""
    digest = hashlib.sha512()
    with open(file_path, 'rb') as f:
        # iter() with a sentinel yields chunks until read() returns b''
        for chunk in iter(lambda: f.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()

View File

@ -0,0 +1,72 @@
# -*- coding: utf-8 -*-
class CircularDependencyError(Exception):
    """Raised when the dependency graph contains a cycle and cannot be resolved."""
class Node:
    """A named node in the dependency graph with a set of dependency names."""

    def __init__(self, name, depends):
        self.name = name
        # Stored as a set so duplicates collapse and removal is O(1)
        self.depends = set(depends)
# "Batches" are sets of tasks that can be run together
def solve_batches(nodes):
    """Topologically sort *nodes* into "batches" - sets of names that can run together.

    Returns a list of sets of node names; each set only depends on names in
    earlier sets. Raises CircularDependencyError when the graph has a cycle.
    """
    # Map each node name to a private copy of its dependency set
    remaining = {node.name: node.depends.copy() for node in nodes}
    batches = []
    while remaining:
        # Names whose dependencies are all satisfied can run now
        runnable = {name for name, deps in remaining.items() if not deps}
        # No runnable nodes while some remain means the graph has a cycle
        if not runnable:
            raise CircularDependencyError(remaining)
        # Retire the runnable names and drop them from everyone's dependencies
        for name in runnable:
            del remaining[name]
        for deps in remaining.values():
            deps -= runnable
        batches.append(runnable)
    return batches
def solve_flat(nodes):
    """Return a flat list of node names in a valid execution order."""
    return [name for batch in solve_batches(nodes) for name in batch]
# Demonstration / self-test of the solver.
# BUG FIX: the original tail used Python 2 print statements (a SyntaxError
# under python3, which this package targets) and called the undefined names
# get_task_batches, Task and format_nodes. It is rewritten against the
# solve_batches API above and guarded so it only runs when executed directly.
def _demo():
    a = Node("a", [])
    b = Node("b", [])
    c = Node("c", ["a"])
    d = Node("d", ["b"])
    e = Node("e", ["c", "d"])
    f = Node("f", ["a", "b"])
    g = Node("g", ["e", "f"])
    h = Node("h", ["g"])
    i = Node("i", ["a"])
    j = Node("j", ["b"])
    k = Node("k", [])
    nodes = (a, b, c, d, e, f, g, h, i, j, k)
    # Show the batches on screen
    print("Batches:")
    for batch in solve_batches(nodes):
        print(", ".join(sorted(batch)))
    print()
    # An example, *broken* dependency graph: "a" and "i" depend on each other
    a = Node("a", ["i"])
    nodes = (a, b, c, d, e, f, g, h, i, j)
    # Trying to resolve the dependencies raises an exception showing the
    # current (unresolvable) state of the graph
    print("A broken dependency graph example:")
    try:
        solve_batches(nodes)
    except CircularDependencyError as err:
        print(f"CircularDependencyError: {err}")

if __name__ == '__main__':
    _demo()

View File

@ -0,0 +1,31 @@
# -*- coding: utf-8 -*-
class AppNotFoundError(Exception):
    """Raised when an application is not present in the repository."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return f'Application {self.name} not found'
class ContainerNotFoundError(Exception):
    """Raised when a container is not present in the repository."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return f'Container {self.name} not found'
class ImageNotFoundError(Exception):
    """Raised when an image is not present in the repository."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return f'Image {self.name} not found'
class InvalidContainerStateError(Exception):
    """Container is not in the expected state (running, stopped etc.)."""

    def __init__(self, container_name, container_state):
        self.container_name = container_name
        self.container_state = container_state

    def __str__(self):
        return f'Container "{self.container_name}" reached unexpected state {self.container_state}'

View File

@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
import fcntl
import functools
from contextlib import contextmanager
def locked_ex(lock_file):
    """Decorator running the wrapped callable under an exclusive lock on *lock_file*."""
    def decorator(target):
        # IMPROVEMENT: preserve the wrapped function's name/docstring/metadata
        @functools.wraps(target)
        def wrapper(*args, **kwargs):
            with lock_ex(lock_file):
                return target(*args, **kwargs)
        return wrapper
    return decorator
@contextmanager
def lock_ex(lock_file):
    """Context manager holding an exclusive advisory lock on *lock_file*.

    The lock file is (re)created on entry and the lock is released when the
    file handle is closed on exit.
    """
    with open(lock_file, 'w') as handle:
        fcntl.lockf(handle, fcntl.LOCK_EX)
        yield handle

View File

@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-
import os
import shutil
import tarfile
from . import crypto
from . import repo_local
from . import repo_publish
from . import utils
from .container import Container
from .imagebuilder import ImageBuilder
from .paths import LAYERS_DIR, PUB_LAYERS_DIR
# Keys persisted in the repositories for each image
DEFINITION_MEMBERS = ('parent', 'env', 'uid', 'gid', 'cmd', 'cwd', 'ready', 'halt', 'size', 'dlsize', 'hash')

class Image:
    """A filesystem layer plus metadata, buildable from an image file and publishable as a tar.xz archive."""

    def __init__(self, name, load_from_repo=False):
        self.name = name
        self.layer_path = os.path.join(LAYERS_DIR, name)
        self.archive_path = os.path.join(PUB_LAYERS_DIR, f'{name}.tar.xz')
        self.parent = None      # name of the image this one inherits from
        self.env = {}
        self.uid = None
        self.gid = None
        self.cmd = None
        self.cwd = None
        self.ready = None
        self.halt = None
        self.size = None        # uncompressed size of the published layer
        self.dlsize = None      # size of the compressed archive
        self.hash = None        # SHA512 of the compressed archive
        if load_from_repo:
            self.set_definition(repo_local.get_image(name))

    def set_definition(self, definition):
        """Populate attributes from a definition dict; unknown keys are ignored."""
        for member in DEFINITION_MEMBERS:
            if member in definition:
                setattr(self, member, definition[member])

    def get_definition(self):
        """Return a dict of all truthy definition members."""
        return {member: getattr(self, member)
                for member in DEFINITION_MEMBERS if getattr(self, member)}

    def get_container(self, container_name):
        """Build a Container definition by walking the image inheritance chain."""
        if self.parent:
            container = Image(self.parent, True).get_container(container_name)
        else:
            container = Container(container_name)
        container.layers.append(self.name)
        container.env.update(self.env)
        # Scalar settings of the more derived image win over the parent's
        for member in ('uid', 'gid', 'cmd', 'cwd', 'ready', 'halt'):
            value = getattr(self, member)
            if value:
                setattr(container, member, value)
        return container

    def build(self, filename):
        """Build the image layer from an image file and register it locally."""
        # Chown is possible only when the process is running as root, for user namespaces, see https://linuxcontainers.org/lxc/manpages/man1/lxc-usernsexec.1.html
        os.makedirs(self.layer_path, 0o755, True)
        os.chown(self.layer_path, 100000, 100000)
        ImageBuilder().build(self, filename)
        repo_local.register_image(self.name, self.get_definition())

    def delete(self):
        """Unregister the image and remove its layer directory."""
        repo_local.unregister_image(self.name)
        try:
            shutil.rmtree(self.layer_path)
        except FileNotFoundError:
            pass

    def publish(self):
        """Compress the layer into a tar.xz archive and register it for publishing."""
        counter = utils.TarSizeCounter()
        os.makedirs(PUB_LAYERS_DIR, 0o755, True)
        with tarfile.open(self.archive_path, 'w:xz') as tar:
            tar.add(self.layer_path, self.name, filter=counter.add_file)
        self.size = counter.size
        self.dlsize = os.path.getsize(self.archive_path)
        self.hash = crypto.hash_file(self.archive_path)
        repo_publish.register_image(self.name, self.get_definition())

    def unpublish(self):
        """Unregister the image from the publish repository and remove its archive."""
        repo_publish.unregister_image(self.name)
        try:
            os.unlink(self.archive_path)
        except FileNotFoundError:
            pass

View File

@ -0,0 +1,136 @@
# -*- coding: utf-8 -*-
import os
import shutil
import stat
import subprocess
import tempfile
from .container import Container
from .paths import VOLUME_DIR
class ImageBuilder:
    """Parses an image file and executes its directives to build an image layer."""

    def build(self, image, filename):
        # Reset internal state, read and process lines from filename
        self.image = image
        self.filename = filename
        self.script_eof = None      # heredoc terminator while inside a RUN block
        self.script_lines = []
        with open(filename, 'r') as f:
            for line in f:
                self.process_line(line.strip())

    def process_line(self, line):
        # Parse a line from image file; lines inside a RUN heredoc are
        # collected verbatim until the terminator is reached
        if self.script_eof:
            if line == self.script_eof:
                self.script_eof = None
                self.run_script(self.script_lines)
            else:
                self.script_lines.append(line)
        elif line:
            self.process_directive(*line.split(None, 1))

    def process_directive(self, directive, args):
        # Process a directive from image file
        if 'RUN' == directive:
            # Start collecting a shell script terminated by the given token
            self.script_lines = []
            self.script_eof = args
        elif 'FROM' == directive:
            # Set the name of image from which this one inherits
            self.image.parent = args
        elif 'COPY' == directive:
            srcdst = args.split()
            self.copy_files(srcdst[0], srcdst[1] if len(srcdst) > 1 else '')
        elif 'ENV' == directive:
            # Sets environment records
            # BUG FIX: image.env is a dict (see Image.__init__), the original
            # called .append() on it, which raised AttributeError
            key, value = args.split(None, 1)
            self.image.env[key] = value
        elif 'USER' == directive:
            # Sets init UID / GID
            self.image.uid,self.image.gid = args.split() #TODO: Get UID/GID by name + get GID automatically as primary UID group
        elif 'CMD' == directive:
            # Sets init command
            self.image.cmd = args
        elif 'WORKDIR' == directive:
            # Sets init working directory
            self.image.cwd = args
        elif 'HALT' == directive:
            # Sets signal to be sent to init when stopping the container
            self.image.halt = args
        elif 'READY' == directive:
            # Sets a command to check readiness of the container after it has been started
            self.image.ready = args

    def run_script(self, script_lines):
        # Creates a temporary container, runs a script in its namespace, and stores the files modified by it as part of the layer
        # TODO: Run the script as the correct user if UID/GID has been already set - doesn't this the LXC init do automatically?
        os.makedirs(VOLUME_DIR, 0o755, True)
        script_fd, script_path = tempfile.mkstemp(suffix='.sh', dir=VOLUME_DIR, text=True)
        script_name = os.path.basename(script_path)
        script_lines = '\n'.join(script_lines)
        with os.fdopen(script_fd, 'w') as script:
            script.write(f'#!/bin/sh\nset -ev\n\n{script_lines}\n')
        os.chmod(script_path, 0o700)
        os.chown(script_path, 100000, 100000)
        # Get the current image container definition and add the script file as a mount
        container = self.image.get_container(self.image.name)
        # BUG FIX: Container stores the build flag as "build" (checked in
        # Container.create() to skip the ephemeral layer); the original set
        # a nonexistent "is_build" attribute, so build containers were
        # created with an ephemeral layer
        container.build = True
        container.mounts[script_name] = (os.path.basename(script_path), False)
        # Create a temporary container and run the script in it
        container.create()
        container.execute(['/bin/sh', '-lc', os.path.join('/', script_name)], True)
        container.destroy()
        os.unlink(script_path)

    def copy_files(self, src, dst):
        # Copy files from the host or download them from a http(s) URL
        dst = os.path.join(self.image.layer_path, dst.lstrip('/'))
        if src.startswith('http://') or src.startswith('https://'):
            unpack_http_archive(src, dst)
        else:
            src = os.path.join(os.path.dirname(self.filename), src)
            copy_tree(src, dst)
        # Shift UID/GID of the files to the unprivileged range
        shift_uid(dst, os.stat(dst, follow_symlinks=False))
def unpack_http_archive(src, dst):
    """Download a tar archive via http(s) and unpack it into *dst*.

    The decompression flag is picked from the URL suffix (gzip by default,
    bzip2 for .bz2, xz for .xz).

    Raises:
        subprocess.CalledProcessError: if either wget or tar exits non-zero.
            Previously failures were silently ignored, which could leave a
            partially unpacked (corrupt) layer behind.
    """
    # TODO: Rewrite to python (requests, tarfile)
    xf = 'xzf'
    if src.endswith('.bz2'):
        xf = 'xjf'
    elif src.endswith('.xz'):
        xf = 'xJf'
    with subprocess.Popen(['wget', src, '-O', '-'], stdout=subprocess.PIPE) as wget:
        with subprocess.Popen(['tar', xf, '-', '-C', dst], stdin=wget.stdout) as tar:
            # Close our copy of the pipe so tar sees EOF when wget exits
            wget.stdout.close()
            tar.wait()
    # Both context managers have waited for their process; fail loudly on error
    if wget.returncode != 0:
        raise subprocess.CalledProcessError(wget.returncode, 'wget')
    if tar.returncode != 0:
        raise subprocess.CalledProcessError(tar.returncode, 'tar')
def copy_tree(src, dst):
    """Recursively copy *src* from the host into *dst*.

    Unlike shutil.copytree, the destination directory may already exist;
    files are copied with metadata (shutil.copy2) and directory metadata is
    restored afterwards (shutil.copystat).
    """
    if os.path.isdir(src):
        os.makedirs(dst, exist_ok=True)
        for entry in os.listdir(src):
            copy_tree(os.path.join(src, entry), os.path.join(dst, entry))
        shutil.copystat(src, dst)
    else:
        # Plain file (or non-directory): copy it with its metadata
        shutil.copy2(src, dst)
def shift_uid(path, path_stat):
    # Shifts UID/GID of a file or a directory and its contents to the unprivileged range
    # (offset 100000, matching the "lxc.idmap = u/g 0 100000 65536" mapping).
    # IDs already at or above 100000 are left untouched, so the shift is idempotent.
    # The function parameters could arguably be more friendly, but os.scandir() already calls stat() on the entries,
    # so it would be wasteful to not reuse them for considerable performance gain
    uid = path_stat.st_uid
    gid = path_stat.st_gid
    do_chown = False
    if uid < 100000:
        uid = uid + 100000
        do_chown = True
    if gid < 100000:
        gid = gid + 100000
        do_chown = True
    if do_chown:
        # follow_symlinks=False: change the link itself, never its target
        os.chown(path, uid, gid, follow_symlinks=False)
    # Recurse into directories, reusing the stat() results scandir provides
    if stat.S_ISDIR(path_stat.st_mode):
        for entry in os.scandir(path):
            shift_uid(entry.path, entry.stat(follow_symlinks=False))

View File

@ -0,0 +1,58 @@
# -*- coding: utf-8 -*-
import fcntl
import ipaddress
import socket
import struct
from .paths import HOSTS_FILE
INTERFACE_NAME = 'spocbr0'
# ioctl magic constants taken from https://git.musl-libc.org/cgit/musl/tree/include/sys/ioctl.h (same as glibc)
IOCTL_SIOCGIFADDR = 0x8915
IOCTL_SIOCGIFNETMASK = 0x891b
def read_leases():
    """Read all IP-hostname pairs from the global hosts file.

    Returns:
        dict: mapping of IP address strings to hostnames. If the file is
        missing, unreadable, or malformed, falls back to a single implicit
        lease mapping the bridge interface's own IP to 'host'.
    """
    try:
        with open(HOSTS_FILE, 'r') as f:
            leases = [lease.strip().split(' ', 1) for lease in f]
        return {ip: hostname for ip, hostname in leases}
    # Narrowed from a bare "except:" which also swallowed SystemExit and
    # KeyboardInterrupt; OSError covers I/O failures, ValueError covers
    # malformed lines (unpacking a line without a space)
    except (OSError, ValueError):
        interface = get_bridge_interface()
        return {str(interface.ip): 'host'}
def write_leases(leases):
    """Write all IP-hostname pairs to the global hosts file.

    Entries are ordered numerically by IP address (via inet_aton packing).
    """
    ordered = sorted(leases.items(), key=lambda pair: socket.inet_aton(pair[0]))
    lines = [f'{ip} {hostname}\n' for ip, hostname in ordered]
    with open(HOSTS_FILE, 'w') as f:
        f.writelines(lines)
def get_bridge_interface():
    # Returns the bridge interface's IP address and netmask as an
    # ipaddress.IPv4Interface. Queries the kernel directly via ioctl()
    # (SIOCGIFADDR / SIOCGIFNETMASK) on a throwaway AF_INET socket.
    with socket.socket(socket.AF_INET) as sock:
        # Get IPv4Interface for given interface name.
        # The ifreq struct is 256 zero-padded bytes starting with the interface
        # name; in the response, bytes 20:24 hold the 4-byte IPv4 address
        # (sockaddr_in.sin_addr within the returned ifreq)
        packed_ifname = struct.pack('256s', INTERFACE_NAME.encode())
        ip = socket.inet_ntoa(fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFADDR, packed_ifname)[20:24])
        netmask = socket.inet_ntoa(fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFNETMASK, packed_ifname)[20:24])
        return ipaddress.IPv4Interface(f'{ip}/{netmask}')
def request_ip(container_name):
    """Lease an IP address for the given container hostname.

    Returns:
        tuple: (ip, network prefix length, gateway ip), all as strings.

    Raises:
        RuntimeError: if the bridge network has no unassigned addresses left.
            (Previously the function fell through returning None, which
            surfaced as a confusing TypeError on unpacking in the caller.)
    """
    interface = get_bridge_interface()
    leases = read_leases()
    # Reuse an existing lease if one is already held by this hostname
    for ip, hostname in leases.items():
        if hostname == container_name:
            return (ip, str(interface.network.prefixlen), str(interface.ip))
    # Otherwise, lease the first unassigned IP from the interface's network
    for ip in interface.network.hosts():
        ip = str(ip)
        if ip not in leases:
            leases[ip] = container_name
            write_leases(leases)
            return (ip, str(interface.network.prefixlen), str(interface.ip))
    raise RuntimeError(f'No free IP addresses left in network {interface.network}')
def release_ip(container_name):
    """Remove the container's lease (if any) from the hosts file."""
    remaining = {}
    for ip, hostname in read_leases().items():
        if hostname != container_name:
            remaining[ip] = hostname
    write_leases(remaining)

View File

@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
# Local SPOC state: images, containers, volumes, DHCP-like leases and the
# JSON repository index (with its lock under /run/lock)
ROOT_DIR = '/var/lib/spoc'
CONTAINERS_DIR = '/var/lib/spoc/containers'
LAYERS_DIR = '/var/lib/spoc/layers'
VOLUME_DIR = '/var/lib/spoc/volumes'
HOSTS_FILE = '/var/lib/spoc/hosts'
REPO_FILE = '/var/lib/spoc/repository.json'
REPO_LOCK = '/run/lock/spoc-repository.lock'
LOG_DIR = '/var/log/spoc'
# Publishing (build server) tree: published layers/apps, the signed
# repository index, and the private key used to sign it
PUB_ROOT_DIR = '/srv/build/spoc'
PUB_LAYERS_DIR = '/srv/build/spoc/layers'
PUB_APPS_DIR = '/srv/build/spoc/apps'
PUB_REPO_FILE = '/srv/build/spoc/repository.json'
PUB_SIG_FILE = '/srv/build/spoc/repository.sig'
PUB_REPO_LOCK = '/run/lock/spoc-publish.lock'
PUB_PRIVATE_KEY = '/etc/spoc/publish.key'

View File

@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-
import json
from .exceptions import AppNotFoundError, ContainerNotFoundError, ImageNotFoundError
from .flock import lock_ex
from .paths import REPO_FILE, REPO_LOCK
TYPE_APP = 'apps'
TYPE_CONTAINER = 'containers'
TYPE_IMAGE = 'images'
def load():
    """Read the local repository index from disk.

    A missing file is treated as an empty repository (EAFP) so first use
    works without any setup.
    """
    try:
        with open(REPO_FILE) as f:
            return json.load(f)
    except FileNotFoundError:
        return {TYPE_IMAGE: {}, TYPE_CONTAINER: {}, TYPE_APP: {}}

def save(data):
    """Write the local repository index to disk as pretty-printed JSON."""
    with open(REPO_FILE, 'w') as f:
        json.dump(data, f, sort_keys=True, indent=4)
def get_entries(entry_type):
    """Return all entries of the given type, read under the repository lock."""
    with lock_ex(REPO_LOCK):
        data = load()
    return data[entry_type]

def get_entry(entry_type, name):
    """Return a single entry; raises KeyError if it does not exist."""
    return get_entries(entry_type)[name]
def add_entry(entry_type, name, definition):
    """Insert or replace a single definition under the repository lock."""
    with lock_ex(REPO_LOCK):
        repo = load()
        repo[entry_type][name] = definition
        save(repo)

def delete_entry(entry_type, name):
    """Remove a definition under the repository lock.

    A missing entry is a no-op; the index file is only rewritten when
    something was actually deleted.
    """
    with lock_ex(REPO_LOCK):
        repo = load()
        if name in repo[entry_type]:
            del repo[entry_type][name]
            save(repo)
def get_images():
    """Return all registered image definitions."""
    return get_entries(TYPE_IMAGE)

def get_image(image_name):
    """Return the definition of a single image.

    Raises ImageNotFoundError if the image is not registered.
    """
    try:
        return get_entry(TYPE_IMAGE, image_name)
    except KeyError as e:
        raise ImageNotFoundError(image_name) from e

def register_image(image_name, definition):
    """Add or replace an image definition in the local repository."""
    add_entry(TYPE_IMAGE, image_name, definition)

def unregister_image(image_name):
    """Remove an image definition from the local repository, if present."""
    delete_entry(TYPE_IMAGE, image_name)

def get_containers():
    """Return all registered container definitions."""
    return get_entries(TYPE_CONTAINER)

def get_container(container_name):
    """Return the definition of a single container.

    Raises ContainerNotFoundError if the container is not registered.
    """
    try:
        return get_entry(TYPE_CONTAINER, container_name)
    except KeyError as e:
        raise ContainerNotFoundError(container_name) from e

def register_container(container_name, definition):
    """Add or replace a container definition in the local repository."""
    add_entry(TYPE_CONTAINER, container_name, definition)

def unregister_container(container_name):
    """Remove a container definition from the local repository, if present."""
    delete_entry(TYPE_CONTAINER, container_name)
def get_apps():
    """Return all registered application definitions."""
    return get_entries(TYPE_APP)

def get_app(app_name):
    """Return the definition of a single application.

    Raises AppNotFoundError if the application is not registered.
    """
    try:
        # Bug fix: previously referenced the undefined name "image_name"
        # (NameError at runtime) and raised ImageNotFoundError for apps
        return get_entry(TYPE_APP, app_name)
    except KeyError as e:
        raise AppNotFoundError(app_name) from e

def register_app(app_name, definition):
    """Add or replace an application definition in the local repository."""
    # Bug fix: previously passed the undefined name "image_name"
    add_entry(TYPE_APP, app_name, definition)

def unregister_app(app_name):
    """Remove an application definition from the local repository, if present."""
    # Bug fix: previously passed the undefined name "image_name"
    delete_entry(TYPE_APP, app_name)

View File

@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
import json
import requests
from . import crypto
from .exceptions import AppNotFoundError, ImageNotFoundError
TYPE_APP = 'apps'
TYPE_IMAGE = 'images'
def load():
    # Stub: fetching (and presumably signature-verifying) the online
    # repository index is not implemented yet
    raise NotImplementedError()

def get_entries(entry_type):
    """Return all entries of the given type from the online repository."""
    data = load()
    return data[entry_type]

def get_entry(entry_type, name):
    """Return a single entry; raises KeyError if it does not exist."""
    return get_entries(entry_type)[name]
def get_images():
    """Return all image definitions from the online repository."""
    return get_entries(TYPE_IMAGE)

def get_image(image_name):
    """Return the definition of a single image from the online repository.

    Raises ImageNotFoundError if the image does not exist.
    """
    try:
        return get_entry(TYPE_IMAGE, image_name)
    except KeyError as e:
        raise ImageNotFoundError(image_name) from e
def get_apps():
    """Return all application definitions from the online repository."""
    return get_entries(TYPE_APP)

def get_app(app_name):
    """Return the definition of a single application from the online repository.

    Raises AppNotFoundError if the application does not exist.
    """
    try:
        # Bug fix: previously referenced the undefined name "image_name"
        # (NameError at runtime) and raised ImageNotFoundError for apps
        return get_entry(TYPE_APP, app_name)
    except KeyError as e:
        raise AppNotFoundError(app_name) from e

View File

@ -0,0 +1,79 @@
# -*- coding: utf-8 -*-
import json
from . import crypto
from .exceptions import AppNotFoundError, ImageNotFoundError
from .flock import lock_ex
from .paths import PUB_PRIVATE_KEY, PUB_REPO_FILE, PUB_REPO_LOCK, PUB_SIG_FILE
TYPE_APP = 'apps'
TYPE_IMAGE = 'images'
def load():
    """Read the publishing repository index from disk.

    A missing file is treated as an empty repository (EAFP).
    """
    try:
        with open(PUB_REPO_FILE) as f:
            return json.load(f)
    except FileNotFoundError:
        return {TYPE_IMAGE: {}, TYPE_APP: {}}

def save(data):
    """Write the publishing repository index and re-sign it."""
    with open(PUB_REPO_FILE, 'w') as f:
        json.dump(data, f, sort_keys=True, indent=4)
    # Cryptographically sign the repository file so clients can verify it
    signature = crypto.sign_file(PUB_PRIVATE_KEY, PUB_REPO_FILE)
    with open(PUB_SIG_FILE, 'wb') as f:
        f.write(signature)
def get_entries(entry_type):
    """Return all entries of the given type, read under the publishing lock."""
    with lock_ex(PUB_REPO_LOCK):
        data = load()
    return data[entry_type]

def get_entry(entry_type, name):
    """Return a single entry; raises KeyError if it does not exist."""
    return get_entries(entry_type)[name]
def add_entry(entry_type, name, definition):
    """Insert or replace a single definition under the publishing lock."""
    with lock_ex(PUB_REPO_LOCK):
        repo = load()
        repo[entry_type][name] = definition
        save(repo)

def delete_entry(entry_type, name):
    """Remove a definition under the publishing lock.

    A missing entry is a no-op; the index is only rewritten (and re-signed)
    when something was actually deleted.
    """
    with lock_ex(PUB_REPO_LOCK):
        repo = load()
        if name in repo[entry_type]:
            del repo[entry_type][name]
            save(repo)
def get_images():
    """Return all published image definitions."""
    return get_entries(TYPE_IMAGE)

def get_image(image_name):
    """Return the definition of a single published image.

    Raises ImageNotFoundError if the image is not published.
    """
    try:
        return get_entry(TYPE_IMAGE, image_name)
    except KeyError as e:
        raise ImageNotFoundError(image_name) from e

def register_image(image_name, definition):
    """Add or replace an image definition in the publishing repository."""
    add_entry(TYPE_IMAGE, image_name, definition)

def unregister_image(image_name):
    """Remove an image definition from the publishing repository, if present."""
    delete_entry(TYPE_IMAGE, image_name)
def get_apps():
    """Return all published application definitions."""
    return get_entries(TYPE_APP)

def get_app(app_name):
    """Return the definition of a single published application.

    Raises AppNotFoundError if the application is not published.
    """
    try:
        # Bug fix: previously referenced the undefined name "image_name"
        # (NameError at runtime) and raised ImageNotFoundError for apps
        return get_entry(TYPE_APP, app_name)
    except KeyError as e:
        raise AppNotFoundError(app_name) from e

def register_app(app_name, definition):
    """Add or replace an application definition in the publishing repository."""
    # Bug fix: previously passed the undefined name "image_name"
    add_entry(TYPE_APP, app_name, definition)

def unregister_app(app_name):
    """Remove an application definition from the publishing repository, if present."""
    # Bug fix: previously passed the undefined name "image_name"
    delete_entry(TYPE_APP, app_name)

View File

@ -0,0 +1,52 @@
# -*- coding: utf-8 -*-
LXC_CONTAINER_TEMPLATE = '''# Container name
lxc.uts.name = {name}
# Network
lxc.net.0.type = veth
lxc.net.0.link = spocbr0
lxc.net.0.flags = up
lxc.net.0.ipv4.address = {ip_address}/{ip_netmask}
lxc.net.0.ipv4.gateway = {ip_gateway}
# Root filesystem
lxc.rootfs.path = {rootfs}
# Mounts
lxc.mount.entry = shm dev/shm tmpfs rw,nodev,noexec,nosuid,relatime,mode=1777,create=dir 0 0
lxc.mount.entry = /etc/resolv.conf etc/resolv.conf none bind,ro,create=file 0 0
lxc.mount.entry = {hosts} etc/hosts none bind,ro,create=file 0 0
{mounts}
# Environment
lxc.environment = PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
{env}
# Init
lxc.init.uid = {uid}
lxc.init.gid = {gid}
lxc.init.cwd = {cwd}
lxc.init.cmd = {cmd}
# Halt
lxc.signal.halt = {halt}
# Log
lxc.console.size = 1MB
lxc.console.logfile = {log}
# ID map
lxc.idmap = u 0 100000 65536
lxc.idmap = g 0 100000 65536
# Hooks
lxc.hook.version = 1
lxc.hook.pre-start = /usr/bin/spoc-hook
lxc.hook.post-stop = /usr/bin/spoc-hook
# Other
lxc.arch = linux64
lxc.include = /usr/share/lxc/config/common.conf
lxc.include = /usr/share/lxc/config/userns.conf
'''

View File

@ -0,0 +1,18 @@
# -*- coding: utf-8 -*-
class TarSizeCounter:
    """Accumulates the total size of files added to a tar archive.

    add_file returns the tarinfo unchanged, so it is suitable as a
    TarFile.add(filter=...) callback -- presumably how it is used; the
    caller reads the running total from the ``size`` attribute.
    """

    def __init__(self):
        # Total size in bytes of all files seen so far
        self.size = 0

    def add_file(self, tarinfo):
        self.size += tarinfo.size
        return tarinfo
SIZE_PREFIXES = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')

def readable_size(bytes):
    """Format a size in bytes as a human-readable string, e.g. '1.50 MB'.

    Bug fixes: the loop condition is now >= 1024 so that exactly 1024
    renders as '1.00 kB' (previously '1024.00 B'), and the prefix index is
    clamped so absurdly large values cannot overrun SIZE_PREFIXES.
    """
    index = 0
    while bytes >= 1024 and index < len(SIZE_PREFIXES) - 1:
        index += 1
        bytes /= 1024
    return f'{bytes:.2f} {SIZE_PREFIXES[index]}B'