Compare commits

...

13 Commits

46 changed files with 2383 additions and 2111 deletions

5
.gitignore vendored Normal file
View File

@ -0,0 +1,5 @@
__pycache__/
.coverage
.vscode/
*.egg-info/
coverage.xml

View File

@ -1,21 +0,0 @@
# Contributor: Disassembler <disassembler@dasm.cz>
# Maintainer: Disassembler <disassembler@dasm.cz>
pkgname=spoc
pkgver=0.9.3
pkgrel=0
pkgdesc="SPOC application, container, and image manager"
url="https://spotter.vm/"
arch="noarch"
license="GPL"
depends="lxc python3 py3-cffi py3-cryptography py3-requests"
options="!check !strip"
build() {
return 0
}
package() {
mkdir -p ${pkgdir}
cp -rp etc ${pkgdir}
cp -rp usr ${pkgdir}
}

View File

@ -1,16 +0,0 @@
#!/sbin/openrc-run
description="SPOC"
depend() {
need localmount sysfs cgroups
after firewall net
}
start() {
/usr/bin/spoc-app start-autostarted
}
stop() {
/usr/bin/spoc-app stop-all
}

View File

@ -1,13 +0,0 @@
[general]
data-dir = /var/lib/spoc/
log-dir = /var/log/spoc/
network-interface = spocbr0
resolv-conf = /etc/resolv.conf
[publish]
publish-dir = /srv/build/spoc/
signing-key = /etc/spoc/publish.key
[repo]
url = https://repo.spotter.cz/spoc/
public-key = MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEWJXH4Qm0kt2L86sntQH+C1zOJNQ0qMRt0vx4krTxRs9HQTQYAy//JC92ea2aKleA8OL0JF90b1NYXcQCWdAS+vE/ng9IEAii8C2+5nfuFeZ5YUjbQhfFblwHSM0c7hEG

60
setup.cfg Normal file
View File

@ -0,0 +1,60 @@
[metadata]
name = spoc
version = 2.0.0
license = GPLv3+
author = Disassembler
author_email = disassembler@dasm.cz
description = SPOC application and container manager. A simple orchestrator for podman.
classifiers =
Development Status :: 5 - Production/Stable
Environment :: Console
Intended Audience :: System Administrators
License :: OSI Approved :: GNU General Public License v3 or later
Operating System :: POSIX
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Topic :: System :: Installation/Setup
Topic :: System :: Systems Administration
[options]
packages = find:
package_dir = =src
py_modules = spoc_cli
python_requires = >= 3.6
install_requires = requests
[options.packages.find]
where = src
[options.entry_points]
console_scripts =
spoc = spoc_cli:main
[tool:pytest]
testpaths = tests
[coverage:run]
branch = True
omit =
*/dist-packages/*
*/site-packages/*
[pylint.BASIC]
good-names = e,ex,f,_
[pylint.'MESSAGES CONTROL']
disable = missing-docstring
[tox:tox]
[testenv:{pylint,pytest}]
skipsdist = True
usedevelop = True
deps =
pylint
pytest-cov
commands =
pytest: pytest -vv --cov src --cov tests --cov-report term --cov-report xml --cov-fail-under 100 {posargs}
pylint: pylint src tests {posargs}

6
setup.py Normal file
View File

@ -0,0 +1,6 @@
# This file is intended to be used only by PEP 517 incompatible build frontends.
# All project metadata are declared in setup.cfg.
from setuptools import setup
setup()

104
src/spoc/__init__.py Normal file
View File

@ -0,0 +1,104 @@
from pkg_resources import parse_version
from . import app
from . import autostart
from . import config
from . import podman
from . import repo
from .flock import locked
class AppError(Exception):
    """Base class for application-related errors; carries the app name."""
    def __init__(self, app_name):
        super().__init__(app_name)
        # Name of the application the error relates to
        self.app_name = app_name

class AppAlreadyInstalledError(AppError):
    """Raised when installing an application that is already installed."""
    pass

class AppNotInstalledError(AppError):
    """Raised when operating on an application that is not installed."""
    pass

class AppNotInRepoError(AppError):
    """Raised when the application does not exist in the online repository."""
    pass

class AppNotUpdateableError(AppError):
    """Raised when the application has no newer version to update to."""
    pass
def list_installed():
    """Return installed applications as {name: version}, sorted by name."""
    return dict(sorted(podman.get_apps().items()))

def list_online():
    """Return applications available in the repository as {name: version}, sorted by name."""
    return {app:definition['version'] for app,definition in sorted(repo.get_apps().items())}
def list_updates():
    """Return installed applications that have a newer version online.

    The result maps app name to an 'installed -> online' version string,
    sorted by name.
    """
    online = {name: definition['version'] for name, definition in repo.get_apps().items()}
    updates = {}
    for name, version in sorted(podman.get_apps().items()):
        if name in online and parse_version(online[name]) > parse_version(version):
            updates[name] = f'{version} -> {online[name]}'
    return updates
@locked()
def install(app_name, from_file=None):
    """Install an application from the online repository or a local file.

    :raises AppAlreadyInstalledError: if the app is already installed.
    :raises AppNotInRepoError: if no file is given and the repo lacks the app.
    """
    if app_name in podman.get_apps():
        raise AppAlreadyInstalledError(app_name)
    available = bool(from_file) or app_name in repo.get_apps()
    if not available:
        raise AppNotInRepoError(app_name)
    app.install(app_name, from_file=from_file)
@locked()
def update(app_name, from_file=None):
    """Update an installed application.

    Without *from_file*, the app must have a newer version in the online repo.

    :raises AppNotInstalledError: if the app is not installed.
    :raises AppNotUpdateableError: if there is nothing to update to.
    """
    if app_name not in podman.get_apps():
        raise AppNotInstalledError(app_name)
    updateable = bool(from_file) or app_name in list_updates()
    if not updateable:
        raise AppNotUpdateableError(app_name)
    app.update(app_name, from_file=from_file)
@locked()
def uninstall(app_name):
    """Uninstall an installed application and all its resources."""
    if app_name not in podman.get_apps():
        raise AppNotInstalledError(app_name)
    app.uninstall(app_name)

@locked()
def start(app_name):
    """Start the pod of an installed application."""
    if app_name not in podman.get_apps():
        raise AppNotInstalledError(app_name)
    podman.start_pod(app_name)

@locked()
def stop(app_name):
    """Stop the pod of an installed application."""
    if app_name not in podman.get_apps():
        raise AppNotInstalledError(app_name)
    podman.stop_pod(app_name)

def status(app_name=None):
    """Return pod status for one installed app, or for all when *app_name* is None."""
    if app_name is not None and app_name not in podman.get_apps():
        raise AppNotInstalledError(app_name)
    return podman.get_pod_status(app_name)

@locked()
def set_autostart(app_name, enabled):
    """Mark or unmark an installed application for automatic start."""
    if app_name not in podman.get_apps():
        raise AppNotInstalledError(app_name)
    autostart.set_app(app_name, enabled)

@locked()
def start_autostarted():
    """Start all applications marked for autostart."""
    for app_name in autostart.get_apps():
        podman.start_pod(app_name)

@locked()
def stop_all():
    """Stop all installed applications."""
    for app_name in podman.get_apps():
        podman.stop_pod(app_name)

@locked()
def login(host, username, password):
    """Store registry credentials and refresh the repo index using them."""
    config.write_auth(host, username, password)
    repo.load(force=True)

@locked()
def prune():
    """Remove unused podman data (images, volumes)."""
    podman.prune()

141
src/spoc/app.py Normal file
View File

@ -0,0 +1,141 @@
import os
import json
from . import autostart
from . import config
from . import depsolver
from . import podman
from . import repo
class App:
    """Manages the lifecycle of a single application.

    An application consists of a podman pod with labelled containers and
    volumes, plus an env file holding its environment variables.
    """
    def __init__(self, app_name):
        self.app_name = app_name
        # Env file passed to every container of the app via --env-file
        self.env_file = os.path.join(config.DATA_DIR, f'{app_name}.env')

    def get_definition(self, from_file=None):
        """Return the app definition from a local JSON file or the online repo."""
        if from_file:
            with open(from_file, encoding='utf-8') as f:
                return json.load(f)
        return repo.get_apps()[self.app_name]

    def install(self, is_update=False, from_file=None):
        """Install or update the app: volumes, env file, pod and containers."""
        definition = self.get_definition(from_file)
        version = definition['version']
        containers = definition['containers']
        # Create volumes
        volumes = set()
        for container in containers.values():
            volumes |= set(container.get('volumes', {}))
        existing_volumes = self.get_existing_volumes()
        if is_update:
            # Remove volumes no longer referenced by the containers
            volumes_to_remove = existing_volumes - volumes
            volumes -= existing_volumes
        else:
            # If this is a clean install, remove all volumes with the app label
            volumes_to_remove = existing_volumes
        self.remove_volumes(volumes_to_remove)
        self.create_volumes(volumes)
        # Create env file
        envs = definition.get('environment', {})
        if is_update:
            # Keep old values on update
            for key,value in self.read_env_vars().items():
                if key in envs:
                    envs[key] = value
        self.write_env_vars(envs)
        # Create pod and containers
        self.create_pod(version)
        self.create_containers(containers)

    def update(self, from_file=None):
        """Update the app in place, keeping values of existing env vars."""
        self.install(is_update=True, from_file=from_file)

    def uninstall(self):
        """Remove autostart mark, pod, env file and all app volumes."""
        autostart.set_app(self.app_name, False)
        self.remove_pod()
        self.remove_env_vars()
        self.remove_volumes(self.get_existing_volumes())

    def create_pod(self, version):
        # Remove a possibly existing pod first so creation starts clean
        podman.remove_pod(self.app_name)
        podman.create_pod(self.app_name, version)

    def remove_pod(self):
        podman.remove_pod(self.app_name)

    def read_env_vars(self):
        """Parse the env file into a dict; a missing file yields an empty dict."""
        env_vars = {}
        try:
            with open(self.env_file, encoding='utf-8') as f:
                lines = f.read().splitlines()
            for line in lines:
                # Split on the first '=' only - values may contain '='
                key,value = line.split('=', 1)
                env_vars[key] = value
        except FileNotFoundError:
            pass
        return env_vars

    def write_env_vars(self, env_vars):
        """Write the env file, one KEY=value pair per line."""
        os.makedirs(config.DATA_DIR, exist_ok=True)
        with open(self.env_file, 'w', encoding='utf-8') as f:
            for key,value in env_vars.items():
                f.write(f'{key}={value}\n')

    def remove_env_vars(self):
        """Delete the env file; a missing file is not an error."""
        try:
            os.unlink(self.env_file)
        except FileNotFoundError:
            pass

    def get_existing_volumes(self):
        """Return names of this app's volumes with the '<app>-' prefix stripped."""
        existing_volumes = podman.get_volumes_for_app(self.app_name)
        # Volumes are named '<app>-<volume>'; +1 for the dash
        strip_len = len(self.app_name)+1
        return set(volume[strip_len:] for volume in existing_volumes)

    def create_volumes(self, volumes):
        for volume in volumes:
            self.create_volume(volume)

    def remove_volumes(self, volumes):
        for volume in volumes:
            self.remove_volume(volume)

    def create_volume(self, volume):
        # Full volume name carries the app prefix
        volume = f'{self.app_name}-{volume}'
        podman.create_volume(self.app_name, volume)

    def remove_volume(self, volume):
        volume = f'{self.app_name}-{volume}'
        podman.remove_volume(volume)

    def create_containers(self, containers):
        """Create all containers, ordered by their 'requires' dependencies."""
        deps = depsolver.DepSolver()
        for name,definition in containers.items():
            deps.add(name, definition.get('requires', []))
        container_order = deps.solve()
        # Every container name becomes a host alias inside the pod
        hosts = set(containers)
        for name in container_order:
            self.create_container(name, containers[name], hosts)

    def create_container(self, name, definition, hosts):
        """Create a single container, prefixing all app-scoped names."""
        name = f'{self.app_name}-{name}'
        image = definition['image']
        volumes = {f'{self.app_name}-{volume}':mount
                   for volume,mount in definition.get('volumes', {}).items()}
        requires = set(f'{self.app_name}-{require}' for require in definition.get('requires', []))
        podman.create_container(self.app_name, name, image, env_file=self.env_file,
                                volumes=volumes, requires=requires, hosts=hosts)
def install(app_name, from_file=None):
    """Install application *app_name*, optionally from a local definition file."""
    App(app_name).install(from_file=from_file)

def update(app_name, from_file=None):
    """Update application *app_name*, optionally from a local definition file."""
    App(app_name).update(from_file=from_file)

def uninstall(app_name):
    """Uninstall application *app_name* and all its resources."""
    App(app_name).uninstall()

25
src/spoc/autostart.py Normal file
View File

@ -0,0 +1,25 @@
import os
from . import config
def get_apps():
    """Return the set of application names marked for autostart."""
    try:
        with open(config.AUTOSTART_FILE, encoding='utf-8') as f:
            return set(f.read().splitlines())
    except FileNotFoundError:
        # No autostart file means no apps are marked yet
        return set()
def set_app(app_name, enabled):
    """Mark or unmark *app_name* for automatic start and persist the set."""
    apps = get_apps()
    if enabled:
        apps.add(app_name)
    elif app_name in apps:
        apps.remove(app_name)
    else:
        # Disabling an app that is not marked - nothing to change on disk
        return
    os.makedirs(config.DATA_DIR, exist_ok=True)
    with open(config.AUTOSTART_FILE, 'w', encoding='utf-8') as f:
        for app in apps:
            f.write(f'{app}\n')

38
src/spoc/config.py Normal file
View File

@ -0,0 +1,38 @@
import json
from base64 import b64decode, b64encode
# Filesystem locations used across the package
DATA_DIR = '/var/lib/spoc'
AUTOSTART_FILE = '/var/lib/spoc/autostart'
LOCK_FILE = '/run/lock/spoc.lock'

# Registry settings; populated by read_auth() at import time
REGISTRY_HOST = None
REGISTRY_AUTH = None
REGISTRY_AUTH_FILE = '/var/lib/spoc/auth.json'
REPO_FILE_URL = None

def read_auth():
    """Load registry host and credentials from the podman-style auth file.

    Falls back to 'localhost' with no credentials when the file is missing.
    """
    global REGISTRY_HOST, REGISTRY_AUTH, REPO_FILE_URL # pylint: disable=global-statement
    try:
        with open(REGISTRY_AUTH_FILE, encoding='utf-8') as f:
            data = json.load(f)
        REGISTRY_HOST = next(iter(data['auths'].keys()))
        # The 'auth' value is base64 of 'username:password'
        auth = b64decode(data['auths'][REGISTRY_HOST]['auth'].encode()).decode()
        REGISTRY_AUTH = tuple(auth.split(':', 1))
    except FileNotFoundError:
        REGISTRY_HOST = 'localhost'
        REGISTRY_AUTH = None
    REPO_FILE_URL = f'https://{REGISTRY_HOST}/repository.json'

def write_auth(host, username, password):
    """Persist registry credentials in podman auth format and apply them."""
    global REGISTRY_HOST, REGISTRY_AUTH, REPO_FILE_URL # pylint: disable=global-statement
    b64auth = b64encode(f'{username}:{password}'.encode()).decode()
    data = json.dumps({'auths': {host: {'auth': b64auth}}})
    with open(REGISTRY_AUTH_FILE, 'w', encoding='utf-8') as f:
        f.write(data)
    REGISTRY_HOST = host
    REGISTRY_AUTH = (username, password)
    REPO_FILE_URL = f'https://{REGISTRY_HOST}/repository.json'

# Initialize registry settings on import
read_auth()

57
src/spoc/depsolver.py Normal file
View File

@ -0,0 +1,57 @@
class CircularDependencyError(Exception):
    # Dependency solver has found a circular dependency between items
    def __init__(self, deps):
        super().__init__(deps)
        # Mapping of item -> unresolved dependencies at the time of failure
        self.deps = deps

    def __str__(self):
        result = ['Dependency resolution failed due to circular dependency.',
                  'Unresolved dependencies:']
        result.extend(f'  {item} => {item_deps}' for item, item_deps in self.deps.items())
        return '\n'.join(result)
class MissingDependencyError(Exception):
    # Dependency solver has found an item that depends on a nonexistent item
    def __init__(self, deps, missing):
        super().__init__(deps, missing)
        # Mapping of item -> unresolved dependencies at the time of failure
        self.deps = deps
        # Set of dependency names that were never registered
        self.missing = missing

    def __str__(self):
        result = ['Dependency resolution failed due to missing dependency.',
                  'Missing dependencies:']
        result.append(f'  {self.missing}')
        result.append('Unresolved dependencies:')
        result.extend(f'  {item} => {item_deps}' for item, item_deps in self.deps.items())
        return '\n'.join(result)
class DepSolver:
    """Simple topological sorter for items with dependencies."""

    def __init__(self):
        # Mapping of item -> set of items it still depends on
        self.unresolved = {}

    def add(self, item, dependencies):
        """Register *item* together with an iterable of its dependencies."""
        self.unresolved[item] = set(dependencies)

    def solve(self):
        """Return the registered items as a list ordered by dependency.

        :raises MissingDependencyError: when an item depends on something
            that was never registered.
        :raises CircularDependencyError: when the remaining items depend
            on each other.
        """
        ordered = []
        while self.unresolved:
            # Pick every item whose dependencies are all already satisfied
            ready = {item for item, deps in self.unresolved.items() if not deps}
            if not ready:
                # Nothing is ready: either a dependency was never registered...
                required = {dep for deps in self.unresolved.values() for dep in deps}
                unknown = required - set(self.unresolved)
                if unknown:
                    raise MissingDependencyError(self.unresolved, unknown)
                # ...or the remaining items form a cycle
                raise CircularDependencyError(self.unresolved)
            # Move the ready batch into the result
            ordered.extend(ready)
            for item in ready:
                del self.unresolved[item]
            # Satisfied dependencies no longer block the remaining items
            for deps in self.unresolved.values():
                deps -= ready
        return ordered

49
src/spoc/flock.py Normal file
View File

@ -0,0 +1,49 @@
import errno
import fcntl
import os
import time
import sys
from contextlib import contextmanager
from . import config
def print_lock(pid):
    """Inform the user on stderr which process currently holds the lock.

    *pid* is the content of the lock file. The holding process may exit
    between the failed lock attempt and this call (or the file content may
    be stale), in which case /proc/<pid>/cmdline no longer exists - that
    must not crash the waiter.
    """
    try:
        with open(os.path.join('/proc', pid, 'cmdline'), 'rb') as f:
            cmdline = f.read().decode().replace('\0', ' ').strip()
    except FileNotFoundError:
        # The holder has already exited or the lock file content is stale
        cmdline = 'unknown'
    print(f'Waiting for lock currently held by process {pid} - {cmdline}', file=sys.stderr)
@contextmanager
def locked():
    """Exclusive inter-process lock usable as context manager or decorator.

    The lock file stores the PID of the current holder so that waiting
    processes can report whom they are waiting for.
    """
    with open(config.LOCK_FILE, 'a', encoding='utf-8'):
        # Open the lock file in append mode first to ensure its existence
        # but not modify any data if it already exists
        pass
    # Open the lock file in read + write mode without truncation
    with open(config.LOCK_FILE, 'r+', encoding='utf-8') as f:
        lock_printed = False
        while True:
            try:
                # Try to obtain exclusive lock in non-blocking mode
                fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
                break
            except OSError as e:
                # If lock is held by another process
                if e.errno == errno.EAGAIN:
                    if not lock_printed:
                        # Print a message using contents of the lock file
                        # (PID of the process holding the lock)
                        print_lock(f.read())
                        # Set flag so the message is not printed in every loop
                        lock_printed = True
                    # Set the position for future truncation
                    f.seek(0)
                    # Wait for the lock to be freed
                    time.sleep(0.1)
                else:
                    raise
        # If the lock was obtained, truncate the file
        # and write PID of the process holding the lock
        f.truncate()
        f.write(str(os.getpid()))
        f.flush()
        yield f

93
src/spoc/podman.py Normal file
View File

@ -0,0 +1,93 @@
import json
import os
import subprocess
from . import config
# Environment for all podman invocations; podman reads registry
# credentials from the file referenced by REGISTRY_AUTH_FILE
ENV = os.environ.copy()
ENV['REGISTRY_AUTH_FILE'] = config.REGISTRY_AUTH_FILE

def run(cmd, **kwargs):
    """Run *cmd* with the podman env; raises CalledProcessError on failure."""
    return subprocess.run(cmd, check=True, env=ENV, **kwargs)

def out(cmd, **kwargs):
    """Run *cmd* and return its stdout as text with trailing whitespace stripped."""
    return run(cmd, stdout=subprocess.PIPE, text=True, **kwargs).stdout.rstrip()
def get_subuidgid():
    """Return the first subordinate (uid, gid) pair of the 'spoc' user.

    Runs 'podman unshare' as the spoc user and parses the resulting
    /proc/self/uid_map and gid_map. Falls back to 0 when no mapping for
    in-namespace id 1 is found.
    """
    def get_first_sub(kind):
        lines = out(['su', '-', 'spoc', '-c', f'podman unshare cat /proc/self/{kind}_map'])
        for line in lines.splitlines():
            # Map line format: '<inside-id> <outside-id> <count>'; we want
            # the host id that in-namespace id 1 maps to
            columns = line.split()
            if columns[0] == '1':
                return int(columns[1])
        return 0
    return (get_first_sub('uid'), get_first_sub('gid'))
def get_apps():
    """Return installed applications as {name: version} read from pod labels."""
    pods = json.loads(out(['podman', 'pod', 'ps', '--format', 'json']))
    apps = {}
    for pod in pods:
        labels = pod['Labels']
        name = labels.get('spoc.app')
        if name:
            # Pods without the spoc.app label are not managed by spoc
            apps[name] = labels.get('spoc.version')
    return apps
def get_volumes_for_app(app_name):
    """Return the set of volume names labelled as belonging to *app_name*."""
    volume_ls = out(['podman', 'volume', 'ls', '--filter', f'label=spoc.app={app_name}',
                     '--format', 'json'])
    return set(volume['Name'] for volume in json.loads(volume_ls))

def start_pod(app_name):
    """Start the application's pod and thus all its containers."""
    run(['podman', 'pod', 'start', app_name])

def stop_pod(app_name):
    """Stop the application's pod; '--ignore' tolerates a missing pod."""
    run(['podman', 'pod', 'stop', '--ignore', app_name])

def get_pod_status(app_name=None):
    """Return 'podman pod ps' output for one app, or all spoc-labelled pods."""
    app_filter = 'label=spoc.app'
    if app_name:
        app_filter = f'{app_filter}={app_name}'
    return out(['podman', 'pod', 'ps', '--filter', app_filter])

def create_volume(app_name, vol_name):
    """Create a volume labelled for the app and owned by its subordinate ids."""
    subuid, subgid = get_subuidgid()
    run(['podman', 'volume', 'create',
         '--opt', f'o=uid={subuid},gid={subgid}',
         '--label', f'spoc.app={app_name}', vol_name])

def remove_volume(vol_name):
    """Remove a single volume by its full name."""
    run(['podman', 'volume', 'rm', vol_name])

def create_pod(app_name, app_version):
    """Create the application pod carrying name and version labels."""
    run(['podman', 'pod', 'create', '--name', app_name,
         '--subuidname', 'spoc', '--subgidname', 'spoc',
         '--label', f'spoc.app={app_name}', '--label', f'spoc.version={app_version}'])

def remove_pod(app_name):
    """Stop and remove the pod; '--ignore' tolerates a missing pod."""
    stop_pod(app_name)
    run(['podman', 'pod', 'rm', '--ignore', app_name])
def create_container(app_name, cnt_name, image, *, env_file=None, requires=None,
                     volumes=None, hosts=None):
    """Create a container inside the application's pod.

    Explicit keyword-only parameters replace the original ``**kwargs`` so the
    accepted options are part of the signature; all existing callers already
    pass them as keywords, so the interface stays compatible.

    :param app_name: Name of the pod to attach the container to.
    :param cnt_name: Full container name.
    :param image: Image reference to create the container from.
    :param env_file: Optional path to a file with environment variables.
    :param requires: Optional iterable of container names to be started first.
    :param volumes: Optional mapping of volume name -> mount point.
    :param hosts: Optional iterable of hostnames aliased to 127.0.0.1.
    """
    cmd = ['podman', 'container', 'create', '--name', cnt_name, '--pod', app_name,
           '--subuidname', 'spoc', '--subgidname', 'spoc',
           '--restart', 'unless-stopped']
    if env_file:
        cmd.extend(['--env-file', env_file])
    if requires:
        cmd.extend(['--requires', ','.join(sorted(requires))])
    if volumes:
        # Sorted by mount point for a deterministic command line
        for volume, mount in sorted(volumes.items(), key=lambda item: item[1]):
            cmd.extend(['--volume', f'{volume}:{mount}'])
    if hosts:
        for host in sorted(hosts):
            cmd.extend(['--add-host', f'{host}:127.0.0.1'])
    cmd.append(image)
    run(cmd)
def prune():
    """Remove all unused images and their anonymous volumes."""
    run(['podman', 'image', 'prune', '--all', '--force', '--volumes'])

17
src/spoc/repo.py Normal file
View File

@ -0,0 +1,17 @@
import requests
from . import config
# Cached repository index; populated lazily on first access
_DATA = {}

def load(force=False):
    """Fetch repository.json into the module cache.

    Does nothing when the cache is already populated, unless *force* is set.
    Raises requests.HTTPError on a failed download.
    """
    global _DATA # pylint: disable=global-statement
    if not _DATA or force:
        _DATA = {}
        response = requests.get(config.REPO_FILE_URL, auth=config.REGISTRY_AUTH, timeout=5)
        response.raise_for_status()
        _DATA = response.json()

def get_apps():
    """Return the repository application index, loading it if necessary."""
    load()
    return _DATA

176
src/spoc_cli.py Normal file
View File

@ -0,0 +1,176 @@
import argparse
import getpass
import sys
import requests
import spoc
# Maps spoc exception class names to user-facing message templates
APP_ERROR_STRINGS = {
    'AppAlreadyInstalledError': 'Application {} is already installed',
    'AppNotInstalledError': 'Application {} is not installed',
    'AppNotInRepoError': 'Application {} does not exist in the repository',
    'AppNotUpdateableError': 'Application {} does not have a newer version to update',
}

def handle_app_error(exception):
    """Print a human readable message for a spoc.AppError subclass."""
    ex_type = type(exception).__name__
    print(APP_ERROR_STRINGS[ex_type].format(exception.app_name), file=sys.stderr)

def handle_repo_error(exception):
    """Print a human readable message for a failed repository request."""
    if isinstance(exception, requests.HTTPError):
        status_code = exception.response.status_code
        if status_code == 401:
            reason = 'Invalid username/password'
        else:
            reason = f'{status_code} {exception.response.reason}'
    else:
        # Non-HTTP failures (connection, timeout, ...) - show the exception type
        ex_type = type(exception)
        reason = f'{ex_type.__module__}.{ex_type.__name__}'
    print(f'Repository "{spoc.config.REGISTRY_HOST}" cannot be reached due to: {reason}',
          file=sys.stderr)
def listing(list_type):
    """Print 'name version' lines for the selected listing type."""
    sources = {
        'installed': spoc.list_installed,
        'online': spoc.list_online,
        'updates': spoc.list_updates,
    }
    apps = sources[list_type]() if list_type in sources else {}
    for app_name, app_version in apps.items():
        print(app_name, app_version)
# Thin CLI wrappers delegating to the spoc package API

def install(app_name, from_file):
    spoc.install(app_name, from_file)

def update(app_name, from_file):
    spoc.update(app_name, from_file)

def uninstall(app_name):
    spoc.uninstall(app_name)

def start(app_name):
    spoc.start(app_name)

def stop(app_name):
    spoc.stop(app_name)

def status(app_name):
    print(spoc.status(app_name))

def set_autostart(app_name, value):
    # Any value outside this set ('0', 'off', ...) disables autostart
    enabled = value.lower() in ('1', 'on', 'enable', 'true')
    spoc.set_autostart(app_name, enabled)

def start_autostarted():
    spoc.start_autostarted()

def stop_all():
    spoc.stop_all()

def login(host):
    """Interactively ask for credentials and log into the registry."""
    username = input('Username: ')
    password = getpass.getpass()
    spoc.login(host, username, password)
    print('Login OK')

def prune():
    spoc.prune()
def parse_args(args=None):
    """Build the CLI parser and parse *args* (defaults to sys.argv).

    Each subcommand stores its handler function in 'action' so that main()
    can dispatch on identity.
    """
    parser = argparse.ArgumentParser(description='SPOC application manager')
    subparsers = parser.add_subparsers(dest='action', required=True)

    parser_list = subparsers.add_parser('list')
    parser_list.set_defaults(action=listing)
    parser_list.add_argument('type', choices=('installed', 'online', 'updates'),
                             default='installed', const='installed', nargs='?',
                             help='Selected repository or application criteria')

    parser_install = subparsers.add_parser('install')
    parser_install.set_defaults(action=install)
    parser_install.add_argument('app', help='Name of the application to install')
    parser_install.add_argument('--from-file',
                                help='Filename containing the application definition ' \
                                     'to be used instead of online repository')

    parser_update = subparsers.add_parser('update')
    parser_update.set_defaults(action=update)
    parser_update.add_argument('app', help='Name of the application to update')
    parser_update.add_argument('--from-file',
                               help='Filename containing the application definition ' \
                                    'to be used instead of online repository')

    parser_uninstall = subparsers.add_parser('uninstall')
    parser_uninstall.set_defaults(action=uninstall)
    parser_uninstall.add_argument('app', help='Name of the application to uninstall')

    parser_start = subparsers.add_parser('start')
    parser_start.set_defaults(action=start)
    parser_start.add_argument('app', help='Name of the application to start')

    parser_stop = subparsers.add_parser('stop')
    parser_stop.set_defaults(action=stop)
    parser_stop.add_argument('app', help='Name of the application to stop')

    parser_status = subparsers.add_parser('status')
    parser_status.set_defaults(action=status)
    parser_status.add_argument('app', nargs='?', help='Name of the application to check')

    parser_autostart = subparsers.add_parser('autostart')
    parser_autostart.set_defaults(action=set_autostart)
    parser_autostart.add_argument('app', help='Name of the application to be automatically started')
    parser_autostart.add_argument('value', choices=('1', 'on', 'enable', 'true',
                                                    '0', 'off', 'disable', 'false'),
                                  help='Set or unset the applications to be automatically ' \
                                       'started after the host boots up')

    parser_start_autostarted = subparsers.add_parser('start-autostarted')
    parser_start_autostarted.set_defaults(action=start_autostarted)

    parser_stop_all = subparsers.add_parser('stop-all')
    parser_stop_all.set_defaults(action=stop_all)

    parser_login = subparsers.add_parser('login')
    parser_login.set_defaults(action=login)
    parser_login.add_argument('host', help='Hostname of the container registry')

    parser_prune = subparsers.add_parser('prune')
    parser_prune.set_defaults(action=prune)

    return parser.parse_args(args)
def main(): # pylint: disable=too-many-branches
    """CLI entry point - dispatch to the handler chosen by argparse."""
    args = parse_args()
    try:
        if args.action is listing:
            listing(args.type)
        elif args.action is install:
            install(args.app, args.from_file)
        elif args.action is update:
            update(args.app, args.from_file)
        elif args.action is uninstall:
            uninstall(args.app)
        elif args.action is start:
            start(args.app)
        elif args.action is stop:
            stop(args.app)
        elif args.action is status:
            status(args.app)
        elif args.action is set_autostart:
            set_autostart(args.app, args.value)
        elif args.action is start_autostarted:
            start_autostarted()
        elif args.action is stop_all:
            stop_all()
        elif args.action is login:
            login(args.host)
        elif args.action is prune:
            prune()
    except spoc.AppError as ex:
        # Expected application-level failures - print a friendly message
        handle_app_error(ex)
    except requests.RequestException as ex:
        # Repository/network failures - print a friendly message
        handle_repo_error(ex)

266
tests/test_app.py Normal file
View File

@ -0,0 +1,266 @@
import json
import os
from unittest.mock import patch, call, mock_open
from spoc import app
from spoc import config

# Directory with static test fixtures (repository.json)
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')

# Repository index shared by all tests in this module
with open(os.path.join(TEST_DATA_DIR, 'repository.json'), encoding='utf-8') as f:
    MOCK_REPODATA = json.load(f)

# Raw env file content and its parsed form; the second value contains '='
# to exercise splitting on the first '=' only
MOCK_ENV = 'RAILS_ENV=test\n' \
           'POSTGRES_PASSWORD=asdf=1234\n' \
           'SOMEKEY=someval\n'
MOCK_ENV_DATA = {
    'RAILS_ENV': 'test',
    'POSTGRES_PASSWORD': 'asdf=1234',
    'SOMEKEY': 'someval',
}
def test_init():
    # Constructor derives the env file path from the app name
    instance = app.App('someapp')
    assert instance.app_name == 'someapp'
    assert instance.env_file == os.path.join(config.DATA_DIR, 'someapp.env')

@patch('spoc.repo.get_apps', return_value=MOCK_REPODATA)
def test_get_definition(repo_get_apps):
    # Without a file, the definition comes from the online repository
    instance = app.App('someapp')
    definition = instance.get_definition()
    assert definition == MOCK_REPODATA['someapp']
    repo_get_apps.assert_called_once()

@patch('spoc.repo.get_apps')
@patch('builtins.open', new_callable=mock_open, read_data=json.dumps(MOCK_REPODATA['someapp']))
def test_get_definition_from_file(file_open, repo_get_apps):
    # With a file, the online repository must not be consulted
    instance = app.App('someapp')
    definition = instance.get_definition('somefile')
    assert definition == MOCK_REPODATA['someapp']
    file_open.assert_called_once_with('somefile', encoding='utf-8')
    repo_get_apps.assert_not_called()

@patch('spoc.app.App.get_definition', return_value=MOCK_REPODATA['someapp'])
@patch('spoc.app.App.get_existing_volumes', return_value=set('somevol'))
@patch('spoc.app.App.remove_volumes')
@patch('spoc.app.App.create_volumes')
@patch('spoc.app.App.read_env_vars')
@patch('spoc.app.App.write_env_vars')
@patch('spoc.app.App.create_pod')
@patch('spoc.app.App.create_containers')
def test_install(create_containers, create_pod, write_env_vars, #pylint: disable=too-many-arguments
                 read_env_vars, create_volumes, remove_volumes,
                 get_existing_volumes, get_definition):
    # Clean install: all preexisting volumes are removed and the env file
    # is written from the definition without consulting previous values
    instance = app.App('someapp')
    instance.install()
    get_definition.assert_called_once()
    get_existing_volumes.assert_called_once()
    remove_volumes.assert_called_once_with(set('somevol'))
    create_volumes.assert_called_once_with(set(('migrate', 'storage', 'uploads', 'postgres-data')))
    read_env_vars.assert_not_called()
    write_env_vars.assert_called_once_with(MOCK_REPODATA['someapp']['environment'])
    create_pod.assert_called_once_with('0.23.5-210416')
    create_containers.assert_called_once_with(MOCK_REPODATA['someapp']['containers'])
@patch('spoc.app.App.get_definition', return_value=MOCK_REPODATA['someapp'])
@patch('spoc.app.App.get_existing_volumes', return_value=set(('somevol', 'migrate', 'storage')))
@patch('spoc.app.App.remove_volumes')
@patch('spoc.app.App.create_volumes')
@patch('spoc.app.App.read_env_vars', return_value=MOCK_ENV_DATA)
@patch('spoc.app.App.write_env_vars')
@patch('spoc.app.App.create_pod')
@patch('spoc.app.App.create_containers')
def test_update(create_containers, create_pod, write_env_vars, #pylint: disable=too-many-arguments
                read_env_vars, create_volumes, remove_volumes,
                get_existing_volumes, get_definition):
    # Update: only unreferenced volumes are removed, only new ones created,
    # existing env values survive and keys absent from the definition are dropped
    instance = app.App('someapp')
    instance.update(from_file='somefile')
    get_definition.assert_called_once()
    get_existing_volumes.assert_called_once()
    remove_volumes.assert_called_once_with(set(('somevol',)))
    create_volumes.assert_called_once_with(set(('uploads', 'postgres-data')))
    read_env_vars.assert_called_once()
    expected_env_data = MOCK_REPODATA['someapp']['environment'].copy()
    expected_env_data.update(MOCK_ENV_DATA)
    del expected_env_data['SOMEKEY']
    write_env_vars.assert_called_once_with(expected_env_data)
    create_pod.assert_called_once_with('0.23.5-210416')
    create_containers.assert_called_once_with(MOCK_REPODATA['someapp']['containers'])

@patch('spoc.autostart.set_app')
@patch('spoc.app.App.remove_pod')
@patch('spoc.app.App.remove_env_vars')
@patch('spoc.app.App.get_existing_volumes', return_value=set(('somevol', 'anothervol')))
@patch('spoc.app.App.remove_volumes')
def test_uninstall(remove_volumes, get_existing_volumes, remove_env_vars, remove_pod, autostart):
    # Uninstall disables autostart and removes pod, env file and all volumes
    instance = app.App('someapp')
    instance.uninstall()
    autostart.assert_called_with('someapp', False)
    remove_pod.assert_called_once()
    remove_env_vars.assert_called_once()
    get_existing_volumes.assert_called_once()
    remove_volumes.assert_called_once_with(set(('somevol', 'anothervol')))

@patch('spoc.podman.remove_pod')
@patch('spoc.podman.create_pod')
def test_create_pod(create_pod, remove_pod):
    # A leftover pod is removed before the new one is created
    instance = app.App('someapp')
    instance.create_pod('0.1')
    remove_pod.assert_called_once_with('someapp')
    create_pod.assert_called_once_with('someapp', '0.1')

@patch('spoc.podman.remove_pod')
def test_remove_pod(remove_pod):
    instance = app.App('someapp')
    instance.remove_pod()
    remove_pod.assert_called_once_with('someapp')

@patch('builtins.open', new_callable=mock_open, read_data=MOCK_ENV)
def test_read_env_vars(env_open):
    # Values are split on the first '=' only
    instance = app.App('someapp')
    env_vars = instance.read_env_vars()
    env_file = os.path.join(config.DATA_DIR, 'someapp.env')
    env_open.assert_called_once_with(env_file, encoding='utf-8')
    assert env_vars == MOCK_ENV_DATA

@patch('builtins.open', side_effect=FileNotFoundError('someapp.env'))
def test_read_env_vars_filenotfound(env_open):
    # A missing env file yields an empty dict
    instance = app.App('someapp')
    env_vars = instance.read_env_vars()
    env_file = os.path.join(config.DATA_DIR, 'someapp.env')
    env_open.assert_called_once_with(env_file, encoding='utf-8')
    assert not env_vars
@patch('os.makedirs')
@patch('builtins.open', new_callable=mock_open)
def test_write_env_vars(env_open, makedirs):
    # DATA_DIR is created if needed; one KEY=value line per variable
    instance = app.App('someapp')
    instance.write_env_vars(MOCK_ENV_DATA)
    makedirs.assert_called_once_with(config.DATA_DIR, exist_ok=True)
    env_file = os.path.join(config.DATA_DIR, 'someapp.env')
    env_open.assert_called_once_with(env_file, 'w', encoding='utf-8')
    expected_writes = [call(line) for line in MOCK_ENV.splitlines(True)]
    env_open().write.assert_has_calls(expected_writes, any_order=True)

@patch('os.unlink')
def test_remove_env_vars(unlink):
    instance = app.App('someapp')
    instance.remove_env_vars()
    env_file = os.path.join(config.DATA_DIR, 'someapp.env')
    unlink.assert_called_once_with(env_file)

@patch('os.unlink', side_effect=FileNotFoundError('someapp.env'))
def test_remove_env_vars_filenotfound(unlink):
    # A missing env file is silently ignored
    instance = app.App('someapp')
    instance.remove_env_vars()
    env_file = os.path.join(config.DATA_DIR, 'someapp.env')
    unlink.assert_called_once_with(env_file)

@patch('spoc.podman.get_volumes_for_app', return_value={'someapp-vol1', 'someapp-vol2'})
def test_get_existing_volumes(get_volume_names):
    # The 'someapp-' prefix is stripped from the returned names
    instance = app.App('someapp')
    volumes = instance.get_existing_volumes()
    get_volume_names.assert_called_once_with('someapp')
    assert volumes == {'vol1', 'vol2'}

@patch('spoc.app.App.create_volume')
def test_create_volumes(create_volume):
    instance = app.App('someapp')
    instance.create_volumes({'vol1', 'vol2'})
    create_volume.assert_has_calls([
        call('vol1'),
        call('vol2'),
    ], any_order=True)

@patch('spoc.app.App.remove_volume')
def test_remove_volumes(remove_volume):
    instance = app.App('someapp')
    instance.remove_volumes({'vol1', 'vol2'})
    remove_volume.assert_has_calls([
        call('vol1'),
        call('vol2'),
    ], any_order=True)

@patch('spoc.podman.create_volume')
def test_create_volume(create_volume):
    # The created volume name carries the app prefix
    instance = app.App('someapp')
    instance.create_volume('vol1')
    create_volume.assert_called_once_with('someapp', 'someapp-vol1')

@patch('spoc.podman.remove_volume')
def test_remove_volume(remove_volume):
    instance = app.App('someapp')
    instance.remove_volume('vol1')
    remove_volume.assert_called_once_with('someapp-vol1')

@patch('spoc.app.App.create_container')
def test_create_containers(create_container):
    instance = app.App('someapp')
    definitions = MOCK_REPODATA['someapp']['containers']
    instance.create_containers(definitions)
    # Ordered by dependency
    create_container.assert_has_calls([
        call('postgres', definitions['postgres'], {'someapp', 'postgres'}),
        call('someapp', definitions['someapp'], {'someapp', 'postgres'}),
    ])
@patch('spoc.podman.create_container')
def test_create_container(create_container):
instance = app.App('someapp')
definition = MOCK_REPODATA['someapp']['containers']['someapp']
instance.create_container('someapp', definition, {'someapp', 'postgres'})
env_file = os.path.join(config.DATA_DIR, 'someapp.env')
volumes = {'someapp-migrate': '/srv/app/db/migrate',
'someapp-storage': '/srv/app/storage',
'someapp-uploads': '/srv/app/public/uploads'}
create_container.assert_called_once_with('someapp', 'someapp-someapp',
'example.com/someapp:0.23.6-210515',
env_file=env_file,
volumes=volumes,
requires={'someapp-postgres'},
hosts={'someapp', 'postgres'})
@patch('spoc.app.App')
def test_module_install(instance):
app.install('someapp')
instance.assert_called_once_with('someapp')
instance.return_value.install.assert_called_once_with(from_file=None)
@patch('spoc.app.App')
def test_module_update(instance):
    """Module-level update() builds an App and forwards from_file."""
    app.update('someapp', from_file='somefile')
    instance.assert_called_once_with('someapp')
    instance.return_value.update.assert_called_once_with(from_file='somefile')
@patch('spoc.app.App')
def test_module_uninstall(instance):
    """Module-level uninstall() builds an App and calls uninstall on it."""
    app.uninstall('someapp')
    instance.assert_called_once_with('someapp')
    instance.return_value.uninstall.assert_called_once()

56
tests/test_autostart.py Normal file
View File

@ -0,0 +1,56 @@
from unittest.mock import patch, call, mock_open
from spoc import autostart
from spoc import config
@patch('builtins.open', new_callable=mock_open, read_data='someapp\nanotherapp\n')
def test_get_apps(file_open):
    """get_apps() reads the autostart file into a set of app names."""
    result = autostart.get_apps()
    file_open.assert_called_once_with(config.AUTOSTART_FILE, encoding='utf-8')
    assert result == {'anotherapp', 'someapp'}
@patch('builtins.open', side_effect=FileNotFoundError('someapp.env'))
def test_get_apps_filenotfounderror(file_open):
    """A missing autostart file yields an empty set instead of raising."""
    apps = autostart.get_apps()
    file_open.assert_called_once_with(config.AUTOSTART_FILE, encoding='utf-8')
    assert apps == set()
@patch('os.makedirs')
@patch('spoc.autostart.get_apps', return_value={'someapp'})
@patch('builtins.open', new_callable=mock_open)
def test_set_app_enable(file_open, get_apps, makedirs):
    """Enabling an app rewrites the autostart file with the app included."""
    autostart.set_app('anotherapp', True)
    get_apps.assert_called_once()
    makedirs.assert_called_once_with(config.DATA_DIR, exist_ok=True)
    file_open.assert_called_once_with(config.AUTOSTART_FILE, 'w', encoding='utf-8')
    written = [call('someapp\n'), call('anotherapp\n')]
    file_open().write.assert_has_calls(written, any_order=True)
@patch('os.makedirs')
@patch('spoc.autostart.get_apps', return_value={'someapp', 'anotherapp'})
@patch('builtins.open', new_callable=mock_open)
def test_set_app_disable(file_open, get_apps, makedirs):
    """Disabling an app rewrites the autostart file without it."""
    autostart.set_app('anotherapp', False)
    get_apps.assert_called_once()
    makedirs.assert_called_once_with(config.DATA_DIR, exist_ok=True)
    file_open.assert_called_once_with(config.AUTOSTART_FILE, 'w', encoding='utf-8')
    file_open().write.assert_has_calls([
        call('someapp\n'),
    ])
@patch('os.makedirs')
@patch('spoc.autostart.get_apps', return_value={'someapp'})
@patch('builtins.open', new_callable=mock_open)
def test_set_app_nonexistent(file_open, get_apps, makedirs):
    """Disabling an app that is not autostarted touches nothing on disk."""
    autostart.set_app('anotherapp', False)
    get_apps.assert_called_once()
    makedirs.assert_not_called()
    file_open.assert_not_called()
    file_open().write.assert_not_called()

359
tests/test_cli.py Normal file
View File

@ -0,0 +1,359 @@
from argparse import Namespace
from unittest.mock import patch
import pytest
import requests
import spoc
import spoc_cli
class MockResponse:  # pylint: disable=too-few-public-methods
    """Minimal stand-in for requests.Response carrying only status and reason."""

    def __init__(self, status_code, reason):
        self.status_code, self.reason = status_code, reason
# NOTE(review): this function is named test_handle_repo_error but exercises
# handle_app_error(); the following test does the opposite — the two test
# names appear swapped.
@pytest.mark.parametrize('exception,expected', [
    (spoc.AppAlreadyInstalledError('someapp'),
     'Application someapp is already installed\n'),
    (spoc.AppNotInstalledError('someapp'),
     'Application someapp is not installed\n'),
    (spoc.AppNotInRepoError('someapp'),
     'Application someapp does not exist in the repository\n'),
    (spoc.AppNotUpdateableError('someapp'),
     'Application someapp does not have a newer version to update\n'),
])
def test_handle_repo_error(exception, expected, capsys):
    """handle_app_error() prints a per-exception message to stderr."""
    spoc_cli.handle_app_error(exception)
    captured = capsys.readouterr()
    assert captured.err == expected
# NOTE(review): named test_handle_app_error but exercises handle_repo_error();
# appears name-swapped with the preceding test.
@pytest.mark.parametrize('exception,expected', [
    (requests.HTTPError(response=MockResponse(401, 'Unauthorized')),
     'Invalid username/password'),
    (requests.HTTPError(response=MockResponse(404, 'Not Found')),
     '404 Not Found'),
    (requests.ConnectTimeout(),
     'requests.exceptions.ConnectTimeout'),
])
def test_handle_app_error(exception, expected, capsys):
    """handle_repo_error() wraps the cause in a registry-unreachable message."""
    spoc_cli.handle_repo_error(exception)
    captured = capsys.readouterr()
    # Rebinds 'expected': the parametrized value is only the cause fragment.
    expected = f'Repository "{spoc.config.REGISTRY_HOST}" cannot be reached due to: {expected}\n'
    assert captured.err == expected
@patch('spoc.list_installed', return_value={'anotherapp': '0.1', 'someapp': '0.1'})
@patch('spoc.list_online')
@patch('spoc.list_updates')
def test_listing_installed(list_updates, list_online, list_installed, capsys):
    """listing('installed') prints installed apps and queries nothing else."""
    spoc_cli.listing('installed')
    list_installed.assert_called_once()
    list_online.assert_not_called()
    list_updates.assert_not_called()
    assert capsys.readouterr().out == 'anotherapp 0.1\nsomeapp 0.1\n'
@patch('spoc.list_installed')
@patch('spoc.list_online', return_value={'anotherapp': '0.2', 'someapp': '0.2'})
@patch('spoc.list_updates')
def test_listing_online(list_updates, list_online, list_installed, capsys):
    """listing('online') prints repository apps and queries nothing else."""
    spoc_cli.listing('online')
    list_installed.assert_not_called()
    list_online.assert_called_once()
    list_updates.assert_not_called()
    captured = capsys.readouterr()
    assert captured.out == 'anotherapp 0.2\nsomeapp 0.2\n'
@patch('spoc.list_installed')
@patch('spoc.list_online')
@patch('spoc.list_updates', return_value={'anotherapp': '0.1 -> 0.2', 'someapp': '0.1 -> 0.2'})
def test_listing_updates(list_updates, list_online, list_installed, capsys):
    """listing('updates') prints the old -> new version transitions."""
    spoc_cli.listing('updates')
    list_installed.assert_not_called()
    list_online.assert_not_called()
    list_updates.assert_called_once()
    captured = capsys.readouterr()
    assert captured.out == 'anotherapp 0.1 -> 0.2\nsomeapp 0.1 -> 0.2\n'
@patch('spoc.list_installed')
@patch('spoc.list_online')
@patch('spoc.list_updates')
def test_listing_invalid(list_updates, list_online, list_installed, capsys):
    """listing() with an unknown selector queries nothing and prints nothing."""
    spoc_cli.listing('invalid')
    list_installed.assert_not_called()
    list_online.assert_not_called()
    list_updates.assert_not_called()
    captured = capsys.readouterr()
    assert not captured.out
@patch('spoc.install')
def test_install(install):
    """CLI install() delegates directly to spoc.install()."""
    spoc_cli.install('someapp', 'somefile')
    install.assert_called_once_with('someapp', 'somefile')
@patch('spoc.update')
def test_update(update):
    """CLI update() delegates directly to spoc.update()."""
    spoc_cli.update('someapp', None)
    update.assert_called_once_with('someapp', None)
@patch('spoc.uninstall')
def test_uninstall(uninstall):
    """CLI uninstall() delegates directly to spoc.uninstall()."""
    spoc_cli.uninstall('someapp')
    uninstall.assert_called_once_with('someapp')
@patch('spoc.start')
def test_start(start):
    """CLI start() delegates directly to spoc.start()."""
    spoc_cli.start('someapp')
    start.assert_called_once_with('someapp')
@patch('spoc.stop')
def test_stop(stop):
    """CLI stop() delegates directly to spoc.stop()."""
    spoc_cli.stop('someapp')
    stop.assert_called_once_with('someapp')
@patch('spoc.status', return_value='STATUS')
def test_status(status, capsys):
    """CLI status() prints whatever spoc.status() returns."""
    spoc_cli.status('someapp')
    status.assert_called_once_with('someapp')
    captured = capsys.readouterr()
    assert captured.out == 'STATUS\n'
# Truthy strings ('1', 'on', 'enable', 'true' in any case) map to True;
# every other value, including unknown ones, maps to False.
@pytest.mark.parametrize('value,expected', [
    ('1', True),
    ('on', True),
    ('Enable', True),
    ('TRUE', True),
    ('0', False),
    ('off', False),
    ('Disable', False),
    ('FALSE', False),
    ('whatever', False),
])
@patch('spoc.set_autostart')
def test_set_autostart(set_autostart, value, expected):
    """CLI set_autostart() converts the string flag to a boolean."""
    spoc_cli.set_autostart('someapp', value)
    set_autostart.assert_called_once_with('someapp', expected)
@patch('spoc.start_autostarted')
def test_start_autostarted(start_autostarted):
    """CLI start_autostarted() delegates to spoc.start_autostarted()."""
    spoc_cli.start_autostarted()
    start_autostarted.assert_called_once()
@patch('spoc.stop_all')
def test_stop_all(stop_all):
    """CLI stop_all() delegates to spoc.stop_all()."""
    spoc_cli.stop_all()
    stop_all.assert_called_once()
@patch('builtins.input', return_value='someuser')
@patch('getpass.getpass', return_value='somepass')
@patch('spoc.login')
def test_login(login, getpass, nput, capsys):
    """CLI login() prompts for credentials and reports success."""
    # parameter is 'nput' to avoid shadowing the patched builtin input()
    spoc_cli.login('somehost')
    nput.assert_called_once_with('Username: ')
    getpass.assert_called_once()
    login.assert_called_once_with('somehost', 'someuser', 'somepass')
    captured = capsys.readouterr()
    assert captured.out == 'Login OK\n'
@patch('builtins.input', return_value='someuser')
@patch('getpass.getpass', return_value='somepass')
@patch('spoc.login', side_effect=requests.ConnectTimeout())
def test_login_bad(login, getpass, nput, capsys):
    """A failing spoc.login() propagates and prints no success message."""
    with pytest.raises(requests.ConnectTimeout):
        spoc_cli.login('somehost')
    nput.assert_called_once_with('Username: ')
    getpass.assert_called_once()
    login.assert_called_once_with('somehost', 'someuser', 'somepass')
    captured = capsys.readouterr()
    assert captured.out == ''
@patch('spoc.prune')
def test_prune(prune):
    """CLI prune() delegates to spoc.prune()."""
    spoc_cli.prune()
    prune.assert_called_once()
@patch('sys.argv', ['foo', 'list'])
@patch('spoc_cli.listing')
def test_main_listing(listing):
    """'list' without a selector defaults to listing installed apps."""
    spoc_cli.main()
    listing.assert_called_once_with('installed')
@patch('sys.argv', ['foo', 'list', 'online'])
@patch('spoc_cli.listing')
def test_main_listing_online(listing):
    """'list online' passes the selector through to listing()."""
    spoc_cli.main()
    listing.assert_called_once_with('online')
@patch('sys.argv', ['foo', 'install', 'someapp'])
@patch('spoc_cli.install')
def test_main_install(install):
    """'install' dispatches with from-file defaulting to None."""
    spoc_cli.main()
    install.assert_called_once_with('someapp', None)
@patch('sys.argv', ['foo', 'update', '--from-file', 'somefile', 'someapp'])
@patch('spoc_cli.update')
def test_main_update(update):
    """'update --from-file' dispatches with the given file path."""
    spoc_cli.main()
    update.assert_called_once_with('someapp', 'somefile')
@patch('sys.argv', ['foo', 'uninstall', 'someapp'])
@patch('spoc_cli.uninstall')
def test_main_uninstall(uninstall):
    """'uninstall' dispatches to the uninstall handler."""
    spoc_cli.main()
    uninstall.assert_called_once_with('someapp')
@patch('sys.argv', ['foo', 'start', 'someapp'])
@patch('spoc_cli.start')
def test_main_start(start):
    """'start' dispatches to the start handler."""
    spoc_cli.main()
    start.assert_called_once_with('someapp')
@patch('sys.argv', ['foo', 'stop', 'someapp'])
@patch('spoc_cli.stop')
def test_main_stop(stop):
    """'stop' dispatches to the stop handler."""
    spoc_cli.main()
    stop.assert_called_once_with('someapp')
@patch('sys.argv', ['foo', 'status', 'someapp'])
@patch('spoc_cli.status')
def test_main_status(status):
    """'status <app>' dispatches with the app name."""
    spoc_cli.main()
    status.assert_called_once_with('someapp')
@patch('sys.argv', ['foo', 'status'])
@patch('spoc_cli.status')
def test_main_status_all(status):
    """'status' without an app dispatches with None (all apps)."""
    spoc_cli.main()
    status.assert_called_once_with(None)
@patch('sys.argv', ['foo', 'autostart', 'someapp', 'on'])
@patch('spoc_cli.set_autostart')
def test_main_autostart(autostart):
    """'autostart' passes the raw flag string; set_autostart() parses it."""
    spoc_cli.main()
    autostart.assert_called_once_with('someapp', 'on')
@patch('sys.argv', ['foo', 'start-autostarted'])
@patch('spoc_cli.start_autostarted')
def test_main_start_autostarted(start_autostarted):
    """'start-autostarted' dispatches to its handler with no args."""
    spoc_cli.main()
    start_autostarted.assert_called_once()
@patch('sys.argv', ['foo', 'stop-all'])
@patch('spoc_cli.stop_all')
def test_main_stop_all(stop_all):
    """'stop-all' dispatches to its handler with no args."""
    spoc_cli.main()
    stop_all.assert_called_once()
@patch('sys.argv', ['foo', 'login', 'example.com'])
@patch('spoc_cli.login')
def test_main_login(login):
    """'login' dispatches with the registry host."""
    spoc_cli.main()
    login.assert_called_once_with('example.com')
@patch('sys.argv', ['foo', 'prune'])
@patch('spoc_cli.prune')
def test_main_prune(prune):
    """'prune' dispatches to its handler with no args."""
    spoc_cli.main()
    prune.assert_called_once()
@patch('spoc_cli.parse_args', return_value=Namespace(action=None))
@patch('spoc_cli.listing')
@patch('spoc_cli.install')
@patch('spoc_cli.update')
@patch('spoc_cli.uninstall')
@patch('spoc_cli.start')
@patch('spoc_cli.stop')
@patch('spoc_cli.status')
@patch('spoc_cli.start_autostarted')
@patch('spoc_cli.stop_all')
@patch('spoc_cli.login')
@patch('spoc_cli.prune')
def test_main_invalid(prune, login, stop_all, start_autostarted,  # pylint: disable=too-many-arguments
                      status, stop, start, uninstall, update, install, listing, parse_args):
    """An unrecognized action (forced via a stubbed parse_args) dispatches to
    no handler at all."""
    spoc_cli.main()
    parse_args.assert_called_once()
    listing.assert_not_called()
    install.assert_not_called()
    update.assert_not_called()
    uninstall.assert_not_called()
    start.assert_not_called()
    stop.assert_not_called()
    status.assert_not_called()
    start_autostarted.assert_not_called()
    stop_all.assert_not_called()
    login.assert_not_called()
    prune.assert_not_called()
@pytest.mark.parametrize('argv', [
    ['list', 'invalid'],
    ['install'],
    ['update'],
    ['uninstall'],
    ['start'],
    ['stop'],
    ['autostart'],
    ['autostart', 'someapp'],
    ['login'],
    ['invalid'],
    [],
])
def test_main_systemexit(argv):
    """Incomplete or invalid command lines make argparse exit via SystemExit."""
    with patch('sys.argv', ['foo'] + argv):
        with pytest.raises(SystemExit):
            spoc_cli.main()
@patch('sys.argv', ['foo', 'start', 'someapp'])
@patch('spoc_cli.start', side_effect=spoc.AppNotInstalledError('someapp'))
@patch('spoc_cli.handle_app_error')
def test_main_apperror(handle_app_error, start):
    """App errors raised by a handler are routed to handle_app_error()."""
    spoc_cli.main()
    start.assert_called_once()
    handle_app_error.assert_called_once()
@patch('sys.argv', ['foo', 'login', 'somehost'])
@patch('spoc_cli.login', side_effect=requests.HTTPError(response=MockResponse(401, 'Unauthorized')))
@patch('spoc_cli.handle_repo_error')
def test_main_repoerror(handle_repo_error, login):
    """HTTP errors raised by a handler are routed to handle_repo_error()."""
    spoc_cli.main()
    login.assert_called_once()
    handle_repo_error.assert_called_once()

34
tests/test_config.py Normal file
View File

@ -0,0 +1,34 @@
from unittest.mock import mock_open, patch
from spoc import config
@patch('builtins.open', new_callable=mock_open,
       read_data='{"auths": {"example.com": {"auth": "c29tZXVzZXI6c29tZXBhc3N3b3Jk"}}}')
def test_read_auth(auth_open):
    """read_auth() decodes the base64 'user:password' pair from the registry
    auth file and derives the repository URL from the host."""
    config.read_auth()
    auth_open.assert_called_once_with(config.REGISTRY_AUTH_FILE, encoding='utf-8')
    assert config.REGISTRY_HOST == 'example.com'
    assert config.REGISTRY_AUTH == ('someuser', 'somepassword')
    assert config.REPO_FILE_URL == 'https://example.com/repository.json'
@patch('builtins.open', side_effect=FileNotFoundError('auth.json'))
def test_read_auth_fallback(auth_open):
    """A missing auth file falls back to localhost with no credentials."""
    config.read_auth()
    auth_open.assert_called_once_with(config.REGISTRY_AUTH_FILE, encoding='utf-8')
    assert config.REGISTRY_HOST == 'localhost'
    assert config.REGISTRY_AUTH is None
    assert config.REPO_FILE_URL == 'https://localhost/repository.json'
@patch('builtins.open', new_callable=mock_open)
def test_write_auth(auth_open):
    """write_auth() stores base64-encoded credentials and updates the
    module-level registry settings in place."""
    config.write_auth('example.org', 'user', 'anotherpwd')
    auth_open.assert_called_once_with(config.REGISTRY_AUTH_FILE, 'w', encoding='utf-8')
    expected_data = '{"auths": {"example.org": {"auth": "dXNlcjphbm90aGVycHdk"}}}'
    auth_open().write.assert_called_once_with(expected_data)
    assert config.REGISTRY_HOST == 'example.org'
    assert config.REGISTRY_AUTH == ('user', 'anotherpwd')
    assert config.REPO_FILE_URL == 'https://example.org/repository.json'

View File

@ -0,0 +1,113 @@
[
{
"Cgroup": "/libpod_parent",
"Containers": [
{
"Id": "59cacababc9ea7f0a7f4ad28c67227fdd6acc57a06b0b289390647e45152857b",
"Names": "yetanotherapp-cnt1",
"Status": "running"
},
{
"Id": "720dabf6edc271c52ea22535398966db094ab5eff1de894e6beb7c68e4657847",
"Names": "4faa6b9ad5aa-infra",
"Status": "running"
},
{
"Id": "7af90eef4b48f20dabdaaec90c6c7583fea6800d2433ef7879b805d51b81bfc4",
"Names": "yetanotherapp-cnt2",
"Status": "running"
}
],
"Created": "2021-07-06T09:19:24.609538926+02:00",
"Id": "4faa6b9ad5aa28b915a8ac967a01d9c3317be3a3bfc198b0681636399c19372e",
"InfraId": "720dabf6edc271c52ea22535398966db094ab5eff1de894e6beb7c68e4657847",
"Name": "yetanotherapp",
"Namespace": "",
"Networks": [
"podman"
],
"Status": "Running",
"Labels": {
"spoc.app": "yetanotherapp",
"spoc.version": "0.3"
}
},
{
"Cgroup": "/libpod_parent",
"Containers": [
{
"Id": "798cae491ef9025db809c261fb1169f5cc09526119d252340b9d64f0fce37be1",
"Names": "97f0c135887c-infra",
"Status": "running"
},
{
"Id": "9d02724a74d929818d08395b376d960b3dd30556738bc43e96f50a27f355b9a5",
"Names": "anotherapp-cnt2",
"Status": "configured"
},
{
"Id": "b5833a8da89d40824fdb4f2b779d24135d07452f5bfa583f96e369c5953ee286",
"Names": "anotherapp-cnt1",
"Status": "stopped"
}
],
"Created": "2021-07-06T08:47:06.389299933+02:00",
"Id": "97f0c135887c8ef6eccf4a37fbcc1e26a0f3c02e73de8edaa959bfba9592b1dd",
"InfraId": "798cae491ef9025db809c261fb1169f5cc09526119d252340b9d64f0fce37be1",
"Name": "anotherapp",
"Namespace": "",
"Networks": [
"podman"
],
"Status": "Degraded",
"Labels": {
"spoc.app": "anotherapp",
"spoc.version": "0.2"
}
},
{
"Cgroup": "/libpod_parent",
"Containers": [
{
"Id": "151e1e35083391eea41605db364b7e15fde7047a6119feffcd06984671a5c991",
"Names": "be0a8d0ab749-infra",
"Status": "running"
}
],
"Created": "2021-07-03T20:01:37.63866841+02:00",
"Id": "be0a8d0ab749b3c089f72a844700b76aafa541fffca5186865bef185fc1914a0",
"InfraId": "151e1e35083391eea41605db364b7e15fde7047a6119feffcd06984671a5c991",
"Name": "notmyapp",
"Namespace": "",
"Networks": [
"podman"
],
"Status": "Running",
"Labels": {
}
},
{
"Cgroup": "/libpod_parent",
"Containers": [
{
"Id": "0897891f6e7308903c4316ce80f569320176a38d5bc4de1fbf4b2323c1a51fcb",
"Names": "18c00febc93c-infra",
"Status": "configured"
}
],
"Created": "2021-07-03T13:29:36.975071665+02:00",
"Id": "18c00febc93ca105b5d83247e7b4a0b2184c82262d421f2c857dbf155dbe97e8",
"InfraId": "0897891f6e7308903c4316ce80f569320176a38d5bc4de1fbf4b2323c1a51fcb",
"Name": "someapp",
"Namespace": "",
"Networks": [
"podman"
],
"Status": "Created",
"Labels": {
"spoc.app": "someapp",
"spoc.version": "0.1"
}
}
]

View File

@ -0,0 +1,28 @@
[
{
"Name": "someapp-conf",
"Driver": "local",
"Mountpoint": "/var/lib/containers/storage/volumes/someapp-conf/_data",
"CreatedAt": "2021-07-04T18:22:44.758466689+02:00",
"Labels": {
"spoc.app": "someapp"
},
"Scope": "local",
"Options": {
}
},
{
"Name": "someapp-data",
"Driver": "local",
"Mountpoint": "/var/lib/containers/storage/volumes/someapp-data/_data",
"CreatedAt": "2021-07-03T13:22:11.455581712+02:00",
"Labels": {
"spoc.app": "someapp"
},
"Scope": "local",
"Options": {
}
}
]

View File

@ -0,0 +1,69 @@
{
"someapp": {
"version": "0.23.5-210416",
"meta": {
"title": "Some Application",
"desc-cs": "Platforma pro účast občanů",
"desc-en": "Platform for citizen participation",
"license": "GPL"
},
"environment": {
"RAILS_ENV": "production",
"RAILS_LOG_TO_STDOUT": "1",
"POSTGRES_USER": "someapp",
"POSTGRES_PASSWORD": "someapp",
"POSTGRES_DB": "someapp",
"POSTGRES_HOST": "postgres",
"DATABASE_URL": "postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}/${POSTGRES_DB}"
},
"containers": {
"someapp": {
"image": "example.com/someapp:0.23.6-210515",
"requires": [
"postgres"
],
"volumes": {
"migrate": "/srv/app/db/migrate",
"storage": "/srv/app/storage",
"uploads": "/srv/app/public/uploads"
}
},
"postgres": {
"image": "docker.io/postgres:12-alpine",
"volumes": {
"postgres-data": "/var/lib/postgresql/data"
}
}
}
},
"anotherapp": {
"version": "1.0.3-210106",
"meta": {
"title": "Another Application",
"desc-cs": "Řízení humanítární činnosti",
"desc-en": "Management of humanitarian activities",
"license": "GPL"
},
"containers": {
"anotherapp": {
"image": "example.com/anotherapp:1.0.3-210106",
"requires": [
"postgres"
],
"volumes": {
"conf": "/srv/web2py/applications/app/models",
"data-databases": "/srv/web2py/applications/app/databases",
"data-errors": "/srv/web2py/applications/app/errors",
"data-sessions": "/srv/web2py/applications/app/sessions",
"data-uploads": "/srv/web2py/applications/app/uploads"
}
},
"postgres": {
"image": "docker.io/postgres:12-alpine",
"volumes": {
"postgres-data": "/var/lib/postgresql/data"
}
}
}
}
}

92
tests/test_depsolver.py Normal file
View File

@ -0,0 +1,92 @@
import pytest
from spoc import depsolver
def test_circulardependencyerror():
    """CircularDependencyError keeps the unresolved deps and formats them."""
    ex = depsolver.CircularDependencyError({'dep1': {'dep2'}, 'dep2': {'dep1'}})
    ex_str = str(ex)
    assert ex.deps == {'dep1': {'dep2'}, 'dep2': {'dep1'}}
    assert ex_str == 'Dependency resolution failed due to circular dependency.\n' \
                     'Unresolved dependencies:\n' \
                     ' dep1 => {\'dep2\'}\n' \
                     ' dep2 => {\'dep1\'}'
def test_missingdependencyerror():
    """MissingDependencyError keeps both the missing set and unresolved deps."""
    ex = depsolver.MissingDependencyError({'dep1': {'dep2'}}, {'dep2'})
    ex_str = str(ex)
    assert ex.deps == {'dep1': {'dep2'}}
    assert ex.missing == {'dep2'}
    assert ex_str == 'Dependency resolution failed due to missing dependency.\n' \
                     'Missing dependencies:\n' \
                     ' {\'dep2\'}\n' \
                     'Unresolved dependencies:\n' \
                     ' dep1 => {\'dep2\'}'
def test_depsolver():
    """add() deduplicates dependencies into sets; solve() returns a
    dependency-first ordering."""
    solver = depsolver.DepSolver()
    assert not solver.unresolved
    solver.add('dep1', ['dep2', 'dep3'])
    # duplicate 'dep3' collapses into a single set entry
    solver.add('dep2', ['dep3', 'dep3'])
    solver.add('dep3', [])
    assert solver.unresolved == {
        'dep1': {'dep2', 'dep3'},
        'dep2': {'dep3'},
        'dep3': set(),
    }
    resolved = solver.solve()
    assert resolved == ['dep3', 'dep2', 'dep1']
def test_depsolver_complex():
    """A larger graph resolves batch by batch; only batch membership is
    asserted because ordering inside a batch is unspecified."""
    solver = depsolver.DepSolver()
    solver.add('dep1', ['dep8', 'dep12'])
    solver.add('dep2', ['dep10'])
    solver.add('dep3', [])
    solver.add('dep4', ['dep9'])
    solver.add('dep5', ['dep1', 'dep6', 'dep8'])
    solver.add('dep6', ['dep2', 'dep10', 'dep13', 'dep14'])
    solver.add('dep7', ['dep9'])
    solver.add('dep8', ['dep2', 'dep12'])
    solver.add('dep9', [])
    solver.add('dep10', ['dep9'])
    solver.add('dep11', ['dep2', 'dep14'])
    solver.add('dep12', ['dep7'])
    solver.add('dep13', ['dep9'])
    solver.add('dep14', ['dep4'])
    resolved = solver.solve()
    # Order within the same batch (i.e. items not depending on each other) can be random
    assert list(sorted(resolved[:2])) == ['dep3', 'dep9']
    assert list(sorted(resolved[2:9])) == ['dep10', 'dep12', 'dep13', 'dep14',
                                           'dep2', 'dep4', 'dep7']
    assert list(sorted(resolved[9:12])) == ['dep11', 'dep6', 'dep8']
    assert list(sorted(resolved[12:])) == ['dep1', 'dep5']
def test_depsolver_circular():
    """A dependency cycle makes solve() raise CircularDependencyError."""
    solver = depsolver.DepSolver()
    solver.add('dep1', ['dep2', 'dep3'])
    solver.add('dep2', ['dep3'])
    solver.add('dep3', ['dep4'])
    solver.add('dep4', ['dep1'])
    with pytest.raises(depsolver.CircularDependencyError):
        solver.solve()
def test_depsolver_missing():
    """A dependency never add()ed makes solve() raise MissingDependencyError."""
    solver = depsolver.DepSolver()
    solver.add('dep1', ['dep2', 'dep3'])
    solver.add('dep2', ['dep3'])
    solver.add('dep4', ['dep1'])
    with pytest.raises(depsolver.MissingDependencyError):
        solver.solve()

110
tests/test_flock.py Normal file
View File

@ -0,0 +1,110 @@
import errno
import fcntl
from unittest.mock import call, patch, mock_open
import pytest
from spoc import config
from spoc import flock
@flock.locked()
def mock_func():
    """No-op function wrapped with the lock decorator under test."""
@patch('builtins.open', new_callable=mock_open, read_data='foo\0arg1\0arg2\n'.encode())
def test_print_lock(cmdline_open, capsys):
    """print_lock() reads the holder's NUL-separated /proc cmdline and
    reports it on stderr."""
    flock.print_lock('123')
    cmdline_open.assert_called_once_with('/proc/123/cmdline', 'rb')
    captured = capsys.readouterr()
    assert captured.err == 'Waiting for lock currently held by process 123 - foo arg1 arg2\n'
@patch('spoc.flock.print_lock')
@patch('fcntl.flock')
@patch('time.sleep')
@patch('os.getpid', return_value=1234)
@patch('builtins.open', new_callable=mock_open)
def test_locked_success(lock_open, getpid, sleep, fcntl_flock, print_lock):
    """An uncontended lock is taken on the first flock attempt and our PID
    is written into the lock file; no waiting occurs."""
    mock_func()
    lock_open.assert_has_calls([
        call(config.LOCK_FILE, 'a', encoding='utf-8'),
        call().__enter__(),
        call().__exit__(None, None, None),
        call(config.LOCK_FILE, 'r+', encoding='utf-8'),
        call().__enter__(),
        call().truncate(),
        call().write('1234'),
        call().flush(),
        call().__exit__(None, None, None),
    ])
    fcntl_flock.assert_called_once_with(lock_open(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    sleep.assert_not_called()
    getpid.assert_called_once()
    print_lock.assert_not_called()
@patch('spoc.flock.print_lock')
@patch('fcntl.flock')
@patch('time.sleep')
@patch('os.getpid', return_value=5678)
@patch('builtins.open', new_callable=mock_open, read_data='1234')
def test_locked_fail(lock_open, getpid, sleep, fcntl_flock, print_lock):
    """Two EAGAIN failures make the decorator report the current holder
    (PID 1234), sleep-retry, and finally record our own PID."""
    fcntl_flock.side_effect = [
        OSError(errno.EAGAIN, 'in use'),
        OSError(errno.EAGAIN, 'in use'),
        None,
    ]
    mock_func()
    lock_open.assert_has_calls([
        call(config.LOCK_FILE, 'a', encoding='utf-8'),
        call().__enter__(),
        call().__exit__(None, None, None),
        call(config.LOCK_FILE, 'r+', encoding='utf-8'),
        call().__enter__(),
        call().read(),
        call().seek(0),
        call().truncate(),
        call().write('5678'),
        call().flush(),
        call().__exit__(None, None, None),
    ])
    expected_fcntl_flock_call = call(lock_open(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    assert fcntl_flock.call_args_list.count(expected_fcntl_flock_call) == 3
    expected_sleep_call = call(0.1)
    assert sleep.call_args_list.count(expected_sleep_call) == 2
    getpid.assert_called_once()
    print_lock.assert_called_once_with('1234')
@patch('spoc.flock.print_lock')
@patch('fcntl.flock', side_effect=OSError(errno.EBADF, 'nope'))
@patch('time.sleep')
@patch('os.getpid', return_value=5678)
@patch('builtins.open', new_callable=mock_open, read_data='1234')
def test_locked_error(lock_open, getpid, sleep, fcntl_flock, print_lock):
    """A non-EAGAIN flock error propagates immediately without retrying."""
    with pytest.raises(OSError):
        mock_func()
    # Last call is
    # call().__exit__(<class 'OSError'>, OSError(9, 'nope'), <traceback object at 0xaddress>)
    # The exception can be passed by the context manager above and checked as follows
    # call().__exit__(ex.type, ex.value, ex.tb.tb_next.tb_next.tb_next)
    # but it may by CPython specific, and frankly, that tb_next chain looks horrible.
    # hence checking just the method and comparing the args with themselves
    last_exit_call_args = lock_open().__exit__.call_args_list[-1][0]
    lock_open.assert_has_calls([
        call(config.LOCK_FILE, 'a', encoding='utf-8'),
        call().__enter__(),
        call().__exit__(None, None, None),
        call(config.LOCK_FILE, 'r+', encoding='utf-8'),
        call().__enter__(),
        call().__exit__(*last_exit_call_args),
    ])
    fcntl_flock.assert_called_once_with(lock_open(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    sleep.assert_not_called()
    getpid.assert_not_called()
    print_lock.assert_not_called()

165
tests/test_podman.py Normal file
View File

@ -0,0 +1,165 @@
import subprocess
import os
from unittest.mock import patch, call
from spoc import podman
# Directory with canned podman JSON output used as fixtures by these tests.
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')
@patch('subprocess.run')
def test_run(run):
    """podman.run() invokes subprocess.run with check=True and the podman env."""
    result = podman.run(['foo', 'bar'])
    run.assert_called_once_with(['foo', 'bar'], check=True, env=podman.ENV)
    assert result == run.return_value
@patch('subprocess.run')
def test_out(run):
    """podman.out() captures stdout, strips it, and forwards extra kwargs."""
    run.return_value.stdout = 'RESULT\n'
    assert podman.out(['foo', 'bar'], arg1=123, arg2=True) == 'RESULT'
    run.assert_called_once_with(['foo', 'bar'], stdout=subprocess.PIPE, text=True, check=True,
                                env=podman.ENV, arg1=123, arg2=True)
@patch('spoc.podman.out', return_value=' 0 1000 1\n 1 100000 65536')
def test_get_subuidgid(out):
    """get_subuidgid() parses the subordinate range start from the uid and
    gid maps (both calls return the same mocked output here)."""
    subuidgid = podman.get_subuidgid()
    assert subuidgid == (100000, 100000)
    out.assert_has_calls([
        call(['su', '-', 'spoc', '-c', 'podman unshare cat /proc/self/uid_map']),
        call(['su', '-', 'spoc', '-c', 'podman unshare cat /proc/self/gid_map']),
    ])
@patch('spoc.podman.out', return_value='')
def test_get_subuidgid_no_id(out):
    """Empty uid/gid maps fall back to (0, 0)."""
    subuidgid = podman.get_subuidgid()
    assert subuidgid == (0, 0)
    assert out.call_count == 2
@patch('spoc.podman.out')
def test_get_apps(out):
    """get_apps() maps pod names to their spoc.version labels; pods without
    spoc labels (notmyapp in the fixture) are skipped."""
    fixture = os.path.join(TEST_DATA_DIR, 'podman_pod_ps.json')
    with open(fixture, encoding='utf-8') as f:
        out.return_value = f.read()
    pods = podman.get_apps()
    out.assert_called_once_with(['podman', 'pod', 'ps', '--format', 'json'])
    assert pods == {'someapp': '0.1', 'anotherapp': '0.2', 'yetanotherapp': '0.3'}
@patch('spoc.podman.out')
def test_get_volumes_for_app(out):
    """get_volumes_for_app() lists volume names filtered by the spoc.app label."""
    with open(os.path.join(TEST_DATA_DIR, 'podman_volume_ls.json'), encoding='utf-8') as f:
        out.return_value = f.read()
    volumes = podman.get_volumes_for_app('someapp')
    expected_cmd = ['podman', 'volume', 'ls', '--filter', 'label=spoc.app=someapp',
                    '--format', 'json']
    out.assert_called_once_with(expected_cmd)
    assert volumes == {'someapp-conf', 'someapp-data'}
@patch('spoc.podman.run')
def test_start_pod(run):
    """start_pod() issues 'podman pod start' for the app's pod."""
    podman.start_pod('someapp')
    expected_cmd = ['podman', 'pod', 'start', 'someapp']
    run.assert_called_once_with(expected_cmd)
@patch('spoc.podman.run')
def test_stop_pod(run):
    """stop_pod() issues 'podman pod stop --ignore' (no error if absent)."""
    podman.stop_pod('someapp')
    expected_cmd = ['podman', 'pod', 'stop', '--ignore', 'someapp']
    run.assert_called_once_with(expected_cmd)
@patch('spoc.podman.out')
def test_get_pod_status(out):
    """get_pod_status() with an app filters on the exact spoc.app label value."""
    out.return_value = 'RESULT'
    status = podman.get_pod_status('someapp')
    expected_cmd = ['podman', 'pod', 'ps', '--filter', 'label=spoc.app=someapp']
    out.assert_called_once_with(expected_cmd)
    assert status == 'RESULT'
@patch('spoc.podman.out')
def test_get_pod_status_all(out):
    """get_pod_status() without an app filters only on label presence."""
    out.return_value = 'RESULT'
    status = podman.get_pod_status()
    expected_cmd = ['podman', 'pod', 'ps', '--filter', 'label=spoc.app']
    out.assert_called_once_with(expected_cmd)
    assert status == 'RESULT'
@patch('spoc.podman.run')
@patch('spoc.podman.get_subuidgid', return_value=(100000, 100000))
def test_create_volume(get_subuidgid, run):
    """create_volume() chowns the volume to the subordinate uid/gid and
    labels it with the owning app."""
    podman.create_volume('someapp', 'someapp-vol')
    expected_cmd = ['podman', 'volume', 'create',
                    '--opt', 'o=uid=100000,gid=100000',
                    '--label', 'spoc.app=someapp', 'someapp-vol']
    get_subuidgid.assert_called_once()
    run.assert_called_once_with(expected_cmd)
@patch('spoc.podman.run')
def test_remove_volume(run):
    """remove_volume() issues 'podman volume rm'."""
    podman.remove_volume('someapp-vol')
    expected_cmd = ['podman', 'volume', 'rm', 'someapp-vol']
    run.assert_called_once_with(expected_cmd)
@patch('spoc.podman.run')
def test_create_pod(run):
    """create_pod() maps the 'spoc' sub-id ranges and labels app + version."""
    podman.create_pod('someapp', '0.1')
    expected_cmd = ['podman', 'pod', 'create', '--name', 'someapp',
                    '--subuidname', 'spoc', '--subgidname', 'spoc',
                    '--label', 'spoc.app=someapp', '--label', 'spoc.version=0.1']
    run.assert_called_once_with(expected_cmd)
@patch('spoc.podman.stop_pod')
@patch('spoc.podman.run')
def test_remove_pod(run, stop_pod):
    """remove_pod() stops the pod first, then removes it with --ignore."""
    podman.remove_pod('someapp')
    stop_pod.assert_called_once_with('someapp')
    expected_cmd = ['podman', 'pod', 'rm', '--ignore', 'someapp']
    run.assert_called_once_with(expected_cmd)
@patch('spoc.podman.run')
def test_create_container(run):
    """create_container() renders all optional kwargs into the podman command."""
    podman.create_container('someapp', 'someapp-cnt', 'example.com/someapp:0.23.6-210515',
                            env_file='/var/lib/spoc/someapp.env',
                            volumes={'someapp-srv': '/srv', 'someapp-mnt': '/mnt'},
                            requires={'someapp-cnt3', 'someapp-cnt2'},
                            hosts={'cnt2', 'cnt3', 'cnt'})
    # The expected command lists requires/volumes/hosts in sorted order even
    # though they are passed as unordered sets/dicts, so the implementation
    # must order them deterministically.
    expected_cmd = ['podman', 'container', 'create', '--name', 'someapp-cnt', '--pod', 'someapp',
                    '--subuidname', 'spoc', '--subgidname', 'spoc',
                    '--restart', 'unless-stopped', '--env-file', '/var/lib/spoc/someapp.env',
                    '--requires', 'someapp-cnt2,someapp-cnt3', '--volume', 'someapp-mnt:/mnt',
                    '--volume', 'someapp-srv:/srv', '--add-host', 'cnt:127.0.0.1',
                    '--add-host', 'cnt2:127.0.0.1', '--add-host', 'cnt3:127.0.0.1',
                    'example.com/someapp:0.23.6-210515']
    run.assert_called_once_with(expected_cmd)
@patch('spoc.podman.run')
def test_create_container_minimal(run):
    """With no optional kwargs, only the fixed podman arguments are emitted."""
    podman.create_container('someapp', 'someapp-cnt', 'example.com/someapp:0.23.6-210515')
    expected_cmd = ['podman', 'container', 'create', '--name', 'someapp-cnt', '--pod', 'someapp',
                    '--subuidname', 'spoc', '--subgidname', 'spoc',
                    '--restart', 'unless-stopped', 'example.com/someapp:0.23.6-210515']
    run.assert_called_once_with(expected_cmd)
@patch('spoc.podman.run')
def test_prune(run):
    """prune() removes all unused images and their volumes."""
    podman.prune()
    run.assert_has_calls([
        call(['podman', 'image', 'prune', '--all', '--force', '--volumes']),
    ])

53
tests/test_repo.py Normal file
View File

@ -0,0 +1,53 @@
from unittest.mock import patch, call
import pytest
from spoc import config
from spoc import repo
@patch('spoc.repo._DATA', {})
@patch('requests.get')
def test_load(req_get):
    """load() downloads repository.json with registry auth and parses it."""
    repo.load()
    req_get.assert_called_once_with(config.REPO_FILE_URL, auth=config.REGISTRY_AUTH, timeout=5)
    req_get.return_value.raise_for_status.assert_called_once()
    req_get.return_value.json.assert_called_once()
@patch('spoc.repo._DATA', {})
@patch('requests.get')
def test_load_twice_no_force(req_get):
    """A second load() without force reuses the cached data (one download)."""
    repo.load()
    repo.load()
    req_get.assert_called_once_with(config.REPO_FILE_URL, auth=config.REGISTRY_AUTH, timeout=5)
    req_get.return_value.raise_for_status.assert_called_once()
    req_get.return_value.json.assert_called_once()
@patch('spoc.repo._DATA', {})
@patch('requests.get')
def test_load_twice_force(req_get):
    """load(force=True) bypasses the cache and downloads again."""
    repo.load()
    repo.load(force=True)
    expected_call = call(config.REPO_FILE_URL, auth=config.REGISTRY_AUTH, timeout=5)
    assert req_get.call_args_list.count(expected_call) == 2
    assert req_get.return_value.raise_for_status.call_count == 2
    assert req_get.return_value.json.call_count == 2
@patch('spoc.repo._DATA', {'someapp': {'version': '0.1'}})
@patch('requests.get', side_effect=IOError())
def test_load_empty_on_fail(req_get):
    """A failed download propagates the error and leaves the cache emptied."""
    with pytest.raises(IOError):
        repo.load(force=True)
    req_get.assert_called_once_with(config.REPO_FILE_URL, auth=config.REGISTRY_AUTH, timeout=5)
    assert repo._DATA == {}  # pylint: disable=protected-access
@patch('spoc.repo._DATA', {'someapp': {'version': '0.1'}})
@patch('spoc.repo.load')
def test_get_apps(repo_load):
    """get_apps() triggers a (cached) load and returns the repo data."""
    apps = repo.get_apps()
    repo_load.assert_called_once()
    assert apps == {'someapp': {'version': '0.1'}}

267
tests/test_spoc.py Normal file
View File

@ -0,0 +1,267 @@
from unittest.mock import call, patch
import pytest
import spoc
def test_apperror():
    """AppError keeps the offending application name on the instance."""
    assert spoc.AppError('someapp').app_name == 'someapp'
def test_appalreadyinstallederror():
    """AppAlreadyInstalledError carries the app name and derives from AppError."""
    exception = spoc.AppAlreadyInstalledError('someapp')
    assert exception.app_name == 'someapp'
    assert isinstance(exception, spoc.AppError)
def test_appnotinstallederror():
    """AppNotInstalledError carries the app name and derives from AppError."""
    exception = spoc.AppNotInstalledError('someapp')
    assert exception.app_name == 'someapp'
    assert isinstance(exception, spoc.AppError)
def test_appnotinrepoerror():
exception = spoc.AppNotInRepoError('someapp')
assert exception.app_name == 'someapp'
assert isinstance(exception, spoc.AppError)
def test_appnotupdateableerror():
exception = spoc.AppNotUpdateableError('someapp')
assert exception.app_name == 'someapp'
assert isinstance(exception, spoc.AppError)
@patch('spoc.podman.get_apps', return_value={'someapp': '0.2', 'anotherapp': '0.1'})
def test_list_installed(get_apps):
    """list_installed() returns the podman name -> version map."""
    result = spoc.list_installed()
    get_apps.assert_called_once()
    assert result == {'someapp': '0.2', 'anotherapp': '0.1'}

@patch('spoc.repo.get_apps',
       return_value={'someapp': {'version': '0.2'}, 'anotherapp': {'version': '0.1'}})
def test_list_online(get_apps):
    """list_online() flattens the repo metadata to name -> version."""
    result = spoc.list_online()
    get_apps.assert_called_once()
    assert result == {'someapp': '0.2', 'anotherapp': '0.1'}
@patch('spoc.repo.get_apps',
       return_value={'someapp': {'version': '0.2'}, 'anotherapp': {'version': '0.1'}})
@patch('spoc.podman.get_apps', return_value={'someapp': '0.1'})
def test_list_updates(podman_get_apps, repo_get_apps):
    """Only installed apps with a newer online version are reported."""
    result = spoc.list_updates()
    podman_get_apps.assert_called_once()
    repo_get_apps.assert_called_once()
    assert result == {'someapp': '0.1 -> 0.2'}
@patch('spoc.repo.get_apps', return_value={'someapp': {'version': '0.1'}})
@patch('spoc.podman.get_apps', return_value={})
@patch('spoc.app.install')
def test_install(app_install, podman_get_apps, repo_get_apps):
    """A not-yet-installed app present in the repo gets installed."""
    spoc.install.__wrapped__('someapp')
    podman_get_apps.assert_called_once()
    repo_get_apps.assert_called_once()
    app_install.assert_called_once_with('someapp', from_file=None)

@patch('spoc.repo.get_apps')
@patch('spoc.podman.get_apps', return_value={})
@patch('spoc.app.install')
def test_install_from_file(app_install, podman_get_apps, repo_get_apps):
    """Installing from a local file skips the online repo lookup."""
    spoc.install.__wrapped__('someapp', 'somefile')
    podman_get_apps.assert_called_once()
    repo_get_apps.assert_not_called()
    app_install.assert_called_once_with('someapp', from_file='somefile')

@patch('spoc.repo.get_apps', return_value={'someapp': {'version': '0.1'}})
@patch('spoc.podman.get_apps', return_value={'someapp': '0.1'})
@patch('spoc.app.install')
def test_install_already_installed(app_install, podman_get_apps, repo_get_apps):
    """Reinstalling an installed app raises AppAlreadyInstalledError."""
    with pytest.raises(spoc.AppAlreadyInstalledError):
        spoc.install.__wrapped__('someapp')
    podman_get_apps.assert_called_once()
    repo_get_apps.assert_not_called()
    app_install.assert_not_called()

@patch('spoc.repo.get_apps', return_value={})
@patch('spoc.podman.get_apps', return_value={})
@patch('spoc.app.install')
def test_install_not_in_repo(app_install, podman_get_apps, repo_get_apps):
    """Installing an app absent from the repo raises AppNotInRepoError."""
    with pytest.raises(spoc.AppNotInRepoError):
        spoc.install.__wrapped__('someapp')
    podman_get_apps.assert_called_once()
    repo_get_apps.assert_called_once()
    app_install.assert_not_called()
@patch('spoc.list_updates', return_value={'someapp': '0.1 -> 0.2'})
@patch('spoc.podman.get_apps', return_value={'someapp': '0.1'})
@patch('spoc.app.update')
def test_update(app_update, podman_get_apps, list_updates):
    """An installed app with a pending update gets updated."""
    spoc.update.__wrapped__('someapp')
    podman_get_apps.assert_called_once()
    list_updates.assert_called_once()
    app_update.assert_called_once_with('someapp', from_file=None)

@patch('spoc.list_updates', return_value={})
@patch('spoc.podman.get_apps', return_value={})
@patch('spoc.app.update')
def test_update_not_installed(app_update, podman_get_apps, list_updates):
    """Updating an app that isn't installed raises AppNotInstalledError."""
    with pytest.raises(spoc.AppNotInstalledError):
        spoc.update.__wrapped__('someapp')
    podman_get_apps.assert_called_once()
    list_updates.assert_not_called()
    app_update.assert_not_called()

@patch('spoc.list_updates', return_value={})
@patch('spoc.podman.get_apps', return_value={'someapp': '0.1'})
@patch('spoc.app.update')
def test_update_not_updateable(app_update, podman_get_apps, list_updates):
    """Updating an app with no pending update raises AppNotUpdateableError."""
    with pytest.raises(spoc.AppNotUpdateableError):
        spoc.update.__wrapped__('someapp')
    podman_get_apps.assert_called_once()
    list_updates.assert_called_once()
    app_update.assert_not_called()
@patch('spoc.podman.get_apps', return_value={'someapp': '0.1'})
@patch('spoc.app.uninstall')
def test_uninstall(app_uninstall, podman_get_apps):
    """An installed app can be uninstalled."""
    spoc.uninstall.__wrapped__('someapp')
    podman_get_apps.assert_called_once()
    app_uninstall.assert_called_once_with('someapp')

@patch('spoc.podman.get_apps', return_value={})
@patch('spoc.app.uninstall')
def test_uninstall_not_installed(app_uninstall, podman_get_apps):
    """Uninstalling a missing app raises AppNotInstalledError."""
    with pytest.raises(spoc.AppNotInstalledError):
        spoc.uninstall.__wrapped__('someapp')
    podman_get_apps.assert_called_once()
    app_uninstall.assert_not_called()
@patch('spoc.podman.get_apps', return_value={'someapp': '0.1'})
@patch('spoc.podman.start_pod')
def test_start(start_pod, podman_get_apps):
    """Starting an installed app starts its pod."""
    spoc.start.__wrapped__('someapp')
    podman_get_apps.assert_called_once()
    start_pod.assert_called_once_with('someapp')

@patch('spoc.podman.get_apps', return_value={})
@patch('spoc.podman.start_pod')
def test_start_not_installed(start_pod, podman_get_apps):
    """Starting a missing app raises AppNotInstalledError."""
    with pytest.raises(spoc.AppNotInstalledError):
        spoc.start.__wrapped__('someapp')
    podman_get_apps.assert_called_once()
    start_pod.assert_not_called()

@patch('spoc.podman.get_apps', return_value={'someapp': '0.1'})
@patch('spoc.podman.stop_pod')
def test_stop(stop_pod, podman_get_apps):
    """Stopping an installed app stops its pod."""
    spoc.stop.__wrapped__('someapp')
    podman_get_apps.assert_called_once()
    stop_pod.assert_called_once_with('someapp')

@patch('spoc.podman.get_apps', return_value={})
@patch('spoc.podman.stop_pod')
def test_stop_not_installed(stop_pod, podman_get_apps):
    """Stopping a missing app raises AppNotInstalledError."""
    with pytest.raises(spoc.AppNotInstalledError):
        spoc.stop.__wrapped__('someapp')
    podman_get_apps.assert_called_once()
    stop_pod.assert_not_called()
@patch('spoc.podman.get_apps', return_value={'someapp': '0.1'})
@patch('spoc.podman.get_pod_status', return_value='RESULT')
def test_status(get_pod_status, podman_get_apps):
    """Status of a single installed app is queried from podman."""
    result = spoc.status('someapp')
    podman_get_apps.assert_called_once()
    get_pod_status.assert_called_once_with('someapp')
    assert result == 'RESULT'

@patch('spoc.podman.get_apps')
@patch('spoc.podman.get_pod_status', return_value='RESULT')
def test_status_all(get_pod_status, podman_get_apps):
    """Status without an app name skips the installed check entirely."""
    result = spoc.status()
    podman_get_apps.assert_not_called()
    get_pod_status.assert_called_once_with(None)
    assert result == 'RESULT'

@patch('spoc.podman.get_apps', return_value={})
@patch('spoc.podman.get_pod_status')
def test_status_not_installed(get_pod_status, podman_get_apps):
    """Status of a missing app raises AppNotInstalledError."""
    with pytest.raises(spoc.AppNotInstalledError):
        spoc.status('someapp')
    podman_get_apps.assert_called_once()
    get_pod_status.assert_not_called()
@patch('spoc.podman.get_apps', return_value={'someapp': '0.1'})
@patch('spoc.autostart.set_app')
def test_set_autostart(set_app, podman_get_apps):
    """Autostart flag can be set for an installed app."""
    spoc.set_autostart.__wrapped__('someapp', True)
    podman_get_apps.assert_called_once()
    set_app.assert_called_once_with('someapp', True)

@patch('spoc.podman.get_apps', return_value={})
@patch('spoc.autostart.set_app')
def test_set_autostart_not_installed(set_app, podman_get_apps):
    """Setting autostart on a missing app raises AppNotInstalledError."""
    with pytest.raises(spoc.AppNotInstalledError):
        spoc.set_autostart.__wrapped__('someapp', True)
    podman_get_apps.assert_called_once()
    set_app.assert_not_called()
@patch('spoc.autostart.get_apps', return_value={'someapp', 'anotherapp'})
@patch('spoc.podman.start_pod')
def test_start_autostarted(start_pod, get_apps):
    """Every autostart-flagged app gets its pod started."""
    spoc.start_autostarted.__wrapped__()
    get_apps.assert_called_once()
    expected = [call('someapp'), call('anotherapp')]
    start_pod.assert_has_calls(expected, any_order=True)

@patch('spoc.podman.get_apps', return_value={'someapp': '0.1', 'anotherapp': '0.1'})
@patch('spoc.podman.stop_pod')
def test_stop_all(stop_pod, get_apps):
    """Every installed app gets its pod stopped."""
    spoc.stop_all.__wrapped__()
    get_apps.assert_called_once()
    expected = [call('someapp'), call('anotherapp')]
    stop_pod.assert_has_calls(expected, any_order=True)
@patch('spoc.config.write_auth')
@patch('spoc.repo.load')
def test_login(repo_load, write_auth):
    """Login persists credentials and forces a repo refresh."""
    spoc.login.__wrapped__('somehost', 'someuser', 'somepass')
    write_auth.assert_called_once_with('somehost', 'someuser', 'somepass')
    repo_load.assert_called_once_with(force=True)

@patch('spoc.podman.prune')
def test_prune(prune):
    """Prune delegates straight to podman."""
    spoc.prune.__wrapped__()
    prune.assert_called_once()

View File

@ -1,199 +0,0 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import os
from pkg_resources import parse_version
from spoc import repo_local, repo_online, repo_publish
from spoc.app import App
from spoc.cli import ActionQueue, print_lock, readable_size
from spoc.config import LOCK_FILE
from spoc.flock import locked
from spoc.image import Image
def listing(list_type):
    """Print names of applications matching the requested criteria.

    list_type is one of: installed, online, updates, published, running,
    stopped (guaranteed by the argparse choices).
    """
    if list_type == 'installed':
        apps = repo_local.get_apps()
    elif list_type == 'online':
        apps = repo_online.get_apps()
    elif list_type == 'updates':
        # An update exists when the app is known online with a higher version
        online_apps = repo_online.get_apps()
        apps = [app for app, definition in repo_local.get_apps().items()
                if app in online_apps
                and parse_version(online_apps[app]['version']) > parse_version(definition['version'])]
    elif list_type == 'published':
        apps = repo_publish.get_apps()
    elif list_type == 'running':
        apps = [app for app in repo_local.get_apps() if App(app).is_running()]
    elif list_type == 'stopped':
        apps = [app for app in repo_local.get_apps() if App(app).is_stopped()]
    for app in apps:
        print(app)
def _queue_missing_layers(queue, app_name):
    """Enqueue download of all image layers required by the app which are
    not yet present in the local repository.

    Layers need to be downloaded in correct order, hence the order-preserving
    de-duplication via dict.fromkeys.
    """
    required_images = []
    for container in repo_online.get_app(app_name)['containers'].values():
        required_images.extend(repo_online.get_image(container['image'])['layers'])
    local_images = repo_local.get_images()
    for layer in dict.fromkeys(required_images):
        if layer not in local_images:
            queue.download_image(Image(layer, False))

@locked(LOCK_FILE, print_lock)
def install(app_name):
    """Install application from online repository."""
    queue = ActionQueue()
    _queue_missing_layers(queue, app_name)
    queue.install_app(App(app_name, False, False))
    queue.process()

@locked(LOCK_FILE, print_lock)
def update(app_name):
    """Update application from online repository."""
    queue = ActionQueue()
    _queue_missing_layers(queue, app_name)
    queue.update_app(App(app_name, False))
    queue.process()
@locked(LOCK_FILE, print_lock)
def uninstall(app_name):
    """Remove an application and its containers from the local repository."""
    queue = ActionQueue()
    queue.uninstall_app(App(app_name, False))
    queue.process()

def start(app_name):
    """Start all containers of the application."""
    queue = ActionQueue()
    queue.start_app(App(app_name))
    queue.process()

def stop(app_name):
    """Stop all containers of the application."""
    queue = ActionQueue()
    queue.stop_app(App(app_name))
    queue.process()

def status(app_name):
    """Print current status of every container of the application."""
    for container, state in sorted(App(app_name).status().items()):
        print(f'{container}: {state.value}')
def publish(filename, force):
    """Publish an application unless it is already published (or force is set).

    The application name is derived from the directory containing the
    metadata file.
    """
    abs_path = os.path.abspath(filename)
    app_name = os.path.basename(os.path.dirname(abs_path))
    if not force and app_name in repo_publish.get_apps():
        print(f'Application {app_name} already published, skipping publish task')
        return
    app = App(app_name, False, False)
    print(f'Publishing application {app_name} from file {abs_path}')
    # Drop any previously published artifact before writing the new one
    app.unpublish()
    size, dlsize = app.publish(filename)
    print(f'Application {app_name} compressed from {readable_size(size)} to {readable_size(dlsize)} and published successfully')

def unpublish(app_name):
    """Remove the application from the publish repository."""
    App(app_name, False, False).unpublish()

def autostart(app_name, value):
    """Enable or disable automatic start of the application on boot."""
    enabled = value.lower() in ('1', 'on', 'enable', 'true')
    App(app_name, False).set_autostart(enabled)
def start_autostarted():
    """Start all applications (resp. their containers) flagged for autostart on boot."""
    for name, definition in repo_local.get_apps().items():
        if definition.get('autostart'):
            App(name).start()

def stop_all():
    """Stop all applications (resp. their containers)."""
    # Only the app names are needed; iterating the dict avoids the unused values
    for name in repo_local.get_apps():
        App(name).stop()
# Command line interface - one subparser per supported action; each subparser
# stores its handler function in args.action via set_defaults.
parser = argparse.ArgumentParser(description='SPOC application manager')
parser.set_defaults(action=None)
subparsers = parser.add_subparsers()
parser_list = subparsers.add_parser('list')
parser_list.set_defaults(action=listing)
parser_list.add_argument('type', choices=('installed', 'online', 'updates', 'published', 'running', 'stopped'), default='installed', const='installed', nargs='?', help='Selected repository or application criteria')
parser_install = subparsers.add_parser('install')
parser_install.set_defaults(action=install)
parser_install.add_argument('app', help='Name of the application to install')
parser_update = subparsers.add_parser('update')
parser_update.set_defaults(action=update)
parser_update.add_argument('app', help='Name of the application to update')
parser_uninstall = subparsers.add_parser('uninstall')
parser_uninstall.set_defaults(action=uninstall)
parser_uninstall.add_argument('app', help='Name of the application to uninstall')
parser_start = subparsers.add_parser('start')
parser_start.set_defaults(action=start)
parser_start.add_argument('app', help='Name of the application to start')
parser_stop = subparsers.add_parser('stop')
parser_stop.set_defaults(action=stop)
parser_stop.add_argument('app', help='Name of the application to stop')
parser_status = subparsers.add_parser('status')
parser_status.set_defaults(action=status)
parser_status.add_argument('app', help='Name of the application to check')
parser_publish = subparsers.add_parser('publish')
parser_publish.set_defaults(action=publish)
parser_publish.add_argument('-f', '--force', action='store_true', help='Force republish already published application')
parser_publish.add_argument('filename', help='Path to metadata file of the application to publish')
parser_unpublish = subparsers.add_parser('unpublish')
parser_unpublish.set_defaults(action=unpublish)
parser_unpublish.add_argument('app', help='Name of the application to unpublish')
parser_autostart = subparsers.add_parser('autostart')
parser_autostart.set_defaults(action=autostart)
parser_autostart.add_argument('app', help='Name of the application to be automatically started')
parser_autostart.add_argument('value', choices=('1', 'on', 'enable', 'true', '0', 'off', 'disable', 'false'), help='Set or unset the applications to be automatically started after the host boots up')
parser_start_autostarted = subparsers.add_parser('start-autostarted')
parser_start_autostarted.set_defaults(action=start_autostarted)
parser_stop_all = subparsers.add_parser('stop-all')
parser_stop_all.set_defaults(action=stop_all)
args = parser.parse_args()
# Dispatch to the selected handler; the identity comparison against the
# function set via set_defaults determines which argument set to pass.
if args.action is listing:
    listing(args.type)
elif args.action is install:
    install(args.app)
elif args.action is update:
    update(args.app)
elif args.action is uninstall:
    uninstall(args.app)
elif args.action is start:
    start(args.app)
elif args.action is stop:
    stop(args.app)
elif args.action is status:
    status(args.app)
elif args.action is publish:
    publish(args.filename, args.force)
elif args.action is unpublish:
    unpublish(args.app)
elif args.action is autostart:
    autostart(args.app, args.value)
elif args.action is start_autostarted:
    start_autostarted()
elif args.action is stop_all:
    stop_all()
else:
    # No subcommand given (action stayed None)
    parser.print_usage()

View File

@ -1,189 +0,0 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import os
import shlex
import sys
from spoc import repo_local
from spoc.config import VOLUMES_DIR
from spoc.container import Container
from spoc.image import Image
def listing(state):
    """Print names of containers in the requested state (all/running/stopped)."""
    if state == 'all':
        containers = repo_local.get_containers().keys()
    elif state == 'running':
        containers = [name for name in repo_local.get_containers() if Container(name).is_running()]
    elif state == 'stopped':
        containers = [name for name in repo_local.get_containers() if Container(name).is_stopped()]
    for name in containers:
        print(name)
def modify_depend(container, depend):
    """Add or remove a container start dependency.

    A dependency prefixed with '!' is removed; otherwise it is added.
    Removing a dependency that is not present is a silent no-op.
    """
    if depend.startswith('!'):
        try:
            container.depends.remove(depend[1:])
        # container.depends is a list, so an absent item raises ValueError
        # (the previous KeyError handler could never catch it)
        except ValueError:
            pass
    else:
        # Add the dependency and remove duplicates
        container.depends.append(depend)
        container.depends = list(set(container.depends))
def modify_mount(container, mount):
    """Set or delete a container mount point ('volume:mountpoint' format).

    An empty mountpoint requests removal of the volume mount; removing an
    unknown volume is a silent no-op.
    """
    volume, mountpoint = mount.split(':', 1)
    if not mountpoint:
        container.mounts.pop(volume, None)
    else:
        container.mounts[volume] = mountpoint

def modify_env(container, env):
    """Set or delete a container environment variable ('KEY=value' format).

    An empty value requests removal of the variable; removing an unknown
    key is a silent no-op.
    """
    key, value = env.split('=', 1)
    if not value:
        container.env.pop(key, None)
    else:
        container.env[key] = value
def modify_container(container, depends, mounts, envs, uid, gid, cmd, cwd, ready, halt):
    """Apply a batch of definition changes to a container object.

    List-valued changes (dependencies, mounts, environment) are applied
    item by item; scalar init fields are overwritten only when a truthy
    replacement value was supplied.
    """
    for depend in depends:
        modify_depend(container, depend)
    for mount in mounts:
        modify_mount(container, mount)
    for env in envs:
        modify_env(container, env)
    # Explicit mapping instead of the locals() introspection trick
    overrides = {'uid': uid, 'gid': gid, 'cmd': cmd, 'cwd': cwd, 'ready': ready, 'halt': halt}
    for member, value in overrides.items():
        if value:
            setattr(container, member, value)
def create(container_name, image_name, depends, mounts, env, uid, gid, cmd, cwd, ready, halt):
    """Create a container from an image definition plus CLI overrides."""
    new_container = Container(container_name, False)
    # Start from the image's definition, then layer the CLI modifications on top
    new_container.set_definition(Image(image_name).get_definition())
    modify_container(new_container, depends, mounts, env, uid, gid, cmd, cwd, ready, halt)
    new_container.create()

def modify(container_name, depends, mounts, env, uid, gid, cmd, cwd, ready, halt):
    """Change the configuration of an existing container."""
    existing = Container(container_name)
    modify_container(existing, depends, mounts, env, uid, gid, cmd, cwd, ready, halt)
    existing.create()

def destroy(container_name):
    """Stop the container if needed and remove it with its directory."""
    doomed = Container(container_name, False)
    if doomed.is_running():
        doomed.stop()
    doomed.destroy()
def start(container_name, command):
    """Start the container, optionally overriding its default init command."""
    Container(container_name).start(command)

def stop(container_name):
    """Stop the container using the halt signal from its definition."""
    Container(container_name).stop()

def status(container_name):
    """Print the container's current run state."""
    print(Container(container_name).get_state().value)

def execute(container_name, command, uid, gid):
    """Run a command inside the container's namespace and exit with its return code."""
    result = Container(container_name).execute(command, uid, gid)
    sys.exit(result.returncode)
# Command line interface - one subparser per supported action; each subparser
# stores its handler function in args.action via set_defaults.
parser = argparse.ArgumentParser(description='SPOC container manager')
parser.set_defaults(action=None)
subparsers = parser.add_subparsers()
parser_list = subparsers.add_parser('list')
parser_list.set_defaults(action=listing)
parser_list.add_argument('type', choices=('all', 'running', 'stopped'), default='all', const='all', nargs='?', help='Selected container criteria')
parser_create = subparsers.add_parser('create')
parser_create.set_defaults(action=create)
parser_create.add_argument('-d', '--depends', action='append', default=[], help='Add another container as a start dependency')
parser_create.add_argument('-m', '--mount', action='append', default=[], help='Add mount to the container - format volume:mountpoint[:file]')
parser_create.add_argument('-e', '--env', action='append', default=[], help='Add environment variable for the container - format KEY=value')
parser_create.add_argument('-u', '--uid', help='Sets the container init UID')
parser_create.add_argument('-g', '--gid', help='Sets the container init GID')
parser_create.add_argument('-c', '--cmd', help='Sets the container init command')
parser_create.add_argument('-w', '--workdir', help='Sets the container init working directory')
parser_create.add_argument('-r', '--ready', help='Sets the container ready command')
parser_create.add_argument('-s', '--stopsig', help='Sets the signal to be sent to init on container shutdown')
parser_create.add_argument('container', help='Name of the container to create')
parser_create.add_argument('image', help='Name of the image of which the container should be based')
parser_modify = subparsers.add_parser('modify')
parser_modify.set_defaults(action=modify)
parser_modify.add_argument('-d', '--depends', action='append', default=[], help='Add another container as a start dependency - prepend the name with ! to remove the dependency')
parser_modify.add_argument('-m', '--mount', action='append', default=[], help='Add mount to the container - format volume:mountpoint - specify empty mountpoint to remove the mount')
parser_modify.add_argument('-e', '--env', action='append', default=[], help='Add environment variable for the container - format KEY=value - specify empty value to remove the env')
parser_modify.add_argument('-u', '--uid', help='Sets the container init UID')
parser_modify.add_argument('-g', '--gid', help='Sets the container init GID')
parser_modify.add_argument('-c', '--cmd', help='Sets the container init command')
parser_modify.add_argument('-w', '--workdir', help='Sets the container init working directory')
parser_modify.add_argument('-r', '--ready', help='Sets the container ready command')
parser_modify.add_argument('-s', '--stopsig', help='Sets the signal to be sent to init on container shutdown')
parser_modify.add_argument('container', help='Name of the container to modify')
parser_destroy = subparsers.add_parser('destroy')
parser_destroy.set_defaults(action=destroy)
parser_destroy.add_argument('container', help='Name of the container to destroy')
parser_start = subparsers.add_parser('start')
parser_start.set_defaults(action=start)
parser_start.add_argument('container', help='Name of the container to start')
parser_start.add_argument('command', nargs=argparse.REMAINDER, help='Command to be run instead of the default init command')
parser_stop = subparsers.add_parser('stop')
parser_stop.set_defaults(action=stop)
parser_stop.add_argument('container', help='Name of the container to stop')
parser_status = subparsers.add_parser('status')
parser_status.set_defaults(action=status)
parser_status.add_argument('container', help='Name of the container to check')
parser_exec = subparsers.add_parser('exec')
parser_exec.set_defaults(action=execute)
parser_exec.add_argument('-u', '--uid', help='Sets the command UID')
parser_exec.add_argument('-g', '--gid', help='Sets the command GID')
parser_exec.add_argument('container', help='Name of the container in which to run the command')
parser_exec.add_argument('command', nargs=argparse.REMAINDER, help='The command to be run')
args = parser.parse_args()
# Dispatch to the selected handler; the identity comparison against the
# function set via set_defaults determines which argument set to pass.
if args.action is listing:
    listing(args.type)
elif args.action is create:
    create(args.container, args.image, args.depends, args.mount, args.env, args.uid, args.gid, args.cmd, args.workdir, args.ready, args.stopsig)
elif args.action is modify:
    modify(args.container, args.depends, args.mount, args.env, args.uid, args.gid, args.cmd, args.workdir, args.ready, args.stopsig)
elif args.action is destroy:
    destroy(args.container)
elif args.action is start:
    start(args.container, args.command)
elif args.action is stop:
    stop(args.container)
elif args.action is status:
    status(args.container)
elif args.action is execute:
    execute(args.container, args.command, args.uid, args.gid)
else:
    # No subcommand given (action stayed None)
    parser.print_usage()

View File

@ -1,16 +0,0 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# LXC lifecycle hook - LXC invokes this script with hook metadata passed
# via LXC_* environment variables.
import os
from spoc.container import Container
if __name__ == '__main__':
    hook_type = os.environ['LXC_HOOK_TYPE']
    container = Container(os.environ['LXC_NAME'])
    if hook_type == 'pre-start':
        # Ensure a clean ephemeral layer before mounting the container rootfs
        container.clean_ephemeral_layer()
        container.mount_rootfs()
    elif hook_type == 'post-stop':
        # Tear down in reverse order, discarding changes made during the run
        container.unmount_rootfs()
        container.clean_ephemeral_layer()

View File

@ -1,161 +0,0 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import os
import sys
from spoc import repo_local, repo_online, repo_publish
from spoc.cli import ActionQueue, print_lock, readable_size
from spoc.config import LOCK_FILE
from spoc.depsolver import DepSolver
from spoc.exceptions import ImageNotFoundError
from spoc.flock import locked
from spoc.image import Image
from spoc.imagebuilder import ImageBuilder
def get_image_name(file_path):
    """Return the image name declared by the first 'IMAGE' directive in the
    given image build file, or None when no such directive exists."""
    with open(file_path) as build_file:
        for line in build_file:
            if line.startswith('IMAGE '):
                return line.split()[1]
    return None
def listing(list_type):
    """Print names of images in the selected repository (installed/online/published)."""
    if list_type == 'installed':
        images = repo_local.get_images()
    elif list_type == 'online':
        images = repo_online.get_images()
    elif list_type == 'published':
        images = repo_publish.get_images()
    for image in images:
        print(image)

@locked(LOCK_FILE, print_lock)
def download(image_name):
    """Download and unpack all layers of an image that are missing locally."""
    queue = ActionQueue()
    local_images = repo_local.get_images()
    # Only fetch layers which are not already present
    for layer in repo_online.get_image(image_name)['layers']:
        if layer not in local_images:
            queue.download_image(Image(layer, False))
    queue.process()
@locked(LOCK_FILE, print_lock)
def delete(image_name):
    """Remove an image together with all images layered on top of it.

    Refuses to act when any local container still references the image.
    """
    used_by = [c for c, d in repo_local.get_containers().items() if image_name in d['layers']]
    if used_by:
        sys.exit(f'Error: Image {image_name} is used by container{"s" if len(used_by) > 1 else ""} {", ".join(used_by)}')
    # Everything not inheriting from the doomed layer survives
    retained_layers = set(image for image, definition in repo_local.get_images().items() if image_name not in definition['layers'])
    remove_layers(retained_layers)

@locked(LOCK_FILE, print_lock)
def clean():
    """Remove images which aren't used by any locally defined container."""
    retained_layers = set()
    for definition in repo_local.get_containers().values():
        retained_layers.update(definition['layers'])
    remove_layers(retained_layers)
def remove_layers(retained_layers):
    """Delete every local image not listed in retained_layers.

    A dependency tree is built first so images are removed children-first,
    never deleting a layer that another surviving layer still builds upon.
    """
    depsolver = DepSolver()
    for name in set(repo_local.get_images()) - retained_layers:
        image = Image(name)
        depsolver.add(image.name, set(image.layers) - retained_layers, image)
    queue = ActionQueue()
    # Reverse dependency order: delete dependants before their parents
    for image in reversed(depsolver.solve()):
        queue.delete_image(image)
    queue.process()
@locked(LOCK_FILE, print_lock)
def build(filename, force, do_publish):
    """Build an image from a build recipe, optionally publishing it afterwards."""
    image_name = get_image_name(filename)
    if force or image_name not in repo_local.get_images():
        image = Image(image_name, False)
        print(f'Building image {image_name} from file {os.path.abspath(filename)}')
        # Drop any stale build before creating the new one
        image.delete()
        image.create(ImageBuilder(), filename)
        print(f'Image {image_name} built successfully')
        # A fresh build always invalidates the published artifact
        force = True
    else:
        print(f'Image {image_name} already built, skipping build task')
    if do_publish:
        publish(image_name, force)

def publish(image_name, force):
    """Publish an image unless it is already published (or force is set)."""
    if not force and image_name in repo_publish.get_images():
        print(f'Image {image_name} already published, skipping publish task')
        return
    image = Image(image_name)
    print(f'Publishing image {image_name}')
    image.unpublish()
    size, dlsize = image.publish()
    print(f'Image {image_name} compressed from {readable_size(size)} to {readable_size(dlsize)} and published successfully')

def unpublish(image_name):
    """Remove the image from the publish repository."""
    Image(image_name, False).unpublish()
# Command line interface - one subparser per supported action; each subparser
# stores its handler function in args.action via set_defaults.
parser = argparse.ArgumentParser(description='SPOC image manager')
parser.set_defaults(action=None)
subparsers = parser.add_subparsers()
parser_list = subparsers.add_parser('list')
parser_list.set_defaults(action=listing)
parser_list.add_argument('type', choices=('installed', 'online', 'published'), default='installed', const='installed', nargs='?', help='Selected repository')
parser_download = subparsers.add_parser('download')
parser_download.set_defaults(action=download)
parser_download.add_argument('image', help='Name of the image to download')
parser_delete = subparsers.add_parser('delete')
parser_delete.set_defaults(action=delete)
parser_delete.add_argument('image', help='Name of the image to delete')
parser_clean = subparsers.add_parser('clean')
parser_clean.set_defaults(action=clean)
parser_build = subparsers.add_parser('build')
parser_build.set_defaults(action=build)
parser_build.add_argument('-f', '--force', action='store_true', help='Force rebuild already existing image')
parser_build.add_argument('-p', '--publish', action='store_true', help='Publish the image after successful build')
parser_build.add_argument('filename', help='Path to the file with build recipe')
parser_publish = subparsers.add_parser('publish')
parser_publish.set_defaults(action=publish)
parser_publish.add_argument('-f', '--force', action='store_true', help='Force republish already published image')
parser_publish.add_argument('image', help='Name of the image to publish')
parser_unpublish = subparsers.add_parser('unpublish')
parser_unpublish.set_defaults(action=unpublish)
parser_unpublish.add_argument('image', help='Name of the image to unpublish')
args = parser.parse_args()
# Dispatch to the selected handler; the identity comparison against the
# function set via set_defaults determines which argument set to pass.
if args.action is listing:
    listing(args.type)
elif args.action is download:
    download(args.image)
elif args.action is delete:
    delete(args.image)
elif args.action is clean:
    clean()
elif args.action is build:
    build(args.filename, args.force, args.publish)
elif args.action is publish:
    publish(args.image, args.force)
elif args.action is unpublish:
    unpublish(args.image)
else:
    # No subcommand given (action stayed None)
    parser.print_usage()

View File

@ -1 +0,0 @@
# -*- coding: utf-8 -*-

View File

@ -1,213 +0,0 @@
# -*- coding: utf-8 -*-
import copy
import json
import os
import shutil
import subprocess
import tarfile
import urllib.parse
from . import config, repo_local, repo_online, repo_publish
from .container import Container
from .image import Image
DEFINITION_MEMBERS = {'version', 'meta', 'autostart', 'containers'}

class App:
    """Application composed of one or more containers plus packaged lifecycle scripts.

    Definitions are loaded from the local repository (installed apps) or fetched
    from the online repository (apps available for download/update).
    """

    def __init__(self, name, define_containers=True, load_from_repo=True):
        """Create an application handle.

        :param name: Application name (also the directory name under APPS_DIR).
        :param define_containers: Forwarded to Container() when populating
            self.containers - whether container definitions are loaded too.
        :param load_from_repo: Load the definition from the local repository.
        """
        self.name = name
        self.version = None
        self.app_dir = os.path.join(config.APPS_DIR, name)
        self.meta = {}
        self.autostart = False
        self.containers = []
        if load_from_repo:
            self.set_definition(repo_local.get_app(name), define_containers)

    def set_definition(self, definition, define_containers):
        """Set attributes given by the definition dictionary."""
        for key in DEFINITION_MEMBERS.intersection(definition):
            setattr(self, key, definition[key])
        # Populate containers property with actual container objects
        self.containers = [Container(container, define_containers)
                           for container in definition['containers']]

    def get_definition(self):
        """Return a shallow copy of the application definition as a dictionary."""
        definition = {}
        for key in DEFINITION_MEMBERS:
            value = getattr(self, key)
            if value:
                definition[key] = copy.copy(value)
        # Overwrite containers key with a list of container names only
        definition['containers'] = [container.name for container in self.containers]
        return definition

    def download(self, observer=None):
        """Download the archive with application scripts and install data."""
        os.makedirs(config.TMP_APPS_DIR, 0o700, True)
        archive_url = urllib.parse.urljoin(config.ONLINE_APPS_URL, f'{self.name}.tar.xz')
        archive_path = os.path.join(config.TMP_APPS_DIR, f'{self.name}.tar.xz')
        definition = repo_online.get_app(self.name)
        if observer:
            # Report the expected download size so the observer can show progress
            observer.units_total = definition['dlsize']
        repo_online.download_archive(archive_url, archive_path, definition['hash'], observer)

    def unpack_downloaded(self, observer=None):
        """Unpack the downloaded archive with application scripts and install data."""
        archive_path = os.path.join(config.TMP_APPS_DIR, f'{self.name}.tar.xz')
        definition = repo_online.get_app(self.name)
        if observer:
            # Report the expected unpacked size so the observer can show progress
            observer.units_total = definition['size']
        repo_online.unpack_archive(archive_path, config.APPS_DIR, definition['hash'], observer)

    def run_script(self, action):
        """Run the script for the given action (e.g. 'install', 'update', 'uninstall'), if present."""
        script_dir = os.path.join(self.app_dir, action)
        # Fixed: join with the action name directly; the previous f'{script_dir}.sh'
        # joined an already-absolute path (same result, but only by accident)
        script_path = os.path.join(self.app_dir, f'{action}.sh')
        if os.path.exists(script_path):
            # Run the script in its working directory, if there is one, so it
            # doesn't have to figure out paths to packaged files
            env = os.environ.copy()
            env['LAYERS_DIR'] = config.LAYERS_DIR
            env['VOLUMES_DIR'] = config.VOLUMES_DIR
            env['APPS_DIR'] = config.APPS_DIR
            env['LOG_DIR'] = config.LOG_DIR
            cwd = script_dir if os.path.exists(script_dir) else self.app_dir
            subprocess.run(script_path, cwd=cwd, env=env, check=True)

    def create_container(self, name, definition):
        """Create a container and enhance its definition (typically mounts) based on application requirements."""
        container = Container(name, False)
        # Start from the image definition, then overlay app-specific settings
        container.set_definition(Image(definition['image']).get_definition())
        if 'depends' in definition:
            container.depends = definition['depends']
        if 'env' in definition:
            container.env.update(definition['env'])
        if 'mounts' in definition:
            container.mounts.update(definition['mounts'])
        container.create()
        self.containers.append(container)

    def install(self, observer=None):
        """Install the application: build containers, run install.sh and register the app."""
        definition = repo_online.get_app(self.name)
        self.version = definition['version']
        self.meta = definition['meta']
        # Run the uninstall script first - presumably to clean up leftovers
        # of a previously failed installation; TODO confirm
        self.run_script('uninstall')
        # Build containers
        for container, container_definition in definition['containers'].items():
            self.create_container(container, container_definition)
        # Run install script and register the app
        try:
            self.run_script('install')
        except BaseException:
            # Stop all containers if install.sh fails (deliberately catches
            # everything, including KeyboardInterrupt), then re-raise
            for container in self.containers:
                container.stop()
            raise
        repo_local.register_app(self.name, self.get_definition())

    def update(self, observer=None):
        """Update the application: rebuild containers, run update.sh and re-register the app."""
        # Stop and remove existing containers
        for container in self.containers.copy():
            if container.is_running():
                container.stop()
            container.destroy()
            self.containers.remove(container)
        # Load online definition
        definition = repo_online.get_app(self.name)
        self.version = definition['version']
        self.meta = definition['meta']
        # Build containers
        for container, container_definition in definition['containers'].items():
            self.create_container(container, container_definition)
        # Run update script and re-register the app
        try:
            self.run_script('update')
        except BaseException:
            # Stop all containers if update.sh fails (deliberately catches
            # everything, including KeyboardInterrupt), then re-raise
            for container in self.containers:
                container.stop()
            raise
        repo_local.register_app(self.name, self.get_definition())

    def uninstall(self, observer=None):
        """Uninstall the application: remove containers, run uninstall.sh and unregister the app."""
        # Stop and remove containers
        for container in self.containers:
            if container.is_running():
                container.stop()
            container.destroy()
        # Run uninstall script
        self.run_script('uninstall')
        # Unregister app and remove scripts
        repo_local.unregister_app(self.name)
        try:
            shutil.rmtree(self.app_dir)
        except FileNotFoundError:
            pass

    def start(self, observer=None):
        """Start all application containers."""
        if observer:
            observer.units_total = len(self.containers)
        for container in self.containers:
            container.start()
            if observer:
                observer.units_done += 1

    def stop(self, observer=None):
        """Stop all application containers."""
        if observer:
            observer.units_total = len(self.containers)
        for container in self.containers:
            container.stop()
            if observer:
                observer.units_done += 1

    def status(self):
        """Return the state for all application containers as {name: state}."""
        return {container.name: container.get_state() for container in self.containers}

    def is_running(self):
        """Convenience method to determine if any of the application's containers are running."""
        for container in self.containers:
            if container.is_running():
                return True
        return False

    def is_stopped(self):
        """Convenience method to determine if all of the application's containers are stopped."""
        return not self.is_running()

    def set_autostart(self, autostart):
        """Configure whether the application should be automatically started after boot."""
        self.autostart = autostart
        repo_local.register_app(self.name, self.get_definition())

    def publish(self, filename):
        """Create the application archive and register it in the publish repository.

        :param filename: Path to the app definition JSON; sibling script
            files/directories next to it are packaged into the archive.
        :returns: Tuple (unpacked size, archive download size) in bytes.
        """
        builddir = os.path.dirname(filename)
        os.makedirs(config.PUB_APPS_DIR, 0o755, True)
        files = repo_publish.TarSizeCounter()
        archive_path = os.path.join(config.PUB_APPS_DIR, f'{self.name}.tar.xz')
        with tarfile.open(archive_path, 'w:xz') as tar:
            for content in ('install', 'install.sh', 'update', 'update.sh', 'uninstall', 'uninstall.sh'):
                content_path = os.path.join(builddir, content)
                if os.path.exists(content_path):
                    tar.add(content_path, os.path.join(self.name, content), filter=files.add_file)
        with open(filename) as f:
            definition = json.load(f)
        definition['size'] = files.size
        definition['dlsize'] = os.path.getsize(archive_path)
        definition['hash'] = repo_publish.sign_file(archive_path).hex()
        repo_publish.register_app(self.name, definition)
        return (definition['size'], definition['dlsize'])

    def unpublish(self):
        """Remove the application from the publish repository."""
        repo_publish.unregister_app(self.name)
        archive_path = os.path.join(config.PUB_APPS_DIR, f'{self.name}.tar.xz')
        try:
            os.unlink(archive_path)
        except FileNotFoundError:
            pass

View File

@ -1,81 +0,0 @@
# -*- coding: utf-8 -*-
import os
import time
from concurrent.futures import ThreadPoolExecutor
from math import floor
SIZE_PREFIXES = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')

class ActionItem:
    """A single queued action with console progress reporting.

    The wrapped callable receives this item as an observer and may update
    units_total / units_done to drive the progress display.
    """

    def __init__(self, text, action):
        self.text = text
        self.action = action
        self.units_total = 0
        self.units_done = 0

    def run(self):
        """Execute the action in a worker thread, refreshing progress every 0.2 s."""
        with ThreadPoolExecutor() as executor:
            task = executor.submit(self.action, self)
            while not task.done():
                time.sleep(0.2)
                self.print_progress()
            # Propagate any exception raised by the worker
            task.result()
        self.print_progress('\n')

    def print_progress(self, end='\r'):
        """Print the current progress line, overwriting the previous one by default."""
        line = self.text
        if self.units_total:
            percent = floor(self.units_done / self.units_total * 100)
            line = f'{line} ({self.units_done}/{self.units_total}) [{percent} %]'
        print(f'\x1b[K{line}', end=end)
class ActionQueue:
    """FIFO queue of ActionItems covering the high-level user operations."""

    def __init__(self):
        self.queue = []

    def _enqueue(self, text, action):
        # Internal helper wrapping the action into an ActionItem
        self.queue.append(ActionItem(text, action))

    def download_image(self, image):
        self._enqueue(f'Downloading image {image.name}', image.download)
        self._enqueue(f'Unpacking image {image.name}', image.unpack_downloaded)

    def delete_image(self, image):
        self._enqueue(f'Deleting image {image.name}', image.delete)

    def install_app(self, app):
        self._enqueue(f'Downloading application {app.name}', app.download)
        self._enqueue(f'Unpacking application {app.name}', app.unpack_downloaded)
        self._enqueue(f'Installing application {app.name}', app.install)

    def update_app(self, app):
        self._enqueue(f'Downloading application {app.name}', app.download)
        self._enqueue(f'Unpacking application {app.name}', app.unpack_downloaded)
        self._enqueue(f'Updating application {app.name}', app.update)

    def uninstall_app(self, app):
        self._enqueue(f'Uninstalling application {app.name}', app.uninstall)

    def start_app(self, app):
        self._enqueue(f'Starting application {app.name}', app.start)

    def stop_app(self, app):
        self._enqueue(f'Stopping application {app.name}', app.stop)

    def process(self):
        """Run all queued items in order, prefixing each with its queue position."""
        total = len(self.queue)
        for position, item in enumerate(self.queue, 1):
            item.text = f'[{position}/{total}] {item.text}'
            item.run()
def readable_size(bytes):
    """Convert a byte count into a human-readable string, e.g. 2048 -> '2.00 kB'.

    Fixes over the previous version: exactly 1024 now rolls over to '1.00 kB'
    (was '1024.00 B'), and the prefix index is clamped so absurdly large values
    can no longer raise IndexError past the last prefix.
    NOTE: the parameter name shadows the builtin 'bytes'; kept for backward
    compatibility with keyword callers.
    """
    index = 0
    size = float(bytes)
    while size >= 1024 and index < len(SIZE_PREFIXES) - 1:
        index += 1
        size /= 1024
    return f'{size:.2f} {SIZE_PREFIXES[index]}B'
def print_lock(pid):
    """Report that another process currently holds the lock.

    :param pid: PID (as a string) read from the lock file; its command line is
        looked up in /proc for a friendlier message.
    """
    cmdline_path = os.path.join('/proc', pid, 'cmdline')
    with open(cmdline_path) as f:
        # /proc cmdline separates arguments with NUL bytes
        cmdline = f.read().replace('\0', ' ').strip()
    print(f'Waiting for lock currently held by process {pid} - {cmdline}')

View File

@ -1,47 +0,0 @@
# -*- coding: utf-8 -*-
# Central configuration module - reads /etc/spoc/spoc.conf once at import time
# and exposes all derived paths/URLs as module-level constants.
import configparser
import os
import urllib.parse

# Missing config file is silently tolerated; all values below have fallbacks
CONFIG_FILE = '/etc/spoc/spoc.conf'

config = configparser.ConfigParser()
config.read(CONFIG_FILE)

# [general] section - networking and filesystem roots
NETWORK_INTERFACE = config.get('general', 'network-interface', fallback='spocbr0')
RESOLV_CONF = config.get('general', 'resolv-conf', fallback='/etc/resolv.conf')
DATA_DIR = config.get('general', 'data-dir', fallback='/var/lib/spoc/')
# Data subdirectories and files under data-dir
APPS_DIR = os.path.join(DATA_DIR, 'apps/')
CONTAINERS_DIR = os.path.join(DATA_DIR, 'containers/')
LAYERS_DIR = os.path.join(DATA_DIR, 'layers/')
VOLUMES_DIR = os.path.join(DATA_DIR, 'volumes/')
HOSTS_FILE = os.path.join(DATA_DIR, 'hosts')
REPO_FILE = os.path.join(DATA_DIR, 'repository.json')
# Lock files serializing concurrent spoc invocations
LOCK_DIR = '/run/lock'
LOCK_FILE = os.path.join(LOCK_DIR, 'spoc.lock')
HOSTS_LOCK_FILE = os.path.join(LOCK_DIR, 'spoc-hosts.lock')
REPO_LOCK_FILE = os.path.join(LOCK_DIR, 'spoc-local.lock')
# Scratch space for downloaded archives before unpacking
TMP_DIR = os.path.join(DATA_DIR, 'tmp/')
TMP_APPS_DIR = os.path.join(TMP_DIR, 'apps/')
TMP_LAYERS_DIR = os.path.join(TMP_DIR, 'layers/')
LOG_DIR = config.get('general', 'log-dir', fallback='/var/log/spoc')
# [publish] section - local publish repository layout and signing key
PUB_DIR = config.get('publish', 'publish-dir', fallback=os.path.join(DATA_DIR, 'publish'))
PUB_LAYERS_DIR = os.path.join(PUB_DIR, 'layers/')
PUB_APPS_DIR = os.path.join(PUB_DIR, 'apps/')
PUB_REPO_FILE = os.path.join(PUB_DIR, 'repository.json')
PUB_SIG_FILE = os.path.join(PUB_DIR, 'repository.sig')
PUB_PRIVKEY_FILE = config.get('publish', 'signing-key', fallback='/etc/spoc/publish.key')
PUB_LOCK_FILE = os.path.join(LOCK_DIR, 'spoc-publish.lock')
# [repo] section - online repository endpoints
# URLs which are an actual directories need to end with trailing slash
ONLINE_BASE_URL = '{}/'.format(config.get('repo', 'url', fallback='https://localhost').rstrip('/'))
ONLINE_LAYERS_URL = urllib.parse.urljoin(ONLINE_BASE_URL, 'layers/')
ONLINE_APPS_URL = urllib.parse.urljoin(ONLINE_BASE_URL, 'apps/')
ONLINE_REPO_URL = urllib.parse.urljoin(ONLINE_BASE_URL, 'repository.json')
ONLINE_SIG_URL = urllib.parse.urljoin(ONLINE_BASE_URL, 'repository.sig')
ONLINE_PUBKEY = config.get('repo', 'public-key', fallback='')

View File

@ -1,279 +0,0 @@
# -*- coding: utf-8 -*-
import copy
import enum
import os
import shlex
import shutil
import subprocess
import time
from concurrent.futures import ThreadPoolExecutor
from . import config, net, repo_local, templates
from .depsolver import DepSolver
from .exceptions import InvalidContainerStateError
# States taken from https://github.com/lxc/lxc/blob/master/src/lxc/state.h
# Built via the Enum functional API; each member's value equals its name,
# matching the strings printed by lxc-info.
_STATE_NAMES = ('STOPPED', 'STARTING', 'RUNNING', 'STOPPING', 'ABORTING',
                'FREEZING', 'FROZEN', 'THAWED', 'UNKNOWN')
ContainerState = enum.Enum('ContainerState', [(state, state) for state in _STATE_NAMES])
ContainerState.__doc__ = 'Lifecycle states reported by LXC for a container.'

DEFINITION_MEMBERS = {'build', 'depends', 'layers', 'mounts', 'env', 'uid', 'gid', 'cmd', 'cwd', 'ready', 'halt'}
class Container:
    """Single LXC container defined by image layers, bind mounts and runtime settings.

    The definition is persisted in the local repository; the on-disk layout lives
    under config.CONTAINERS_DIR/<name>. All state transitions are driven through
    the lxc-* command line tools.
    """

    def __init__(self, name, load_from_repo=True):
        # name: container name, used for directories, LXC identification and logs
        self.name = name
        # Definition defaults; overridden by set_definition()
        self.build = False
        self.depends = []
        self.layers = []
        self.mounts = {}
        self.env = {}
        self.uid = None
        self.gid = None
        self.cmd = None
        self.cwd = None
        self.ready = None
        self.halt = None
        # On-disk layout of the container
        self.container_path = os.path.join(config.CONTAINERS_DIR, name)
        self.config_path = os.path.join(self.container_path, 'config')
        self.rootfs_path = os.path.join(self.container_path, 'rootfs')
        self.olwork_path = os.path.join(self.container_path, 'olwork')
        self.ephemeral_layer_path = os.path.join(self.container_path, 'ephemeral')
        self.log_path = os.path.join(config.LOG_DIR, f'{name}.log')
        if load_from_repo:
            self.set_definition(repo_local.get_container(name))

    def set_definition(self, definition):
        """Set attributes given by the definition dictionary."""
        for key in DEFINITION_MEMBERS.intersection(definition):
            setattr(self, key, definition[key])

    def get_definition(self):
        """Return shallow copy of container definition as dictionary (falsy values omitted)."""
        definition = {}
        for key in DEFINITION_MEMBERS:
            value = getattr(self, key)
            if value:
                definition[key] = copy.copy(value)
        return definition

    def get_state(self):
        """Get current state of the container via lxc-info; UNKNOWN if the query fails."""
        try:
            state = subprocess.run(['lxc-info', '-sH', '-P', config.CONTAINERS_DIR, self.name], capture_output=True, check=True)
            return ContainerState[state.stdout.strip().decode()]
        except subprocess.CalledProcessError:
            return ContainerState.UNKNOWN

    def is_running(self):
        # Convenience method to determine if the container is running
        return self.get_state() == ContainerState.RUNNING

    def is_stopped(self):
        # Convenience method to determine if the container is stopped
        return self.get_state() == ContainerState.STOPPED

    def await_state(self, awaited_state):
        """Block execution until the container reaches the desired state or until 30 s timeout."""
        try:
            subprocess.run(['lxc-wait', '-P', config.CONTAINERS_DIR, '-s', awaited_state.value, '-t', '30', self.name], check=True)
        except subprocess.CalledProcessError:
            # Sometimes LXC decides to return rc 1 even on successful state change,
            # so double-check the actual state before raising
            actual_state = self.get_state()
            if actual_state != awaited_state:
                raise InvalidContainerStateError(self.name, actual_state)

    def mount_rootfs(self):
        # Prepares container rootfs
        # Called in lxc.hook.pre-start as the standard mount options are insufficient for rootless containers (see notes for overlayfs below)
        layers = [os.path.join(config.LAYERS_DIR, layer) for layer in self.layers]
        if not self.build:
            # Add ephemeral layer if the container is not created as part of build process
            layers.append(self.ephemeral_layer_path)
        if len(layers) > 1:
            # Multiple layers require overlayfs, however non-root users don't normally have capability to create overlayfs mounts - https://www.spinics.net/lists/linux-fsdevel/msg105877.html
            # Standard linux kernels currently doesn't support overlay mounts in user namespaces (lxc.hook.pre-mount)
            # The exception is Ubuntu or custom patches such as https://salsa.debian.org/kernel-team/linux/blob/master/debian/patches/debian/overlayfs-permit-mounts-in-userns.patch
            # Possible alternative is fuse-overlayfs, which doesn't work well on Alpine (and it's FUSE anyway, so it needs an extra service and a process for each mount)
            # Another alternative would be to mount in the namespace via -N option, but LXC doesn't expose PID or namespaces of the process during container setup
            overlay_opts = f'upperdir={layers[-1]},lowerdir={":".join(reversed(layers[:-1]))},workdir={self.olwork_path}'
            subprocess.run(['mount', '-t', 'overlay', '-o', overlay_opts, 'none', self.rootfs_path])
        else:
            # We only have a single layer, no overlay needed
            subprocess.run(['mount', '--bind', layers[0], self.rootfs_path])

    def unmount_rootfs(self):
        # Recursively unmounts container rootfs
        # Called in lxc.hook.post-stop
        # For unprivileged containers it could theoretically be called already in lxc.hook.start-host, as the user namespace clones the mounts,
        # so they are not needed in the parent namespace anymore, but removing rootfs on container stop seems more intuitive
        subprocess.run(['umount', '-R', self.rootfs_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

    def clean_ephemeral_layer(self):
        # Cleans container ephemeral layer. Called in lxc.hook.post-stop and lxc.hook.pre-start in case of unclean shutdown
        # This is done early in the container start process, so the inode of the ephemeral directory must remain unchanged
        for item in os.scandir(self.ephemeral_layer_path):
            shutil.rmtree(item.path) if item.is_dir() else os.unlink(item.path)

    def get_mount_entry(self, volume, mountpoint):
        """Build a lxc.mount.entry line; a ':file' suffix on the mountpoint marks a file bind mount."""
        mount_type = 'dir'
        if mountpoint.endswith(':file'):
            mount_type = 'file'
            mountpoint = mountpoint[:-5]
        return f'lxc.mount.entry = {os.path.join(config.VOLUMES_DIR, volume)} {mountpoint} none bind,create={mount_type} 0 0'

    def create(self):
        """Create container directories, write the LXC config and register the container."""
        # Create container directories
        os.makedirs(self.rootfs_path, 0o755, True)
        os.makedirs(self.olwork_path, 0o755, True)
        os.makedirs(self.ephemeral_layer_path, 0o755, True)
        os.makedirs(config.LOG_DIR, 0o750, True)
        # Change UID/GID of the ephemeral layer directory
        # Chown is possible only when the process is running as root, for user namespaces, see https://linuxcontainers.org/lxc/manpages/man1/lxc-usernsexec.1.html
        os.chown(self.ephemeral_layer_path, 100000, 100000)
        # Create container configuration file based on the container definition
        mounts = '\n'.join([self.get_mount_entry(v, m) for v, m in self.mounts.items()])
        env = '\n'.join([f'lxc.environment = {k}={v}' for k, v in self.env.items()])
        # Fall back to sensible defaults for unset init parameters
        uid = self.uid if self.uid else 0
        gid = self.gid if self.gid else 0
        cmd = self.cmd if self.cmd else '/sbin/init'
        cwd = self.cwd if self.cwd else '/'
        halt = self.halt if self.halt else 'SIGINT'
        # Allocate an IP address for the container in the global hosts file
        ip_address, ip_netmask, ip_gateway = net.request_ip(self.name)
        # Write LXC configuration file
        with open(self.config_path, 'w') as f:
            f.write(templates.LXC_CONTAINER_TEMPLATE.format(name=self.name,
                                                            interface=config.NETWORK_INTERFACE,
                                                            resolv_conf=config.RESOLV_CONF,
                                                            ip_address=ip_address,
                                                            ip_netmask=ip_netmask,
                                                            ip_gateway=ip_gateway,
                                                            rootfs=self.rootfs_path,
                                                            hosts=config.HOSTS_FILE,
                                                            mounts=mounts,
                                                            env=env,
                                                            uid=uid,
                                                            gid=gid,
                                                            cmd=cmd,
                                                            cwd=cwd,
                                                            halt=halt,
                                                            log=self.log_path))
        repo_local.register_container(self.name, self.get_definition())

    def destroy(self):
        """Unregister the container and remove its directories, log and IP allocation."""
        repo_local.unregister_container(self.name)
        self.unmount_rootfs()
        try:
            shutil.rmtree(self.container_path)
        except FileNotFoundError:
            pass
        try:
            os.unlink(self.log_path)
        except FileNotFoundError:
            pass
        # Release the IP address from global hosts configuration
        net.release_ip(self.name)

    def start(self, command=None):
        """Start the container including its dependencies (in dependency order)."""
        depsolver = DepSolver()
        self.get_start_dependencies(depsolver)
        for dependency in depsolver.solve():
            if not dependency.is_running():
                # Pass start command only to the current container
                dependency.do_start(command if dependency.name == self.name else None)

    def do_start(self, command=None):
        """Start this single container and wait until its readiness check passes."""
        cmd = ['--']+command if command else []
        # Start the current container, wait until it is reported as started and execute application readiness check
        subprocess.Popen(['lxc-start', '-P', config.CONTAINERS_DIR, self.name]+cmd)
        self.await_state(ContainerState.RUNNING)
        # Launch the readiness check in a separate thread, so it can be reliably cancelled after timeout
        with ThreadPoolExecutor(max_workers=1) as pool:
            # Create anonymous object to pass the task cancellation information
            guard = type('', (object,), {'cancel': False})()
            future = pool.submit(self.check_readiness, guard)
            # NOTE(review): if result() times out, guard.cancel is never set and
            # the pool shutdown will wait for the running check - confirm intended
            future.result(timeout=30)
            guard.cancel = True

    def check_readiness(self, guard):
        # Run spoc.init.ready until it returns return code 0 or the guard cancels the loop
        ready_cmd = shlex.split(self.ready) if self.ready else ['/bin/true']
        while not guard.cancel:
            state = self.get_state()
            if state != ContainerState.RUNNING:
                raise InvalidContainerStateError(self.name, state)
            check = subprocess.run(['lxc-attach', '-P', config.CONTAINERS_DIR, '--clear-env', self.name, '--']+ready_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, timeout=30)
            if check.returncode == 0:
                break
            time.sleep(0.25)

    def stop(self):
        """Stop the containers depending on the current container, then the container itself."""
        depsolver = DepSolver()
        self.get_stop_dependencies(depsolver)
        for dependency in depsolver.solve():
            if not dependency.is_stopped():
                dependency.do_stop()

    def do_stop(self):
        # Stop the current container and wait until it stops completely
        lxc_stop = subprocess.Popen(['lxc-stop', '-P', config.CONTAINERS_DIR, self.name])
        self.await_state(ContainerState.STOPPED)
        # Reap the lxc-stop process
        lxc_stop.wait()

    def execute(self, cmd, uid=None, gid=None, **kwargs):
        """Execute a command inside the container; kwargs are passed to subprocess.run.

        Uses lxc-execute when the container is stopped and lxc-attach when it is
        running; raises InvalidContainerStateError for any other state.
        """
        # If the container is starting or stopping, wait until the operation is finished
        state = self.get_state()
        if state == ContainerState.STARTING:
            self.await_state(ContainerState.RUNNING)
            state = self.get_state()
        elif state == ContainerState.STOPPING:
            self.await_state(ContainerState.STOPPED)
            state = self.get_state()
        # Resolve UID/GID, if they have been given
        uidgid_param = []
        uid, gid = self.get_uidgid(uid, gid)
        if uid:
            uidgid_param.extend(('-u', uid))
        if gid:
            uidgid_param.extend(('-g', gid))
        # If the container is stopped, use lxc-execute, otherwise use lxc-attach
        if state == ContainerState.STOPPED:
            return subprocess.run(['lxc-execute', '-P', config.CONTAINERS_DIR]+uidgid_param+[self.name, '--']+cmd, **kwargs)
        elif state == ContainerState.RUNNING:
            return subprocess.run(['lxc-attach', '-P', config.CONTAINERS_DIR, '--clear-env']+uidgid_param+[self.name, '--']+cmd, **kwargs)
        else:
            raise InvalidContainerStateError(self.name, state)

    def get_uidgid(self, user=None, group=None):
        """Resolve an user/group name to numeric UID/GID using getent inside the container."""
        uid, gid = None, None
        if user:
            uid_entry = self.execute(['/usr/bin/getent', 'passwd', user], capture_output=True, check=True).stdout.decode().split(':')
            uid, gid = uid_entry[2], uid_entry[3]
        if group:
            gid = self.execute(['/usr/bin/getent', 'group', group], capture_output=True, check=True).stdout.decode().split(':')[2]
        return (uid, gid)

    def get_start_dependencies(self, depsolver):
        """Recursively collect containers this one depends on into the solver."""
        depsolver.add(self.name, self.depends, self)
        for dependency in self.depends:
            Container(dependency).get_start_dependencies(depsolver)

    def get_stop_dependencies(self, depsolver):
        """Recursively collect containers depending on this one into the solver."""
        reverse_depends = []
        for name, definition in repo_local.get_containers().items():
            if 'depends' in definition and self.name in definition['depends']:
                reverse_depends.append(name)
        depsolver.add(self.name, reverse_depends, self)
        for dependency in reverse_depends:
            Container(dependency).get_stop_dependencies(depsolver)

View File

@ -1,36 +0,0 @@
# -*- coding: utf-8 -*-
from .exceptions import CircularDependencyError
class Node:
    """Single node of the dependency graph."""

    def __init__(self, name, depends, instance):
        self.name = name
        # A node must never depend on itself
        self.depends = set(depends) - {name}
        self.instance = instance

class DepSolver:
    """Simple topological sorter returning instances in dependency order."""

    def __init__(self):
        self.nodes = []

    def add(self, name, depends, instance):
        """Register an instance under the given name with its dependency names."""
        self.nodes.append(Node(name, depends, instance))

    def solve(self):
        """Return instances ordered so that dependencies precede their dependents.

        :raises CircularDependencyError: When no progress can be made.
        """
        unresolved = {node.name: node for node in self.nodes}
        ordered = []
        while unresolved:
            # Pick every node with no outstanding dependencies
            resolvable = {name for name, node in unresolved.items() if not node.depends}
            if not resolvable:
                # No progress possible - there must be a dependency cycle
                raise CircularDependencyError(unresolved)
            # Emit the resolved batch and drop it from the working map
            for name in resolvable:
                ordered.append(unresolved.pop(name).instance)
            # The resolved batch no longer counts as a dependency
            for node in unresolved.values():
                node.depends -= resolvable
        return ordered

View File

@ -1,41 +0,0 @@
# -*- coding: utf-8 -*-
class AppNotFoundError(Exception):
    """Requested application does not exist in the repository."""

    def __init__(self, name):
        super().__init__()
        self.name = name

    def __str__(self):
        return 'Application {} not found'.format(self.name)

class ContainerNotFoundError(Exception):
    """Requested container does not exist in the repository."""

    def __init__(self, name):
        super().__init__()
        self.name = name

    def __str__(self):
        return 'Container {} not found'.format(self.name)

class ImageNotFoundError(Exception):
    """Requested image does not exist in the repository."""

    def __init__(self, name):
        super().__init__()
        self.name = name

    def __str__(self):
        return 'Image {} not found'.format(self.name)

class InvalidContainerStateError(Exception):
    """Container is not in the expected state (running, stopped etc.)."""

    def __init__(self, container_name, container_state):
        super().__init__()
        self.container_name = container_name
        self.container_state = container_state

    def __str__(self):
        return 'Container "{}" reached unexpected state {}'.format(self.container_name, self.container_state)

class CircularDependencyError(Exception):
    """Dependency solver has found a circular dependency between nodes."""

    def __init__(self, deps):
        super().__init__()
        self.deps = deps

    def __str__(self):
        lines = ['Dependency resolution failed due to circular dependency. Dumping unresolved dependencies:']
        for dep, node in self.deps.items():
            lines.append('{} => {}'.format(dep, node.depends))
        return '\n'.join(lines)

View File

@ -1,48 +0,0 @@
# -*- coding: utf-8 -*-
import errno
import fcntl
import os
import time
from contextlib import contextmanager
@contextmanager
def lock(lock_file, fail_callback=None):
    """Context manager acquiring an exclusive advisory flock on lock_file.

    Blocks (polling every 0.1 s) until the lock is obtained, then writes the
    current PID into the file and yields the open file object.

    :param lock_file: Path to the lock file (created if missing).
    :param fail_callback: Called once with the lock file contents (PID of the
        holder) when the lock is found to be taken.
    """
    with open(lock_file, 'a'):
        # Open the lock file in append mode first to ensure its existence but not modify any data if it already exists
        pass
    # Open the lock file in read + write mode without truncation
    with open(lock_file, 'r+') as f:
        while True:
            try:
                # Try to obtain exclusive lock in non-blocking mode
                fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
                break
            except OSError as e:
                # If lock is already locked by another process
                if e.errno == errno.EAGAIN:
                    if fail_callback:
                        # Call the callback function with contents of the lock file (PID of the process holding the lock)
                        fail_callback(f.read())
                    # Remove the callback function so it's not called in every loop
                    fail_callback = None
                    # Set the position for future truncation
                    f.seek(0)
                    # Wait for the lock to be freed
                    time.sleep(0.1)
                else:
                    raise
        # If the lock was obtained, truncate the file and write PID of the process holding the lock
        f.truncate()
        f.write(str(os.getpid()))
        f.flush()
        yield f

# Function decorator
def locked(lock_file, fail_callback=None):
    """Decorator running the wrapped callable under lock(lock_file, fail_callback).

    Fixed: the wrapper now preserves the wrapped function's metadata
    (__name__, __doc__, ...) via functools.wraps.
    """
    from functools import wraps
    def decorator(target):
        @wraps(target)
        def wrapper(*args, **kwargs):
            with lock(lock_file, fail_callback):
                return target(*args, **kwargs)
        return wrapper
    return decorator

View File

@ -1,99 +0,0 @@
# -*- coding: utf-8 -*-
import copy
import os
import shutil
import tarfile
import urllib.parse
from . import config, repo_local, repo_online, repo_publish
DEFINITION_MEMBERS = {'layers', 'env', 'uid', 'gid', 'cmd', 'cwd', 'ready', 'halt'}

class Image:
    """Container image backed by a filesystem layer plus init/runtime settings.

    Definitions are stored in the local repository; archives can be downloaded
    from the online repository or published to the publish repository.
    """

    def __init__(self, name, load_from_repo=True):
        # name doubles as the layer directory name under LAYERS_DIR
        self.name = name
        self.layer_path = os.path.join(config.LAYERS_DIR, name)
        # An image's layer stack initially consists of just its own layer;
        # inherited layers are added via set_definition()
        self.layers = [name]
        self.env = {}
        self.uid = None
        self.gid = None
        self.cmd = None
        self.cwd = None
        self.ready = None
        self.halt = None
        if load_from_repo:
            self.set_definition(repo_local.get_image(name))

    def set_definition(self, definition):
        # Set attributes given by definition
        for key in DEFINITION_MEMBERS.intersection(definition):
            setattr(self, key, definition[key])

    def get_definition(self):
        # Return shallow copy of image definition as dictionary (falsy values omitted)
        definition = {}
        for key in DEFINITION_MEMBERS:
            value = getattr(self, key)
            if value:
                definition[key] = copy.copy(value)
        return definition

    def create(self, imagebuilder, filename):
        """Build the image layer from an image file and register it in the local repository."""
        # Chown is possible only when the process is running as root, for user namespaces, see https://linuxcontainers.org/lxc/manpages/man1/lxc-usernsexec.1.html
        os.makedirs(self.layer_path, 0o755, True)
        os.chown(self.layer_path, 100000, 100000)
        imagebuilder.build(self, filename)
        repo_local.register_image(self.name, self.get_definition())

    def delete(self, observer=None):
        # Remove the layer from local repository and filesystem
        repo_local.unregister_image(self.name)
        try:
            shutil.rmtree(self.layer_path)
        except FileNotFoundError:
            pass

    def download(self, observer=None):
        # Download the archive with layer data
        os.makedirs(config.TMP_LAYERS_DIR, 0o700, True)
        archive_url = urllib.parse.urljoin(config.ONLINE_LAYERS_URL, f'{self.name}.tar.xz')
        archive_path = os.path.join(config.TMP_LAYERS_DIR, f'{self.name}.tar.xz')
        definition = repo_online.get_image(self.name)
        if observer:
            # Report the expected download size so the observer can show progress
            observer.units_total = definition['dlsize']
        repo_online.download_archive(archive_url, archive_path, definition['hash'], observer)

    def unpack_downloaded(self, observer=None):
        # Unpack downloaded archive with layer data
        archive_path = os.path.join(config.TMP_LAYERS_DIR, f'{self.name}.tar.xz')
        definition = repo_online.get_image(self.name)
        if observer:
            # Report the expected unpacked size so the observer can show progress
            observer.units_total = definition['size']
        repo_online.unpack_archive(archive_path, config.LAYERS_DIR, definition['hash'], observer)
        self.set_definition(definition)
        repo_local.register_image(self.name, definition)

    def publish(self):
        """Create the layer archive, register it in the publish repository and
        return (unpacked size, archive size) in bytes."""
        os.makedirs(config.PUB_LAYERS_DIR, 0o755, True)
        files = repo_publish.TarSizeCounter()
        archive_path = os.path.join(config.PUB_LAYERS_DIR, f'{self.name}.tar.xz')
        with tarfile.open(archive_path, 'w:xz') as tar:
            tar.add(self.layer_path, self.name, filter=files.add_file)
        definition = self.get_definition()
        definition['size'] = files.size
        definition['dlsize'] = os.path.getsize(archive_path)
        definition['hash'] = repo_publish.sign_file(archive_path).hex()
        repo_publish.register_image(self.name, definition)
        return (definition['size'], definition['dlsize'])

    def unpublish(self):
        # Remove the layer from publish repository
        repo_publish.unregister_image(self.name)
        archive_path = os.path.join(config.PUB_LAYERS_DIR, f'{self.name}.tar.xz')
        try:
            os.unlink(archive_path)
        except FileNotFoundError:
            pass

View File

@ -1,177 +0,0 @@
# -*- coding: utf-8 -*-
import os
import requests
import shutil
import stat
import tarfile
import tempfile
import zipfile
from .container import Container
from .image import Image
class ImageBuilder:
    """Builds an image layer by interpreting a Dockerfile-like image file.

    Supported directives: FROM, COPY, ENV, USER, CMD, WORKDIR, HALT, READY and
    RUN <EOF-marker> ... <EOF-marker> (heredoc-style shell script).
    """

    def build(self, image, filename):
        """Reset internal state, then read and process lines from filename.

        :param image: Image instance whose definition/layer is being built.
        :param filename: Path to the image file; relative COPY sources are
            resolved against its directory.
        """
        self.image = image
        self.builddir = os.path.dirname(filename)
        # script_eof holds the heredoc terminator while a RUN block is open
        self.script_eof = None
        self.script_lines = []
        with open(filename, 'r') as f:
            for line in f:
                self.process_line(line.strip())

    def process_line(self, line):
        """Parse a single line - either collect RUN script lines or dispatch a directive."""
        if self.script_eof:
            if line == self.script_eof:
                # Terminator reached - execute the collected script
                self.script_eof = None
                self.run_script(self.script_lines)
            else:
                self.script_lines.append(line)
        elif line:
            self.process_directive(*line.split(None, 1))

    def process_directive(self, directive, args):
        """Process a single directive from the image file."""
        if 'RUN' == directive:
            # Start collecting script lines until the terminator given in args
            self.script_lines = []
            self.script_eof = args
        elif 'FROM' == directive:
            # Set the values of image from which this one inherits
            self.image.set_definition(Image(args).get_definition())
            self.image.layers.append(self.image.name)
        elif 'COPY' == directive:
            # COPY <src> [<dst>] - dst defaults to the layer root
            srcdst = args.split()
            self.copy_files(srcdst[0], srcdst[1] if len(srcdst) > 1 else '')
        elif 'ENV' == directive:
            # Sets/unsets environment variable
            self.set_env(*args.split(None, 1))
        elif 'USER' == directive:
            # Sets init UID / GID
            self.set_uidgid(*args.split())
        elif 'CMD' == directive:
            # Sets init command
            self.image.cmd = args
        elif 'WORKDIR' == directive:
            # Sets init working directory
            self.image.cwd = args
        elif 'HALT' == directive:
            # Sets signal to be sent to init when stopping the container
            self.image.halt = args
        elif 'READY' == directive:
            # Sets a command to check readiness of the container after it has been started
            self.image.ready = args

    def run_script(self, script_lines):
        # Creates a temporary container, runs a script in its namespace, and stores the files modified by it as part of the layer
        # Note: If USER or WORKDIR directive has already been set, the command is run under that UID/GID or working directory
        script_fd, script_path = tempfile.mkstemp(suffix='.sh', dir=self.image.layer_path, text=True)
        script_name = os.path.basename(script_path)
        script_lines = '\n'.join(script_lines)
        with os.fdopen(script_fd, 'w') as script:
            script.write(f'#!/bin/sh\nset -ev\n\n{script_lines}\n')
        os.chmod(script_path, 0o755)
        # Chown to the unprivileged range so the script is readable in the user namespace
        os.chown(script_path, 100000, 100000)
        # Create a temporary container from the current image definition and execute the script within the container
        container = Container(self.image.name, False)
        container.set_definition(self.image.get_definition())
        container.build = True
        container.create()
        container.execute(['/bin/sh', '-lc', os.path.join('/', script_name)], check=True)
        container.destroy()
        os.unlink(script_path)

    def set_env(self, key, value=None):
        """Set (value given) or unset (no value) an environment variable on the image."""
        if value:
            self.image.env[key] = value
        else:
            try:
                del self.image.env[key]
            except KeyError:
                pass

    def set_uidgid(self, uid, gid=''):
        """Set UID/GID for init; non-numeric names are resolved inside a temporary container."""
        if not uid.isdigit() or not gid.isdigit():
            # Resolve the UID/GID from container if either of them is entered as string
            container = Container(self.image.name, False)
            container.set_definition(self.image.get_definition())
            container.create()
            uid, gid = container.get_uidgid(uid, gid)
            container.destroy()
        self.image.uid = uid
        self.image.gid = gid

    def copy_files(self, src, dst):
        """Copy files from the host or download them from a http(s) URL into the layer."""
        dst = os.path.join(self.image.layer_path, dst.lstrip('/'))
        if src.startswith('http://') or src.startswith('https://'):
            unpack_http_archive(src, dst)
        else:
            src = os.path.join(self.builddir, src)
            if os.path.isdir(src):
                copy_tree(src, dst)
            else:
                shutil.copy2(src, dst)
        # Shift UID/GID of the files to the unprivileged range
        shift_uid(dst, os.stat(dst, follow_symlinks=False))
def unpack_http_archive(src, dst):
    # Download an archive via http(s) and extract it into *dst*.
    with tempfile.TemporaryFile() as tmp_archive:
        # Stream the resource into an anonymous temporary file
        with requests.Session() as session:
            response = session.get(src, stream=True)
            response.raise_for_status()
            for chunk in response.iter_content(chunk_size=None):
                if chunk:
                    tmp_archive.write(chunk)
        # Sniff the magic bytes to decide between zip and tar (bzip2, gzip or xz)
        tmp_archive.seek(0)
        is_zip = zipfile.is_zipfile(tmp_archive)
        tmp_archive.seek(0)
        if is_zip:
            with zipfile.ZipFile(tmp_archive) as archive:
                archive.extractall(dst)
        else:
            with tarfile.open(fileobj=tmp_archive) as archive:
                archive.extractall(dst, numeric_owner=True)
def copy_tree(src, dst):
    # Recursively copy a directory tree from host to container while leaving the modes and
    # attributes of already-existing destination entries unchanged, which is crucial
    # e.g. whenever anything is copied into /tmp.
    # This is a stripped and customized variant of shutil.copytree().
    for entry in os.scandir(src):
        target = os.path.join(dst, entry.name)
        created = not os.path.exists(target)
        if entry.is_dir():
            if created:
                os.mkdir(target)
            copy_tree(entry, target)
        else:
            shutil.copy2(entry, target)
        if created:
            # Only freshly created entries receive the source's mode/attributes
            shutil.copystat(entry, target, follow_symlinks=False)
def shift_uid(path, path_stat):
    # Shift UID/GID of a file or directory (and, recursively, its contents) into the
    # unprivileged range by adding 100000 to any ID below 100000.
    # Taking the stat result as a parameter is deliberate - os.scandir() has already
    # stat()ed the entries, so reusing it saves a second stat() per file.
    new_uid = path_stat.st_uid
    new_gid = path_stat.st_gid
    needs_shift = False
    if new_uid < 100000:
        new_uid += 100000
        needs_shift = True
    if new_gid < 100000:
        new_gid += 100000
        needs_shift = True
    if needs_shift:
        os.chown(path, new_uid, new_gid, follow_symlinks=False)
    if stat.S_ISDIR(path_stat.st_mode):
        for entry in os.scandir(path):
            shift_uid(entry.path, entry.stat(follow_symlinks=False))

View File

@ -1,81 +0,0 @@
# -*- coding: utf-8 -*-
import fcntl
import ipaddress
import os
import socket
import struct
from . import config
from .flock import locked
# ioctl magic constants taken from https://git.musl-libc.org/cgit/musl/tree/include/sys/ioctl.h (same as glibc)
IOCTL_SIOCGIFADDR = 0x8915
IOCTL_SIOCGIFNETMASK = 0x891b

# In-memory copy of the global hosts file: {ip: hostname}
leases = {}
# Last observed mtime of the hosts file; lets load_leases() skip re-parsing when unchanged
mtime = None

@locked(config.HOSTS_LOCK_FILE)
def load_leases():
    # Read and parse all IP-hostname pairs from the global hosts file
    global leases
    global mtime
    try:
        file_mtime = os.stat(config.HOSTS_FILE).st_mtime
        # Re-read only when the file has changed since the last load
        if mtime != file_mtime:
            with open(config.HOSTS_FILE, 'r') as f:
                leases = [lease.strip().split(None, 1) for lease in f]
                leases = {ip: hostname for ip, hostname in leases}
            mtime = file_mtime
    except FileNotFoundError:
        # If the file doesn't exist, seed the in-memory map with localhost and the
        # bridge (container host) as default records; the file itself is only
        # written out by a later save_leases()
        interface = get_bridge_interface()
        leases = {'127.0.0.1': 'localhost', str(interface.ip): 'host'}
@locked(config.HOSTS_LOCK_FILE)
def save_leases():
    # Write all IP-hostname pairs to the global hosts file
    global mtime
    with open(config.HOSTS_FILE, 'w') as f:
        # Sort by the packed binary IP so records appear in numeric address order
        for ip,hostname in sorted(leases.items(), key=lambda lease: socket.inet_aton(lease[0])):
            f.write(f'{ip} {hostname}\n')
    # Remember our own write's mtime so the next load_leases() skips re-parsing
    mtime = os.stat(config.HOSTS_FILE).st_mtime

def get_bridge_interface():
    # Returns the bridge interface's IP address and netmask as an ipaddress.IPv4Interface
    with socket.socket(socket.AF_INET) as sock:
        # Query the kernel via ioctl; the interface name is passed as a 256-byte padded
        # buffer, and bytes 20:24 of the result hold the packed IPv4 address
        packed_ifname = struct.pack('256s', config.NETWORK_INTERFACE.encode())
        ip = socket.inet_ntoa(fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFADDR, packed_ifname)[20:24])
        netmask = socket.inet_ntoa(fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFNETMASK, packed_ifname)[20:24])
        return ipaddress.IPv4Interface(f'{ip}/{netmask}')
def get_ip(container_name):
    # Return the IP leased to the given container hostname, or None when it has no lease
    load_leases()
    matches = (ip for ip, hostname in leases.items() if hostname == container_name)
    return next(matches, None)
def request_ip(container_name):
    # Return a (ip, prefix length, gateway) lease for the container, allocating a new IP
    # when the container doesn't have one yet
    # First, check whether an IP has already been leased for the hostname
    interface = get_bridge_interface()
    load_leases()
    for ip in leases:
        if leases[ip] == container_name:
            return (ip, str(interface.network.prefixlen), str(interface.ip))
    # If not, take the first unassigned IP from the interface's network
    # (falls through and returns None when the network has no free addresses)
    for ip in interface.network.hosts():
        ip = str(ip)
        if ip not in leases:
            leases[ip] = container_name
            save_leases()
            return (ip, str(interface.network.prefixlen), str(interface.ip))
def release_ip(container_name):
    # Drop the container's lease from the hosts file, keeping all other records intact
    global leases
    load_leases()
    remaining = {ip: hostname for ip, hostname in leases.items() if hostname != container_name}
    leases = remaining
    save_leases()

View File

@ -1,96 +0,0 @@
# -*- coding: utf-8 -*-
import fcntl
import json
import os
from . import config
from .exceptions import AppNotFoundError, ContainerNotFoundError, ImageNotFoundError
from .flock import locked
# Keys of the top-level sections in the local repository JSON file
TYPE_APP = 'apps'
TYPE_CONTAINER = 'containers'
TYPE_IMAGE = 'images'

# In-memory copy of the repository file contents
data = {TYPE_IMAGE: {}, TYPE_CONTAINER: {}, TYPE_APP: {}}
# Last observed mtime of the repository file; lets load() skip re-parsing when unchanged
mtime = 0

def load():
    # (Re)read the repository file if it changed on disk since the last load
    global data
    global mtime
    try:
        file_mtime = os.stat(config.REPO_FILE).st_mtime
        if mtime != file_mtime:
            with open(config.REPO_FILE) as f:
                data = json.load(f)
            mtime = file_mtime
    except FileNotFoundError:
        # A missing file is fine - keep the (empty) in-memory defaults
        pass

def save():
    # Serialize the in-memory data back to the repository file
    global mtime
    with open(config.REPO_FILE, 'w') as f:
        json.dump(data, f, sort_keys=True, indent=4)
    # Remember our own write's mtime so the next load() skips re-parsing
    mtime = os.stat(config.REPO_FILE).st_mtime
@locked(config.REPO_LOCK_FILE)
def get_entries(entry_type):
    # Return all entries of the given type ('apps' / 'containers' / 'images')
    load()
    return data[entry_type]

def get_entry(entry_type, name, exception):
    # Return a single entry; raises the given *NotFoundError type when the name is absent
    try:
        return get_entries(entry_type)[name]
    except KeyError as e:
        raise exception(name) from e

@locked(config.REPO_LOCK_FILE)
def add_entry(entry_type, name, definition):
    # Create or replace an entry and persist the change
    load()
    data[entry_type][name] = definition
    save()

@locked(config.REPO_LOCK_FILE)
def delete_entry(entry_type, name):
    # Remove an entry and persist the change; deleting a missing entry is a no-op
    load()
    try:
        del data[entry_type][name]
        save()
    except KeyError:
        pass
# Convenience wrappers binding the generic entry accessors to the concrete entry types
def get_images():
    return get_entries(TYPE_IMAGE)

def get_image(image_name):
    return get_entry(TYPE_IMAGE, image_name, ImageNotFoundError)

def register_image(image_name, definition):
    add_entry(TYPE_IMAGE, image_name, definition)

def unregister_image(image_name):
    delete_entry(TYPE_IMAGE, image_name)

def get_containers():
    return get_entries(TYPE_CONTAINER)

def get_container(container_name):
    return get_entry(TYPE_CONTAINER, container_name, ContainerNotFoundError)

def register_container(container_name, definition):
    add_entry(TYPE_CONTAINER, container_name, definition)

def unregister_container(container_name):
    delete_entry(TYPE_CONTAINER, container_name)

def get_apps():
    return get_entries(TYPE_APP)

def get_app(app_name):
    return get_entry(TYPE_APP, app_name, AppNotFoundError)

def register_app(app_name, definition):
    add_entry(TYPE_APP, app_name, definition)

def unregister_app(app_name):
    delete_entry(TYPE_APP, app_name)

View File

@ -1,131 +0,0 @@
# -*- coding: utf-8 -*-
import json
import os
import requests
import shutil
import tarfile
import time
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec, utils
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from . import config
from .exceptions import AppNotFoundError, ImageNotFoundError
TYPE_APP = 'apps'
TYPE_IMAGE = 'images'
# Cached repository public key object; built lazily by get_public_key()
public_key = None

def get_public_key():
    # Reconstruct the repository public key from the configured base64 payload on first use
    global public_key
    if public_key is None:
        pem_data = '\n'.join(['-----BEGIN PUBLIC KEY-----', config.ONLINE_PUBKEY, '-----END PUBLIC KEY-----'])
        public_key = load_pem_public_key(pem_data.encode(), default_backend())
    return public_key
def verify_fileobj(fileobj, expected_hash):
    # Compute the SHA512 digest of the file object's contents and verify the ECDSA
    # signature given in *expected_hash* (hex string) against the repository public key.
    # Raises cryptography.exceptions.InvalidSignature on mismatch.
    hasher = hashes.Hash(hashes.SHA512(), default_backend())
    chunk = fileobj.read(64*1024)
    while chunk:
        hasher.update(chunk)
        chunk = fileobj.read(64*1024)
    signature = bytes.fromhex(expected_hash)
    get_public_key().verify(signature, hasher.finalize(), ec.ECDSA(utils.Prehashed(hashes.SHA512())))
def download_archive(archive_url, archive_path, expected_hash, observer=None):
    # Ensure a signature-verified archive exists at archive_path, downloading it via
    # http(s) only when necessary
    do_download = True
    # If the file already exists in the temporary directory, verify its signature first
    if os.path.exists(archive_path):
        try:
            with open(archive_path, 'rb') as f:
                verify_fileobj(f, expected_hash)
            # The signature matches - reuse the file and report it as fully downloaded
            if observer:
                observer.units_done = os.path.getsize(archive_path)
            do_download = False
        except InvalidSignature:
            # Stale or corrupt file - fall through and redownload it
            pass
    if do_download:
        do_download_archive(archive_url, archive_path, expected_hash, observer)
def do_download_archive(archive_url, archive_path, expected_hash, observer=None):
    # Download the archive via http(s), streaming it to archive_path in 64 kB chunks;
    # when an observer is given, report each written chunk's size to it
    with open(archive_path, 'wb') as f, requests.Session() as session:
        response = session.get(archive_url, stream=True)
        response.raise_for_status()
        for chunk in response.iter_content(chunk_size=64*1024):
            if chunk:
                written = f.write(chunk)
                if observer:
                    observer.units_done += written
def unpack_archive(archive_path, destination, expected_hash, observer):
    # Verify the archive's signature and extract it under *destination*
    with open(archive_path, 'rb') as f:
        # Verify the file object, then seek back and open it as tar without losing the
        # handle, preventing possible malicious race conditions (TOCTOU between the
        # signature check and the extraction)
        verify_fileobj(f, expected_hash)
        f.seek(0)
        # Remove the target directory, if it exists from a previous failed installation
        # ([:-7] strips the 7-character archive suffix from the file name)
        dst_dir = os.path.join(destination, os.path.basename(archive_path)[:-7])
        try:
            shutil.rmtree(dst_dir)
        except FileNotFoundError:
            pass
        # Extract the tar members while counting their size for progress reporting
        # If this is done as non-root, extractall() from https://github.com/python/cpython/blob/master/Lib/tarfile.py needs to be reimplemented instead
        tar = tarfile.open(fileobj=f)
        if observer:
            for tarinfo in tar:
                tar.extract(tarinfo, destination, numeric_owner=True)
                observer.units_done += tarinfo.size
        else:
            tar.extractall(destination, numeric_owner=True)
    # Remove the archive once it has been fully extracted
    os.unlink(archive_path)
# Cached parsed package manifest; None until the first successful load()
data = None

def load(force=False):
    # Download the package manifest and its signature, verify, and cache the parsed result
    global data
    if not data or force:
        with requests.Session() as session:
            resource = session.get(config.ONLINE_REPO_URL, timeout=5)
            resource.raise_for_status()
            packages = resource.content
            resource = session.get(config.ONLINE_SIG_URL, timeout=5)
            resource.raise_for_status()
            packages_sig = resource.content
        # Raises cryptography.exceptions.InvalidSignature on verification failure
        get_public_key().verify(packages_sig, packages, ec.ECDSA(hashes.SHA512()))
        data = json.loads(packages.decode())
def get_entries(entry_type):
    # Return all online entries of the given type ('apps' / 'images')
    load()
    return data[entry_type]

def get_entry(entry_type, name, exception):
    # Return a single entry; raises the given *NotFoundError type when the name is absent
    try:
        return get_entries(entry_type)[name]
    except KeyError as e:
        raise exception(name) from e

# Convenience wrappers binding the generic entry accessors to the concrete entry types
def get_images():
    return get_entries(TYPE_IMAGE)

def get_image(image_name):
    return get_entry(TYPE_IMAGE, image_name, ImageNotFoundError)

def get_apps():
    return get_entries(TYPE_APP)

def get_app(app_name):
    return get_entry(TYPE_APP, app_name, AppNotFoundError)

View File

@ -1,114 +0,0 @@
# -*- coding: utf-8 -*-
import fcntl
import json
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec, utils
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from . import config
from .exceptions import AppNotFoundError, ImageNotFoundError
from .flock import locked
TYPE_APP = 'apps'
TYPE_IMAGE = 'images'
class TarSizeCounter:
    # Accumulates the total size of tar members passed through add_file().
    # add_file() returns the tarinfo unchanged, so instances can act as a
    # pass-through filter while counting bytes - presumably for progress totals
    # (confirm against callers).

    def __init__(self):
        # Total size in bytes of all members seen so far
        self.size = 0

    def add_file(self, tarinfo):
        # Record the member's size and hand the tarinfo back unmodified
        self.size += tarinfo.size
        return tarinfo
def sign_file(file_path):
    # Generate an ECDSA signature over the SHA512 digest of a file, using the EC private
    # key configured in config.PUB_PRIVKEY_FILE; returns the raw signature bytes.
    # (The original comment said "HMAC", but no HMAC is involved - this is plain ECDSA.)
    sha512 = hashes.SHA512()
    hasher = hashes.Hash(sha512, default_backend())
    with open(file_path, 'rb') as f:
        # Hash in 64 kB chunks to keep memory usage constant for large files
        while True:
            data = f.read(64*1024)
            if not data:
                break
            hasher.update(data)
    with open(config.PUB_PRIVKEY_FILE, 'rb') as f:
        private_key = load_pem_private_key(f.read(), None, default_backend())
    # Prehashed: the digest was already computed above, so sign it directly
    return private_key.sign(hasher.finalize(), ec.ECDSA(utils.Prehashed(sha512)))
# In-memory copy of the publishing repository file contents
data = {TYPE_IMAGE: {}, TYPE_APP: {}}
# Last observed mtime of the repository file; lets load() skip re-parsing when unchanged
mtime = 0

def load():
    # (Re)read the publishing repository file if it changed on disk since the last load
    global data
    global mtime
    try:
        file_mtime = os.stat(config.PUB_REPO_FILE).st_mtime
        if mtime != file_mtime:
            with open(config.PUB_REPO_FILE) as f:
                data = json.load(f)
            mtime = file_mtime
    except FileNotFoundError:
        # A missing file is fine - keep the (empty) in-memory defaults
        pass

def save():
    # Serialize the in-memory data to the repository file and sign it.
    # NOTE(review): the original comment claimed "read + write mode using exclusive
    # lock", but the file is opened in plain 'w' mode here; locking is done by the
    # @locked decorators on the mutating entry points below.
    global mtime
    with open(config.PUB_REPO_FILE, 'w') as f:
        json.dump(data, f, sort_keys=True, indent=4)
    mtime = os.stat(config.PUB_REPO_FILE).st_mtime
    # Cryptographically sign the repository file so clients can verify its integrity
    signature = sign_file(config.PUB_REPO_FILE)
    with open(config.PUB_SIG_FILE, 'wb') as f:
        f.write(signature)
@locked(config.PUB_LOCK_FILE)
def get_entries(entry_type):
    # Return all published entries of the given type ('apps' / 'images')
    load()
    return data[entry_type]

def get_entry(entry_type, name, exception):
    # Return a single entry; raises the given *NotFoundError type when the name is absent
    try:
        return get_entries(entry_type)[name]
    except KeyError as e:
        raise exception(name) from e

@locked(config.PUB_LOCK_FILE)
def add_entry(entry_type, name, definition):
    # Create or replace an entry, persist and re-sign the repository file
    load()
    data[entry_type][name] = definition
    save()

@locked(config.PUB_LOCK_FILE)
def delete_entry(entry_type, name):
    # Remove an entry and persist the change; deleting a missing entry is a no-op
    load()
    try:
        del data[entry_type][name]
        save()
    except KeyError:
        pass
# Convenience wrappers binding the generic entry accessors to the concrete entry types
def get_images():
    return get_entries(TYPE_IMAGE)

def get_image(image_name):
    return get_entry(TYPE_IMAGE, image_name, ImageNotFoundError)

def register_image(image_name, definition):
    add_entry(TYPE_IMAGE, image_name, definition)

def unregister_image(image_name):
    delete_entry(TYPE_IMAGE, image_name)

def get_apps():
    return get_entries(TYPE_APP)

def get_app(app_name):
    # Bug fix: a missing app must raise AppNotFoundError (consistent with the local
    # repository module); previously ImageNotFoundError was raised here, which would
    # mislead callers catching the specific exception type
    return get_entry(TYPE_APP, app_name, AppNotFoundError)

def register_app(app_name, definition):
    add_entry(TYPE_APP, app_name, definition)

def unregister_app(app_name):
    delete_entry(TYPE_APP, app_name)

View File

@ -1,52 +0,0 @@
# -*- coding: utf-8 -*-
LXC_CONTAINER_TEMPLATE = '''# Container name
lxc.uts.name = {name}
# Network
lxc.net.0.type = veth
lxc.net.0.link = {interface}
lxc.net.0.flags = up
lxc.net.0.ipv4.address = {ip_address}/{ip_netmask}
lxc.net.0.ipv4.gateway = {ip_gateway}
# Root filesystem
lxc.rootfs.path = {rootfs}
# Mounts
lxc.mount.entry = shm dev/shm tmpfs rw,nodev,noexec,nosuid,relatime,mode=1777,create=dir 0 0
lxc.mount.entry = {resolv_conf} etc/resolv.conf none bind,ro,create=file 0 0
lxc.mount.entry = {hosts} etc/hosts none bind,ro,create=file 0 0
{mounts}
# Environment
lxc.environment = PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
{env}
# Init
lxc.init.uid = {uid}
lxc.init.gid = {gid}
lxc.init.cwd = {cwd}
lxc.init.cmd = {cmd}
# Halt
lxc.signal.halt = {halt}
# Log
lxc.console.size = 1MB
lxc.console.logfile = {log}
# ID map
lxc.idmap = u 0 100000 65536
lxc.idmap = g 0 100000 65536
# Hooks
lxc.hook.version = 1
lxc.hook.pre-start = /usr/bin/spoc-hook
lxc.hook.post-stop = /usr/bin/spoc-hook
# Other
lxc.arch = linux64
lxc.include = /usr/share/lxc/config/common.conf
lxc.include = /usr/share/lxc/config/userns.conf
'''