diff --git a/APKBUILD b/APKBUILD
index b296817..8d15867 100644
--- a/APKBUILD
+++ b/APKBUILD
@@ -1,13 +1,13 @@
 # Contributor: Disassembler <disassembler@dasm.cz>
 # Maintainer: Disassembler <disassembler@dasm.cz>
 pkgname=spoc
-pkgver=0.9.3
+pkgver=2.0.0
 pkgrel=0
-pkgdesc="SPOC application, container, and image manager"
+pkgdesc="SPOC application and container manager"
 url="https://spotter.vm/"
 arch="noarch"
 license="GPL"
-depends="lxc python3 py3-cffi py3-cryptography py3-requests"
+depends="podman python3 py3-requests"
 options="!check !strip"
 
 build() {
diff --git a/etc/init.d/spoc b/etc/init.d/spoc
deleted file mode 100755
index c248117..0000000
--- a/etc/init.d/spoc
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/sbin/openrc-run
-
-description="SPOC"
-
-depend() {
-	need localmount sysfs cgroups
-	after firewall net
-}
-
-start() {
-	/usr/bin/spoc-app start-autostarted
-}
-
-stop() {
-	/usr/bin/spoc-app stop-all
-}
diff --git a/etc/spoc/spoc.conf b/etc/spoc/spoc.conf
deleted file mode 100644
index 380e35b..0000000
--- a/etc/spoc/spoc.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-[general]
-data-dir = /var/lib/spoc/
-log-dir = /var/log/spoc/
-network-interface = spocbr0
-resolv-conf = /etc/resolv.conf
-
-[publish]
-publish-dir = /srv/build/spoc/
-signing-key = /etc/spoc/publish.key
-
-[repo]
-url = https://repo.spotter.cz/spoc/
-public-key = MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEWJXH4Qm0kt2L86sntQH+C1zOJNQ0qMRt0vx4krTxRs9HQTQYAy//JC92ea2aKleA8OL0JF90b1NYXcQCWdAS+vE/ng9IEAii8C2+5nfuFeZ5YUjbQhfFblwHSM0c7hEG
diff --git a/service/spoc.conf b/service/spoc.conf
new file mode 100644
index 0000000..415eba2
--- /dev/null
+++ b/service/spoc.conf
@@ -0,0 +1,3 @@
+[spoc]
+data-dir = /var/lib/spoc/
+repo-url = https://repo.spotter.cz/spoc/
diff --git a/service/spoc.openrc b/service/spoc.openrc
new file mode 100644
index 0000000..0ed3c1e
--- /dev/null
+++ b/service/spoc.openrc
@@ -0,0 +1,15 @@
+#!/sbin/openrc-run
+
+description="SPOC"
+
+depend() {
+	need podman
+}
+
+start() {
+	/usr/bin/spoc start-autostarted
+}
+
+stop() {
+	/usr/bin/spoc stop-all
+}
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..3b8ee3b
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,39 @@
+[metadata]
+name = spoc
+version = 2.0.0
+license = GPLv3+
+author = Disassembler
+author_email = disassembler@dasm.cz
+description = SPOC application and container manager. A simple orchestrator for podman.
+classifiers =
+    Development Status :: 5 - Production/Stable
+    Environment :: Console
+    Intended Audience :: System Administrators
+    License :: OSI Approved :: GNU General Public License v3 or later
+    Operating System :: POSIX
+    Programming Language :: Python :: 3.6
+    Programming Language :: Python :: 3.7
+    Programming Language :: Python :: 3.8
+    Programming Language :: Python :: 3.9
+    Topic :: System :: Installation/Setup
+    Topic :: System :: Systems Administration
+
+[options]
+packages = find:
+package_dir =
+    = src
+python_requires = >= 3.6
+install_requires = requests
+
+[options.packages.find]
+where = src
+
+[options.entry_points]
+console_scripts =
+    spoc = spoc:main
+
+[tool:pytest]
+testpaths = tests
+
+[coverage:run]
+branch = True
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..a4fad11
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,6 @@
+# This file is intended to be used only by PEP 517-incompatible build frontends.
+# Metadata is in setup.cfg.
+
+from setuptools import setup
+
+setup()
diff --git a/src/spoc/__init__.py b/src/spoc/__init__.py
new file mode 100644
index 0000000..0c40f9c
--- /dev/null
+++ b/src/spoc/__init__.py
@@ -0,0 +1,152 @@
+import argparse
+import os
+from pkg_resources import parse_version
+
+from . import app
+from . import autostart
+from . import config
+from . import podman
+from . import repo
+from .flock import locked
+
+def print_lock(pid):
+    with open(os.path.join('/proc', pid, 'cmdline')) as f:
+        cmdline = f.read().replace('\0', ' ').strip()
+    print(f'Waiting for lock currently held by process {pid} - {cmdline}')
+
+@locked(config.LOCK_FILE, print_lock)
+def listing(list_type):
+    if list_type == 'installed':
+        apps = podman.get_apps()
+    elif list_type == 'online':
+        apps = {app:definition['version'] for app,definition in repo.get_apps().items()}
+    elif list_type == 'updates':
+        online_apps = {app:definition['version'] for app,definition in repo.get_apps().items()}
+        apps = {app:f'{version} -> {online_apps[app]}' for app,version in podman.get_apps().items()
+                if app in online_apps
+                and parse_version(online_apps[app]) > parse_version(version)}
+    else:
+        apps = {}
+    for app_name, app_version in sorted(apps.items()):
+        print(app_name, app_version)
+
+@locked(config.LOCK_FILE, print_lock)
+def install(app_name):
+    app.install(app_name)
+
+@locked(config.LOCK_FILE, print_lock)
+def update(app_name):
+    app.update(app_name)
+
+@locked(config.LOCK_FILE, print_lock)
+def uninstall(app_name):
+    app.uninstall(app_name)
+
+@locked(config.LOCK_FILE, print_lock)
+def start(app_name):
+    podman.start_pod(app_name)
+
+@locked(config.LOCK_FILE, print_lock)
+def stop(app_name):
+    podman.stop_pod(app_name)
+
+@locked(config.LOCK_FILE, print_lock)
+def status(app_name):
+    app_status = podman.get_pod_status(app_name)
+    print(app_status)
+
+@locked(config.LOCK_FILE, print_lock)
+def set_autostart(app_name, value):
+    enabled = value.lower() in ('1', 'on', 'enable', 'true')
+    autostart.set_app(app_name, enabled)
+
+@locked(config.LOCK_FILE, print_lock)
+def start_autostarted():
+    for app_name in autostart.get_apps():
+        podman.start_pod(app_name)
+
+@locked(config.LOCK_FILE, print_lock)
+def stop_all():
+    for app_name in podman.get_apps():
+        podman.stop_pod(app_name)
+
+@locked(config.LOCK_FILE, print_lock)
+def prune():
+    podman.prune()
+
+def parse_args(args=None):
+    parser = argparse.ArgumentParser(description='SPOC application manager')
+    parser.set_defaults(action=None)
+    subparsers = parser.add_subparsers()
+
+    parser_list = subparsers.add_parser('list')
+    parser_list.set_defaults(action=listing)
+    parser_list.add_argument('type', choices=('installed', 'online', 'updates'),
+                             default='installed', const='installed', nargs='?',
+                             help='Selected repository or application criteria')
+
+    parser_install = subparsers.add_parser('install')
+    parser_install.set_defaults(action=install)
+    parser_install.add_argument('app', help='Name of the application to install')
+
+    parser_update = subparsers.add_parser('update')
+    parser_update.set_defaults(action=update)
+    parser_update.add_argument('app', help='Name of the application to update')
+
+    parser_uninstall = subparsers.add_parser('uninstall')
+    parser_uninstall.set_defaults(action=uninstall)
+    parser_uninstall.add_argument('app', help='Name of the application to uninstall')
+
+    parser_start = subparsers.add_parser('start')
+    parser_start.set_defaults(action=start)
+    parser_start.add_argument('app', help='Name of the application to start')
+
+    parser_stop = subparsers.add_parser('stop')
+    parser_stop.set_defaults(action=stop)
+    parser_stop.add_argument('app', help='Name of the application to stop')
+
+    parser_status = subparsers.add_parser('status')
+    parser_status.set_defaults(action=status)
+    parser_status.add_argument('app', nargs='?', help='Name of the application to check')
+
+    parser_autostart = subparsers.add_parser('autostart')
+    parser_autostart.set_defaults(action=set_autostart)
+    parser_autostart.add_argument('app', help='Name of the application to be automatically started')
+    parser_autostart.add_argument('value', choices=('1', 'on', 'enable', 'true', '0', 'off', 'disable', 'false'), help='Set or unset automatic start of the application after the host boots up')
+
+    parser_start_autostarted = subparsers.add_parser('start-autostarted')
+    parser_start_autostarted.set_defaults(action=start_autostarted)
+
+    parser_stop_all = subparsers.add_parser('stop-all')
+    parser_stop_all.set_defaults(action=stop_all)
+
+    parser_prune = subparsers.add_parser('prune')
+    parser_prune.set_defaults(action=prune)
+
+    return parser.parse_args(args)
+
+
+def main():
+    args = parse_args()
+    if args.action is listing:
+        listing(args.type)
+    elif args.action is install:
+        install(args.app)
+    elif args.action is update:
+        update(args.app)
+    elif args.action is uninstall:
+        uninstall(args.app)
+    elif args.action is start:
+        start(args.app)
+    elif args.action is stop:
+        stop(args.app)
+    elif args.action is status:
+        status(args.app)
+    elif args.action is set_autostart:
+        set_autostart(args.app, args.value)
+    elif args.action is start_autostarted:
+        start_autostarted()
+    elif args.action is stop_all:
+        stop_all()
+    elif args.action is prune:
+        prune()
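For orientation, a sketch of how the CLI wiring above behaves (illustrative only, not part of the patch; 'someapp' is a placeholder name):

    from spoc import install, parse_args

    args = parse_args(['install', 'someapp'])
    assert args.action is install
    assert args.app == 'someapp'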
diff --git a/src/spoc/app.py b/src/spoc/app.py
new file mode 100644
index 0000000..2972010
--- /dev/null
+++ b/src/spoc/app.py
@@ -0,0 +1,129 @@
+import os
+
+from . import config
+from . import depsolver
+from . import podman
+from . import repo
+
+class App:
+    def __init__(self, app_name):
+        self.app_name = app_name
+        self.env_file = os.path.join(config.DATA_DIR, f'{app_name}.env')
+
+    def install(self, update=False):
+        definition = repo.get_apps()[self.app_name]
+        version = definition['version']
+        containers = definition['containers']
+
+        # Create volumes
+        volumes = set()
+        for container in containers.values():
+            volumes |= set(container.get('volumes', {}))
+        existing_volumes = self.get_existing_volumes()
+        if update:
+            # Remove volumes no longer referenced by the containers
+            volumes_to_remove = existing_volumes - volumes
+            volumes -= existing_volumes
+        else:
+            # If this is a clean install, remove all volumes with the app label
+            volumes_to_remove = existing_volumes
+        self.remove_volumes(volumes_to_remove)
+        self.create_volumes(volumes)
+
+        # Create env file
+        envs = definition.get('environment', {})
+        if update:
+            # Keep old values on update
+            for key,value in self.read_env_vars().items():
+                if key in envs:
+                    envs[key] = value
+        self.write_env_vars(envs)
+
+        # Create pod and containers
+        self.create_pod(version)
+        self.create_containers(containers)
+
+    def uninstall(self):
+        self.remove_pod()
+        self.remove_env_vars()
+        self.remove_volumes(self.get_existing_volumes())
+
+    def create_pod(self, version):
+        podman.remove_pod(self.app_name)
+        podman.create_pod(self.app_name, version)
+
+    def remove_pod(self):
+        podman.remove_pod(self.app_name)
+
+    def read_env_vars(self):
+        env_vars = {}
+        try:
+            with open(self.env_file) as f:
+                lines = f.read().splitlines()
+            for line in lines:
+                key,value = line.split('=', 1)
+                env_vars[key] = value
+        except FileNotFoundError:
+            pass
+        return env_vars
+
+    def write_env_vars(self, env_vars):
+        os.makedirs(config.DATA_DIR, exist_ok=True)
+        with open(self.env_file, 'w') as f:
+            for key,value in env_vars.items():
+                f.write(f'{key}={value}\n')
+
+    def remove_env_vars(self):
+        try:
+            os.unlink(self.env_file)
+        except FileNotFoundError:
+            pass
+
+    def get_existing_volumes(self):
+        existing_volumes = podman.get_volumes_for_app(self.app_name)
+        strip_len = len(self.app_name)+1
+        return set(volume[strip_len:] for volume in existing_volumes)
+
+    def create_volumes(self, volumes):
+        for volume in volumes:
+            self.create_volume(volume)
+
+    def remove_volumes(self, volumes):
+        for volume in volumes:
+            self.remove_volume(volume)
+
+    def create_volume(self, volume):
+        volume = f'{self.app_name}-{volume}'
+        podman.create_volume(self.app_name, volume)
+
+    def remove_volume(self, volume):
+        volume = f'{self.app_name}-{volume}'
+        podman.remove_volume(volume)
+
+    def create_containers(self, containers):
+        deps = depsolver.DepSolver()
+        for name,definition in containers.items():
+            deps.add(name, definition.get('requires', []))
+        container_order = deps.solve()
+
+        hosts = set(containers)
+        for name in container_order:
+            self.create_container(name, containers[name], hosts)
+
+    def create_container(self, name, definition, hosts):
+        name = f'{self.app_name}-{name}'
+        image = definition['image']
+        volumes = {f'{self.app_name}-{volume}':mount
+                   for volume,mount in definition.get('volumes', {}).items()}
+        requires = set(f'{self.app_name}-{require}' for require in definition.get('requires', []))
+        podman.create_container(self.app_name, name, image, env_file=self.env_file,
+                                volumes=volumes, requires=requires, hosts=hosts)
+
+def install(app_name):
+    App(app_name).install()
+
+def update(app_name):
+    App(app_name).install(update=True)
+
+def uninstall(app_name):
+    App(app_name).uninstall()
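As a usage sketch of the env file handling above (illustrative, not part of the patch; with the default configuration the file lands in /var/lib/spoc/someapp.env):

    from spoc.app import App

    instance = App('someapp')                              # placeholder app name
    instance.write_env_vars({'POSTGRES_USER': 'someapp'})  # creates DATA_DIR if needed
    assert instance.read_env_vars() == {'POSTGRES_USER': 'someapp'}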
diff --git a/src/spoc/autostart.py b/src/spoc/autostart.py
new file mode 100644
index 0000000..37a8285
--- /dev/null
+++ b/src/spoc/autostart.py
@@ -0,0 +1,25 @@
+import os
+
+from . import config
+
+def get_apps():
+    try:
+        with open(config.AUTOSTART_FILE) as f:
+            lines = f.read().splitlines()
+        return set(lines)
+    except FileNotFoundError:
+        return set()
+
+def set_app(app_name, enabled):
+    apps = get_apps()
+    if enabled:
+        apps.add(app_name)
+    else:
+        try:
+            apps.remove(app_name)
+        except KeyError:
+            pass
+    os.makedirs(config.DATA_DIR, exist_ok=True)
+    with open(config.AUTOSTART_FILE, 'w') as f:
+        for app in apps:
+            f.write(f'{app}\n')
diff --git a/src/spoc/config.py b/src/spoc/config.py
new file mode 100644
index 0000000..ac549d4
--- /dev/null
+++ b/src/spoc/config.py
@@ -0,0 +1,23 @@
+import configparser
+import os
+
+CONFIG_FILE = '/etc/spoc/spoc.conf'
+LOCK_FILE = '/run/lock/spoc.lock'
+
+DATA_DIR = None
+AUTOSTART_FILE = None
+REPO_BASE_URL = None
+REPO_FILE_URL = None
+
+def reload(config_file=CONFIG_FILE):
+    global DATA_DIR, AUTOSTART_FILE, REPO_BASE_URL, REPO_FILE_URL
+
+    config = configparser.ConfigParser()
+    config.read(config_file)
+
+    DATA_DIR = config.get('spoc', 'data-dir', fallback='/var/lib/spoc')
+    AUTOSTART_FILE = os.path.join(DATA_DIR, 'autostart')
+    REPO_BASE_URL = config.get('spoc', 'repo-url', fallback='https://localhost').rstrip('/')
+    REPO_FILE_URL = f'{REPO_BASE_URL}/repository.json'
+
+reload()
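For illustration, reloading the shipped service/spoc.conf (installed as /etc/spoc/spoc.conf) yields the derived values below; note that only repo-url has its trailing slash stripped:

    from spoc import config

    config.reload()  # reads /etc/spoc/spoc.conf by default
    # config.DATA_DIR       == '/var/lib/spoc/'
    # config.AUTOSTART_FILE == '/var/lib/spoc/autostart'
    # config.REPO_BASE_URL  == 'https://repo.spotter.cz/spoc'
    # config.REPO_FILE_URL  == 'https://repo.spotter.cz/spoc/repository.json'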
diff --git a/src/spoc/depsolver.py b/src/spoc/depsolver.py
new file mode 100644
index 0000000..bc1357d
--- /dev/null
+++ b/src/spoc/depsolver.py
@@ -0,0 +1,56 @@
+class CircularDependencyError(Exception):
+    # Dependency solver has found a circular dependency between items
+    def __init__(self, deps):
+        self.deps = deps
+
+    def __str__(self):
+        result = ['Dependency resolution failed due to circular dependency.',
+                  'Unresolved dependencies:']
+        result.extend(f' {item} => {item_deps}' for item, item_deps in self.deps.items())
+        return '\n'.join(result)
+
+class MissingDependencyError(Exception):
+    # Dependency solver has found an item which depends on a nonexistent item
+    def __init__(self, deps, missing):
+        self.deps = deps
+        self.missing = missing
+
+    def __str__(self):
+        result = ['Dependency resolution failed due to missing dependency.',
+                  'Missing dependencies:']
+        result.append(f' {self.missing}')
+        result.append('Unresolved dependencies:')
+        result.extend(f' {item} => {item_deps}' for item, item_deps in self.deps.items())
+        return '\n'.join(result)
+
+class DepSolver:
+    def __init__(self):
+        self.unresolved = {}
+
+    def add(self, item, dependencies):
+        self.unresolved[item] = set(dependencies)
+
+    def solve(self):
+        # Returns a list of items ordered by dependency
+        resolved = []
+        while self.unresolved:
+            # Get a batch of items not depending on anything
+            # or originally depending on already resolved items
+            batch = {item for item,deps in self.unresolved.items() if not deps}
+            if not batch:
+                # If there are no such items, check if a dependency is missing
+                wanted_deps = set(dep for deps in self.unresolved.values() for dep in deps)
+                missing_deps = wanted_deps - set(self.unresolved)
+                if missing_deps:
+                    raise MissingDependencyError(self.unresolved, missing_deps)
+                else:
+                    # If all dependencies exist, we have found a circular dependency
+                    raise CircularDependencyError(self.unresolved)
+            # Add resolved items to the result and remove from the unresolved ones
+            for item in batch:
+                resolved.append(item)
+                del self.unresolved[item]
+            # Remove resolved items from the dependencies of yet unresolved items
+            for item in self.unresolved:
+                self.unresolved[item] -= batch
+        return resolved
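A minimal worked example of the solver above (names are illustrative):

    from spoc.depsolver import DepSolver

    solver = DepSolver()
    solver.add('someapp', ['postgres'])  # someapp requires postgres
    solver.add('postgres', [])
    print(solver.solve())                # ['postgres', 'someapp']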
diff --git a/usr/lib/python3.8/spoc/flock.py b/src/spoc/flock.py
similarity index 85%
rename from usr/lib/python3.8/spoc/flock.py
rename to src/spoc/flock.py
index 6b8f867..edbb978 100644
--- a/usr/lib/python3.8/spoc/flock.py
+++ b/src/spoc/flock.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
 import errno
 import fcntl
 import os
@@ -9,7 +7,8 @@ from contextlib import contextmanager
 @contextmanager
 def lock(lock_file, fail_callback=None):
     with open(lock_file, 'a'):
-        # Open the lock file in append mode first to ensure its existence but not modify any data if it already exists
+        # Open the lock file in append mode first to ensure its existence
+        # but not modify any data if it already exists
         pass
     # Open the lock file in read + write mode without truncation
     with open(lock_file, 'r+') as f:
@@ -22,7 +21,8 @@ def lock(lock_file, fail_callback=None):
                 # If lock is already locked by another process
                 if e.errno == errno.EAGAIN:
                     if fail_callback:
-                        # Call the callback function with contents of the lock file (PID of the process holding the lock)
+                        # Call the callback function with contents of the lock file
+                        # (PID of the process holding the lock)
                         fail_callback(f.read())
                     # Remove the callback function so it's not called in every loop
                     fail_callback = None
@@ -32,7 +32,8 @@ def lock(lock_file, fail_callback=None):
                 time.sleep(0.1)
             else:
                 raise
-        # If the lock was obtained, truncate the file and write PID of the process holding the lock
+        # If the lock was obtained, truncate the file
+        # and write PID of the process holding the lock
        f.truncate()
        f.write(str(os.getpid()))
        f.flush()
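The locked() decorator used in src/spoc/__init__.py wraps this context manager; its definition lies outside the changed hunks, but based on the call sites the intended use looks like this (sketch; lock path and callback are illustrative):

    from spoc.flock import locked

    def report(pid):
        print(f'Waiting for lock held by PID {pid}')

    @locked('/run/lock/spoc.lock', report)  # blocks until the flock is acquired
    def critical_section():
        ...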
diff --git a/src/spoc/podman.py b/src/spoc/podman.py
new file mode 100644
index 0000000..524858d
--- /dev/null
+++ b/src/spoc/podman.py
@@ -0,0 +1,75 @@
+import json
+import subprocess
+
+def get_apps():
+    apps = {}
+    cmd = ['podman', 'pod', 'ps', '--format', 'json']
+    pod_ps = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, text=True)
+    data = json.loads(pod_ps.stdout)
+    for pod in data:
+        app_name = pod['Labels'].get('spoc.app')
+        app_version = pod['Labels'].get('spoc.version')
+        if app_name:
+            apps[app_name] = app_version
+    return apps
+
+def get_volumes_for_app(app_name):
+    cmd = ['podman', 'volume', 'ls', '--filter', f'label=spoc.app={app_name}', '--format', 'json']
+    volume_ls = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, text=True)
+    return set(volume['Name'] for volume in json.loads(volume_ls.stdout))
+
+def start_pod(app_name):
+    cmd = ['podman', 'pod', 'start', app_name]
+    subprocess.run(cmd, check=True)
+
+def stop_pod(app_name):
+    cmd = ['podman', 'pod', 'stop', '--ignore', app_name]
+    subprocess.run(cmd, check=True)
+
+def get_pod_status(app_name=None):
+    cmd = ['podman', 'pod', 'ps']
+    if app_name:
+        cmd.extend(['--filter', f'label=spoc.app={app_name}'])
+    pod_ps = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, text=True)
+    return pod_ps.stdout.strip()
+
+def create_volume(app_name, vol_name):
+    cmd = ['podman', 'volume', 'create', '--label', f'spoc.app={app_name}', vol_name]
+    subprocess.run(cmd, check=True)
+
+def remove_volume(vol_name):
+    cmd = ['podman', 'volume', 'rm', vol_name]
+    subprocess.run(cmd, check=True)
+
+def create_pod(app_name, app_version):
+    cmd = ['podman', 'pod', 'create', '--name', app_name,
+           '--label', f'spoc.app={app_name}', '--label', f'spoc.version={app_version}']
+    subprocess.run(cmd, check=True)
+
+def remove_pod(app_name):
+    stop_pod(app_name)
+    cmd = ['podman', 'pod', 'rm', '--ignore', app_name]
+    subprocess.run(cmd, check=True)
+
+def create_container(app_name, cnt_name, image, env_file=None, volumes=None,
+                     requires=None, hosts=None):
+    cmd = ['podman', 'container', 'create', '--name', cnt_name, '--pod', app_name,
+           '--restart', 'unless-stopped']
+    if env_file:
+        cmd.extend(['--env-file', env_file])
+    if requires:
+        cmd.extend(['--requires', ','.join(sorted(requires))])
+    if volumes:
+        for volume,mount in sorted(volumes.items(), key=lambda x: x[1]):
+            cmd.extend(['--volume', f'{volume}:{mount}'])
+    if hosts:
+        for host in sorted(hosts):
+            cmd.extend(['--add-host', f'{host}:127.0.0.1'])
+    cmd.append(image)
+    subprocess.run(cmd, check=True)
+
+def prune():
+    cmd = ['podman', 'image', 'prune', '--all', '--force']
+    subprocess.run(cmd, check=True)
+    cmd = ['podman', 'volume', 'prune', '--force']
+    subprocess.run(cmd, check=True)
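For reference, a call such as the following (names illustrative) assembles the podman invocation shown in the comment, with flags emitted in the order coded above:

    from spoc import podman

    podman.create_container('someapp', 'someapp-web', 'example.com/web:1.0',
                            volumes={'someapp-data': '/srv/data'},
                            requires={'someapp-db'}, hosts={'web', 'db'})
    # podman container create --name someapp-web --pod someapp
    #     --restart unless-stopped --requires someapp-db
    #     --volume someapp-data:/srv/data
    #     --add-host db:127.0.0.1 --add-host web:127.0.0.1
    #     example.com/web:1.0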
diff --git a/src/spoc/repo.py b/src/spoc/repo.py
new file mode 100644
index 0000000..8f6e9a4
--- /dev/null
+++ b/src/spoc/repo.py
@@ -0,0 +1,16 @@
+import requests
+
+from . import config
+
+_data = {}
+
+def load(force=False):
+    global _data
+    if not _data or force:
+        response = requests.get(config.REPO_FILE_URL, timeout=5)
+        response.raise_for_status()
+        _data = response.json()
+
+def get_apps():
+    load()
+    return _data
diff --git a/tests/test_app.py b/tests/test_app.py
new file mode 100644
index 0000000..d5db0c5
--- /dev/null
+++ b/tests/test_app.py
@@ -0,0 +1,238 @@
+import json
+import os
+from unittest.mock import patch, call, mock_open
+
+from spoc import app
+from spoc import config
+
+
+TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')
+with open(os.path.join(TEST_DATA_DIR, 'repository.json')) as f:
+    MOCK_REPODATA = json.load(f)
+
+with open(os.path.join(TEST_DATA_DIR, 'test.env')) as f:
+    MOCK_ENV = f.read()
+
+with open(os.path.join(TEST_DATA_DIR, 'test.env.json')) as f:
+    MOCK_ENV_JSON = json.load(f)
+
+
+def test_init():
+    instance = app.App('someapp')
+
+    assert instance.app_name == 'someapp'
+    assert instance.env_file == os.path.join(config.DATA_DIR, 'someapp.env')
+
+@patch('spoc.repo.get_apps', return_value=MOCK_REPODATA)
+@patch('spoc.app.App.get_existing_volumes', return_value={'somevol'})
+@patch('spoc.app.App.remove_volumes')
+@patch('spoc.app.App.create_volumes')
+@patch('spoc.app.App.read_env_vars')
+@patch('spoc.app.App.write_env_vars')
+@patch('spoc.app.App.create_pod')
+@patch('spoc.app.App.create_containers')
+def test_install(create_containers, create_pod, write_env_vars, read_env_vars, create_volumes,
+                 remove_volumes, get_existing_volumes, repo_get_apps):
+    instance = app.App('someapp')
+    instance.install()
+
+    repo_get_apps.assert_called_once()
+    get_existing_volumes.assert_called_once()
+    remove_volumes.assert_called_once_with({'somevol'})
+    create_volumes.assert_called_once_with(set(('migrate', 'storage', 'uploads', 'postgres-data')))
+    read_env_vars.assert_not_called()
+    write_env_vars.assert_called_once_with(MOCK_REPODATA['someapp']['environment'])
+    create_pod.assert_called_once_with('0.23.5-210416')
+    create_containers.assert_called_once_with(MOCK_REPODATA['someapp']['containers'])
+
+@patch('spoc.repo.get_apps', return_value=MOCK_REPODATA)
+@patch('spoc.app.App.get_existing_volumes', return_value=set(('somevol', 'migrate', 'storage')))
+@patch('spoc.app.App.remove_volumes')
+@patch('spoc.app.App.create_volumes')
+@patch('spoc.app.App.read_env_vars', return_value=MOCK_ENV_JSON)
+@patch('spoc.app.App.write_env_vars')
+@patch('spoc.app.App.create_pod')
+@patch('spoc.app.App.create_containers')
+def test_update(create_containers, create_pod, write_env_vars, read_env_vars, create_volumes,
+                remove_volumes, get_existing_volumes, repo_get_apps):
+    instance = app.App('someapp')
+    instance.install(update=True)
+
+    repo_get_apps.assert_called_once()
+    get_existing_volumes.assert_called_once()
+    remove_volumes.assert_called_once_with(set(('somevol',)))
+    create_volumes.assert_called_once_with(set(('uploads', 'postgres-data')))
+    read_env_vars.assert_called_once()
+    expected_env_data = MOCK_REPODATA['someapp']['environment'].copy()
+    expected_env_data.update(MOCK_ENV_JSON)
+    del expected_env_data['SOMEKEY']
+    write_env_vars.assert_called_once_with(expected_env_data)
+    create_pod.assert_called_once_with('0.23.5-210416')
+    create_containers.assert_called_once_with(MOCK_REPODATA['someapp']['containers'])
+
+@patch('spoc.app.App.remove_pod')
+@patch('spoc.app.App.remove_env_vars')
+@patch('spoc.app.App.get_existing_volumes', return_value=set(('somevol', 'anothervol')))
+@patch('spoc.app.App.remove_volumes')
+def test_uninstall(remove_volumes, get_existing_volumes, remove_env_vars, remove_pod):
+    instance = app.App('someapp')
+    instance.uninstall()
+
+    remove_pod.assert_called_once()
+    remove_env_vars.assert_called_once()
+    get_existing_volumes.assert_called_once()
+    remove_volumes.assert_called_once_with(set(('somevol', 'anothervol')))
+
+@patch('spoc.podman.remove_pod')
+@patch('spoc.podman.create_pod')
+def test_create_pod(create_pod, remove_pod):
+    instance = app.App('someapp')
+    instance.create_pod('0.1')
+
+    remove_pod.assert_called_once_with('someapp')
+    create_pod.assert_called_once_with('someapp', '0.1')
+
+@patch('spoc.podman.remove_pod')
+def test_remove_pod(remove_pod):
+    instance = app.App('someapp')
+    instance.remove_pod()
+
+    remove_pod.assert_called_once_with('someapp')
+
+@patch('builtins.open', new_callable=mock_open, read_data=MOCK_ENV)
+def test_read_env_vars(env_open):
+    instance = app.App('someapp')
+    env_vars = instance.read_env_vars()
+
+    env_file = os.path.join(config.DATA_DIR, 'someapp.env')
+    env_open.assert_called_once_with(env_file)
+    assert env_vars == MOCK_ENV_JSON
+
+@patch('builtins.open', side_effect=FileNotFoundError('someapp.env'))
+def test_read_env_vars_filenotfound(env_open):
+    instance = app.App('someapp')
+    env_vars = instance.read_env_vars()
+
+    env_file = os.path.join(config.DATA_DIR, 'someapp.env')
+    env_open.assert_called_once_with(env_file)
+    assert env_vars == {}
+
+@patch('os.makedirs')
+@patch('builtins.open', new_callable=mock_open)
+def test_write_env_vars(env_open, makedirs):
+    instance = app.App('someapp')
+    instance.write_env_vars(MOCK_ENV_JSON)
+
+    makedirs.assert_called_once_with(config.DATA_DIR, exist_ok=True)
+    env_file = os.path.join(config.DATA_DIR, 'someapp.env')
+    env_open.assert_called_once_with(env_file, 'w')
+    expected_writes = [call(line) for line in MOCK_ENV.splitlines(True)]
+    env_open().write.assert_has_calls(expected_writes, any_order=True)
+
+@patch('os.unlink')
+def test_remove_env_vars(unlink):
+    instance = app.App('someapp')
+    instance.remove_env_vars()
+
+    env_file = os.path.join(config.DATA_DIR, 'someapp.env')
+    unlink.assert_called_once_with(env_file)
+
+@patch('os.unlink', side_effect=FileNotFoundError('someapp.env'))
+def test_remove_env_vars_filenotfound(unlink):
+    instance = app.App('someapp')
+    instance.remove_env_vars()
+
+    env_file = os.path.join(config.DATA_DIR, 'someapp.env')
+    unlink.assert_called_once_with(env_file)
+
+@patch('spoc.podman.get_volumes_for_app', return_value={'someapp-vol1', 'someapp-vol2'})
+def test_get_existing_volumes(get_volume_names):
+    instance = app.App('someapp')
+    volumes = instance.get_existing_volumes()
+
+    get_volume_names.assert_called_once_with('someapp')
+    assert volumes == {'vol1', 'vol2'}
+
+@patch('spoc.app.App.create_volume')
+def test_create_volumes(create_volume):
+    instance = app.App('someapp')
+    instance.create_volumes({'vol1', 'vol2'})
+
+    create_volume.assert_has_calls([
+        call('vol1'),
+        call('vol2'),
+    ], any_order=True)
+
+@patch('spoc.app.App.remove_volume')
+def test_remove_volumes(remove_volume):
+    instance = app.App('someapp')
+    instance.remove_volumes({'vol1', 'vol2'})
+
+    remove_volume.assert_has_calls([
+        call('vol1'),
+        call('vol2'),
+    ], any_order=True)
+
+@patch('spoc.podman.create_volume')
+def test_create_volume(create_volume):
+    instance = app.App('someapp')
+    instance.create_volume('vol1')
+
+    create_volume.assert_called_once_with('someapp', 'someapp-vol1')
+
+@patch('spoc.podman.remove_volume')
+def test_remove_volume(remove_volume):
+    instance = app.App('someapp')
+    instance.remove_volume('vol1')
+
+    remove_volume.assert_called_once_with('someapp-vol1')
+
+@patch('spoc.app.App.create_container')
+def test_create_containers(create_container):
+    instance = app.App('someapp')
+    definitions = MOCK_REPODATA['someapp']['containers']
+    instance.create_containers(definitions)
+
+    # Ordered by dependency
+    create_container.assert_has_calls([
+        call('postgres', definitions['postgres'], {'someapp', 'postgres'}),
+        call('someapp', definitions['someapp'], {'someapp', 'postgres'}),
+    ])
+
+@patch('spoc.podman.create_container')
+def test_create_container(create_container):
+    instance = app.App('someapp')
+    definition = MOCK_REPODATA['someapp']['containers']['someapp']
+    instance.create_container('someapp', definition, {'someapp', 'postgres'})
+
+    env_file = os.path.join(config.DATA_DIR, 'someapp.env')
+    volumes = {'someapp-migrate': '/srv/app/db/migrate',
+               'someapp-storage': '/srv/app/storage',
+               'someapp-uploads': '/srv/app/public/uploads'}
+    create_container.assert_called_once_with('someapp', 'someapp-someapp',
+                                             'example.com/someapp:0.23.6-210515',
+                                             env_file=env_file,
+                                             volumes=volumes,
+                                             requires={'someapp-postgres'},
+                                             hosts={'someapp', 'postgres'})
+
+@patch('spoc.app.App')
+def test_module_install(instance):
+    app.install('someapp')
+
+    instance.assert_called_once_with('someapp')
+    instance.return_value.install.assert_called_once()
+
+@patch('spoc.app.App')
+def test_module_update(instance):
+    app.update('someapp')
+
+    instance.assert_called_once_with('someapp')
+    instance.return_value.install.assert_called_once_with(update=True)
+
+@patch('spoc.app.App')
+def test_module_uninstall(instance):
+    app.uninstall('someapp')
+
+    instance.assert_called_once_with('someapp')
+    instance.return_value.uninstall.assert_called_once()
diff --git a/tests/test_autostart.py b/tests/test_autostart.py
new file mode 100644
index 0000000..b6fae03
--- /dev/null
+++ b/tests/test_autostart.py
@@ -0,0 +1,58 @@
+from unittest.mock import patch, call, mock_open
+
+from spoc import autostart
+from spoc import config
+
+@patch('builtins.open', new_callable=mock_open, read_data='someapp\nanotherapp\n')
+def test_get_apps(file_open):
+    apps = autostart.get_apps()
+
+    file_open.assert_called_once_with(config.AUTOSTART_FILE)
+    assert apps == {'someapp', 'anotherapp'}
+
+@patch('builtins.open', side_effect=FileNotFoundError('someapp.env'))
+def test_get_apps_filenotfounderror(file_open):
+    apps = autostart.get_apps()
+
+    file_open.assert_called_once_with(config.AUTOSTART_FILE)
+    assert apps == set()
+
+@patch('os.makedirs')
+@patch('spoc.autostart.get_apps', return_value={'someapp'})
+@patch('builtins.open', new_callable=mock_open)
+def test_set_app_enable(file_open, get_apps, makedirs):
+    autostart.set_app('anotherapp', True)
+
+    get_apps.assert_called_once()
+    makedirs.assert_called_once_with(config.DATA_DIR, exist_ok=True)
+    file_open.assert_called_once_with(config.AUTOSTART_FILE, 'w')
+    file_open().write.assert_has_calls([
+        call('someapp\n'),
+        call('anotherapp\n'),
+    ], any_order=True)
+
+@patch('os.makedirs')
+@patch('spoc.autostart.get_apps', return_value={'someapp', 'anotherapp'})
+@patch('builtins.open', new_callable=mock_open)
+def test_set_app_disable(file_open, get_apps, makedirs):
+    autostart.set_app('anotherapp', False)
+
+    get_apps.assert_called_once()
+    makedirs.assert_called_once_with(config.DATA_DIR, exist_ok=True)
+    file_open.assert_called_once_with(config.AUTOSTART_FILE, 'w')
+    file_open().write.assert_has_calls([
+        call('someapp\n'),
+    ])
+
+@patch('os.makedirs')
+@patch('spoc.autostart.get_apps', return_value={'someapp'})
+@patch('builtins.open', new_callable=mock_open)
+def test_set_app_nonexistent(file_open, get_apps, makedirs):
+    autostart.set_app('anotherapp', False)
+
+    get_apps.assert_called_once()
+    makedirs.assert_called_once_with(config.DATA_DIR, exist_ok=True)
+    file_open.assert_called_once_with(config.AUTOSTART_FILE, 'w')
+    file_open().write.assert_has_calls([
+        call('someapp\n'),
+    ])
diff --git a/tests/test_config.py b/tests/test_config.py
new file mode 100644
index 0000000..56bd74f
--- /dev/null
+++ b/tests/test_config.py
@@ -0,0 +1,25 @@
+import os
+
+from spoc import config
+
+TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')
+with open(os.path.join(TEST_DATA_DIR, 'spoc.conf')) as f:
+    MOCK_CONFIG = f.read()
+
+def test_config():
+    config_file = os.path.join(os.path.dirname(__file__), 'test_data/spoc.conf')
+    config.reload(config_file)
+
+    assert config.DATA_DIR == '/some/data/dir'
+    assert config.AUTOSTART_FILE == '/some/data/dir/autostart'
+    assert config.REPO_BASE_URL == 'https://user:pass@example.com/spoc'
+    assert config.REPO_FILE_URL == 'https://user:pass@example.com/spoc/repository.json'
+
+def test_default_config():
+    config_file = os.path.join(os.path.dirname(__file__), 'test_data/nonexistent')
+    config.reload(config_file)
+
+    assert config.DATA_DIR == '/var/lib/spoc'
+    assert config.AUTOSTART_FILE == '/var/lib/spoc/autostart'
+    assert config.REPO_BASE_URL == 'https://localhost'
+    assert config.REPO_FILE_URL == 'https://localhost/repository.json'
diff --git a/tests/test_data/podman_pod_ps.json b/tests/test_data/podman_pod_ps.json
new file mode 100644
index 0000000..c684f68
--- /dev/null
+++ b/tests/test_data/podman_pod_ps.json
@@ -0,0 +1,113 @@
+[
+    {
+        "Cgroup": "/libpod_parent",
+        "Containers": [
+            {
+                "Id": "59cacababc9ea7f0a7f4ad28c67227fdd6acc57a06b0b289390647e45152857b",
+                "Names": "yetanotherapp-cnt1",
+                "Status": "running"
+            },
+            {
+                "Id": "720dabf6edc271c52ea22535398966db094ab5eff1de894e6beb7c68e4657847",
+                "Names": "4faa6b9ad5aa-infra",
+                "Status": "running"
+            },
+            {
+                "Id": "7af90eef4b48f20dabdaaec90c6c7583fea6800d2433ef7879b805d51b81bfc4",
+                "Names": "yetanotherapp-cnt2",
+                "Status": "running"
+            }
+        ],
+        "Created": "2021-07-06T09:19:24.609538926+02:00",
+        "Id": "4faa6b9ad5aa28b915a8ac967a01d9c3317be3a3bfc198b0681636399c19372e",
+        "InfraId": "720dabf6edc271c52ea22535398966db094ab5eff1de894e6beb7c68e4657847",
+        "Name": "yetanotherapp",
+        "Namespace": "",
+        "Networks": [
+            "podman"
+        ],
+        "Status": "Running",
+        "Labels": {
+            "spoc.app": "yetanotherapp",
+            "spoc.version": "0.3"
+        }
+    },
"720dabf6edc271c52ea22535398966db094ab5eff1de894e6beb7c68e4657847", + "Name": "yetanotherapp", + "Namespace": "", + "Networks": [ + "podman" + ], + "Status": "Running", + "Labels": { + "spoc.app": "yetanotherapp", + "spoc.version": "0.3" + } + }, + { + "Cgroup": "/libpod_parent", + "Containers": [ + { + "Id": "798cae491ef9025db809c261fb1169f5cc09526119d252340b9d64f0fce37be1", + "Names": "97f0c135887c-infra", + "Status": "running" + }, + { + "Id": "9d02724a74d929818d08395b376d960b3dd30556738bc43e96f50a27f355b9a5", + "Names": "anotherapp-cnt2", + "Status": "configured" + }, + { + "Id": "b5833a8da89d40824fdb4f2b779d24135d07452f5bfa583f96e369c5953ee286", + "Names": "anotherapp-cnt1", + "Status": "stopped" + } + ], + "Created": "2021-07-06T08:47:06.389299933+02:00", + "Id": "97f0c135887c8ef6eccf4a37fbcc1e26a0f3c02e73de8edaa959bfba9592b1dd", + "InfraId": "798cae491ef9025db809c261fb1169f5cc09526119d252340b9d64f0fce37be1", + "Name": "anotherapp", + "Namespace": "", + "Networks": [ + "podman" + ], + "Status": "Degraded", + "Labels": { + "spoc.app": "anotherapp", + "spoc.version": "0.2" + } + }, + { + "Cgroup": "/libpod_parent", + "Containers": [ + { + "Id": "151e1e35083391eea41605db364b7e15fde7047a6119feffcd06984671a5c991", + "Names": "be0a8d0ab749-infra", + "Status": "running" + } + ], + "Created": "2021-07-03T20:01:37.63866841+02:00", + "Id": "be0a8d0ab749b3c089f72a844700b76aafa541fffca5186865bef185fc1914a0", + "InfraId": "151e1e35083391eea41605db364b7e15fde7047a6119feffcd06984671a5c991", + "Name": "notmyapp", + "Namespace": "", + "Networks": [ + "podman" + ], + "Status": "Running", + "Labels": { + + } + }, + { + "Cgroup": "/libpod_parent", + "Containers": [ + { + "Id": "0897891f6e7308903c4316ce80f569320176a38d5bc4de1fbf4b2323c1a51fcb", + "Names": "18c00febc93c-infra", + "Status": "configured" + } + ], + "Created": "2021-07-03T13:29:36.975071665+02:00", + "Id": "18c00febc93ca105b5d83247e7b4a0b2184c82262d421f2c857dbf155dbe97e8", + "InfraId": "0897891f6e7308903c4316ce80f569320176a38d5bc4de1fbf4b2323c1a51fcb", + "Name": "someapp", + "Namespace": "", + "Networks": [ + "podman" + ], + "Status": "Created", + "Labels": { + "spoc.app": "someapp", + "spoc.version": "0.1" + } + } +] diff --git a/tests/test_data/podman_volume_ls.json b/tests/test_data/podman_volume_ls.json new file mode 100644 index 0000000..9dcd69b --- /dev/null +++ b/tests/test_data/podman_volume_ls.json @@ -0,0 +1,28 @@ +[ + { + "Name": "someapp-conf", + "Driver": "local", + "Mountpoint": "/var/lib/containers/storage/volumes/someapp-conf/_data", + "CreatedAt": "2021-07-04T18:22:44.758466689+02:00", + "Labels": { + "spoc.app": "someapp" + }, + "Scope": "local", + "Options": { + + } + }, + { + "Name": "someapp-data", + "Driver": "local", + "Mountpoint": "/var/lib/containers/storage/volumes/someapp-data/_data", + "CreatedAt": "2021-07-03T13:22:11.455581712+02:00", + "Labels": { + "spoc.app": "someapp" + }, + "Scope": "local", + "Options": { + + } + } +] diff --git a/tests/test_data/repository.json b/tests/test_data/repository.json new file mode 100644 index 0000000..1c572a6 --- /dev/null +++ b/tests/test_data/repository.json @@ -0,0 +1,81 @@ +{ + "someapp": { + "version": "0.23.5-210416", + "meta": { + "title": "Some Application", + "desc-cs": "Platforma pro účast občanů", + "desc-en": "Platform for citizen participation", + "license": "GPL" + }, + "environment": { + "RAILS_ENV": "production", + "RAILS_LOG_TO_STDOUT": "1", + "POSTGRES_USER": "someapp", + "POSTGRES_PASSWORD": "someapp", + "POSTGRES_DB": "someapp", + "POSTGRES_HOST": 
"someapp-postgres", + "DATABASE_URL": "postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}/${POSTGRES_DB}", + "APP_ADMIN_USER": "admin@example.com", + "APP_ADMIN_PASSWORD": "someapp123456", + "SECRET_KEY_BASE": "some_key", + "SMTP_USERNAME": "admin@example.com", + "SMTP_PASSWORD": "", + "SMTP_ADDRESS": "someapp-smtp", + "SMTP_DOMAIN": "example.com", + "MAPS_API_KEY": "", + "TWILIO_ACCOUNT_SID": "", + "TWILIO_AUTH_TOKEN": "", + "TWILIO_SENDER_NUMBER": "" + }, + "containers": { + "someapp": { + "image": "example.com/someapp:0.23.6-210515", + "requires": [ + "postgres" + ], + "volumes": { + "migrate": "/srv/app/db/migrate", + "storage": "/srv/app/storage", + "uploads": "/srv/app/public/uploads" + } + }, + "postgres": { + "image": "docker.io/postgres:12-alpine", + "volumes": { + "postgres-data": "/var/lib/postgresql/data" + } + } + } + }, + "anotherapp": { + "version": "1.0.3-210106", + "meta": { + "title": "Another Application", + "desc-cs": "Řízení humanítární činnosti", + "desc-en": "Management of humanitarian activities", + "license": "GPL" + }, + "containers": { + "anotherapp": { + "image": "example.com/anotherapp:1.0.3-210106", + "requires": [ + "postgres" + ], + "volumes": { + "conf": "/srv/web2py/applications/app/models", + "data-Spotter": "/srv/web2py/applications/app/modules/templates/Spotter", + "data-databases": "/srv/web2py/applications/app/databases", + "data-errors": "/srv/web2py/applications/app/errors", + "data-sessions": "/srv/web2py/applications/app/sessions", + "data-uploads": "/srv/web2py/applications/app/uploads" + } + }, + "postgres": { + "image": "docker.io/postgres:12-alpine", + "volumes": { + "postgres-data": "/var/lib/postgresql/data" + } + } + } + } +} diff --git a/tests/test_data/spoc.conf b/tests/test_data/spoc.conf new file mode 100644 index 0000000..96bd063 --- /dev/null +++ b/tests/test_data/spoc.conf @@ -0,0 +1,3 @@ +[spoc] +data-dir = /some/data/dir +repo-url = https://user:pass@example.com/spoc/ diff --git a/tests/test_data/test.env b/tests/test_data/test.env new file mode 100644 index 0000000..be1eb9c --- /dev/null +++ b/tests/test_data/test.env @@ -0,0 +1,3 @@ +RAILS_ENV=test +POSTGRES_PASSWORD=asdf=1234 +SOMEKEY=someval diff --git a/tests/test_data/test.env.json b/tests/test_data/test.env.json new file mode 100644 index 0000000..0bac85e --- /dev/null +++ b/tests/test_data/test.env.json @@ -0,0 +1,5 @@ +{ + "RAILS_ENV": "test", + "POSTGRES_PASSWORD": "asdf=1234", + "SOMEKEY": "someval" +} diff --git a/tests/test_depsolver.py b/tests/test_depsolver.py new file mode 100644 index 0000000..0b33b6d --- /dev/null +++ b/tests/test_depsolver.py @@ -0,0 +1,91 @@ +import pytest + +from spoc import depsolver + +def test_circulardependencyerror(): + ex = depsolver.CircularDependencyError({'dep1': {'dep2'}, 'dep2': {'dep1'}}) + ex_str = str(ex) + + assert ex.deps == {'dep1': {'dep2'}, 'dep2': {'dep1'}} + assert ex_str == 'Dependency resolution failed due to circular dependency.\n' \ + 'Unresolved dependencies:\n' \ + ' dep1 => {\'dep2\'}\n' \ + ' dep2 => {\'dep1\'}' + +def test_missingdependencyerror(): + ex = depsolver.MissingDependencyError({'dep1': {'dep2'}}, {'dep2'}) + ex_str = str(ex) + + assert ex.deps == {'dep1': {'dep2'}} + assert ex.missing == {'dep2'} + assert ex_str == 'Dependency resolution failed due to missing dependency.\n' \ + 'Missing dependencies:\n' \ + ' {\'dep2\'}\n' \ + 'Unresolved dependencies:\n' \ + ' dep1 => {\'dep2\'}' + +def test_depsolver(): + solver = depsolver.DepSolver() + + assert solver.unresolved == {} + + 
+    solver.add('dep1', ['dep2', 'dep3'])
+    solver.add('dep2', ['dep3', 'dep3'])
+    solver.add('dep3', [])
+
+    assert solver.unresolved == {
+        'dep1': {'dep2', 'dep3'},
+        'dep2': {'dep3'},
+        'dep3': set(),
+    }
+
+    resolved = solver.solve()
+
+    assert resolved == ['dep3', 'dep2', 'dep1']
+
+def test_depsolver_complex():
+    solver = depsolver.DepSolver()
+
+    solver.add('dep1', ['dep8', 'dep12'])
+    solver.add('dep2', ['dep10'])
+    solver.add('dep3', [])
+    solver.add('dep4', ['dep9'])
+    solver.add('dep5', ['dep1', 'dep6', 'dep8'])
+    solver.add('dep6', ['dep2','dep10', 'dep13', 'dep14'])
+    solver.add('dep7', ['dep9'])
+    solver.add('dep8', ['dep2', 'dep12'])
+    solver.add('dep9', [])
+    solver.add('dep10', ['dep9'])
+    solver.add('dep11', ['dep2', 'dep14'])
+    solver.add('dep12', ['dep7'])
+    solver.add('dep13', ['dep9'])
+    solver.add('dep14', ['dep4'])
+
+    resolved = solver.solve()
+
+    # Order within the same batch (i.e. items not depending on each other) can be random
+    assert list(sorted(resolved[:2])) == ['dep3', 'dep9']
+    assert list(sorted(resolved[2:9])) == ['dep10', 'dep12', 'dep13', 'dep14', 'dep2', 'dep4', 'dep7']
+    assert list(sorted(resolved[9:12])) == ['dep11', 'dep6', 'dep8']
+    assert list(sorted(resolved[12:])) == ['dep1', 'dep5']
+
+def test_depsolver_circular():
+    solver = depsolver.DepSolver()
+
+    solver.add('dep1', ['dep2', 'dep3'])
+    solver.add('dep2', ['dep3'])
+    solver.add('dep3', ['dep4'])
+    solver.add('dep4', ['dep1'])
+
+    with pytest.raises(depsolver.CircularDependencyError):
+        solver.solve()
+
+def test_depsolver_missing():
+    solver = depsolver.DepSolver()
+
+    solver.add('dep1', ['dep2', 'dep3'])
+    solver.add('dep2', ['dep3'])
+    solver.add('dep4', ['dep1'])
+
+    with pytest.raises(depsolver.MissingDependencyError):
+        solver.solve()
diff --git a/tests/test_flock.py b/tests/test_flock.py
new file mode 100644
index 0000000..2bf6819
--- /dev/null
+++ b/tests/test_flock.py
@@ -0,0 +1,100 @@
+import errno
+import fcntl
+import pytest
+from unittest.mock import patch, call, mock_open
+
+from spoc import flock
+
+def fail_callback(pid):
+    print(f'Lock held by {pid}')
+
+@flock.locked('test.lock', fail_callback=fail_callback)
+def mock_func():
+    pass
+
+@patch('fcntl.flock')
+@patch('time.sleep')
+@patch('os.getpid', return_value=1234)
+@patch('builtins.open', new_callable=mock_open)
+def test_lock_success(lock_open, getpid, sleep, fcntl_flock):
+    mock_func()
+
+    lock_open.assert_has_calls([
+        call('test.lock', 'a'),
+        call().__enter__(),
+        call().__exit__(None, None, None),
+        call('test.lock', 'r+'),
+        call().__enter__(),
+        call().truncate(),
+        call().write('1234'),
+        call().flush(),
+        call().__exit__(None, None, None),
+    ])
+
+    fcntl_flock.assert_called_once_with(lock_open(), fcntl.LOCK_EX | fcntl.LOCK_NB)
+    sleep.assert_not_called()
+    getpid.assert_called_once()
+
+@patch('fcntl.flock')
+@patch('time.sleep')
+@patch('os.getpid', return_value=5678)
+@patch('builtins.open', new_callable=mock_open, read_data='1234')
+def test_lock_fail(lock_open, getpid, sleep, fcntl_flock, capsys):
+    fcntl_flock.side_effect = [
+        OSError(errno.EAGAIN, 'in use'),
+        OSError(errno.EAGAIN, 'in use'),
+        None,
+    ]
+
+    mock_func()
+
+    lock_open.assert_has_calls([
+        call('test.lock', 'a'),
+        call().__enter__(),
+        call().__exit__(None, None, None),
+        call('test.lock', 'r+'),
+        call().__enter__(),
+        call().read(),
+        call().seek(0),
+        call().truncate(),
+        call().write('5678'),
+        call().flush(),
+        call().__exit__(None, None, None),
+    ])
+
+    expected_fcntl_flock_call = call(lock_open(), fcntl.LOCK_EX | fcntl.LOCK_NB)
+    assert fcntl_flock.call_args_list.count(expected_fcntl_flock_call) == 3
+    expected_sleep_call = call(0.1)
+    assert sleep.call_args_list.count(expected_sleep_call) == 2
+    getpid.assert_called_once()
+
+    captured = capsys.readouterr()
+    assert captured.out == 'Lock held by 1234\n'
+
+@patch('fcntl.flock', side_effect=OSError(errno.EBADF, 'nope'))
+@patch('time.sleep')
+@patch('os.getpid', return_value=5678)
+@patch('builtins.open', new_callable=mock_open, read_data='1234')
+def test_lock_error(lock_open, getpid, sleep, fcntl_flock):
+    with pytest.raises(OSError):
+        mock_func()
+
+    # Last call is
+    # call().__exit__(<class 'OSError'>, OSError(9, 'nope'), <traceback object>)
+    # The exception can be captured above and checked as follows
+    # call().__exit__(ex.type, ex.value, ex.tb.tb_next.tb_next.tb_next)
+    # but it may be CPython specific, and frankly, that tb_next chain looks horrible,
+    # hence checking just the method and comparing the args with themselves
+    last_exit_call_args = lock_open().__exit__.call_args_list[-1][0]
+    lock_open.assert_has_calls([
+        call('test.lock', 'a'),
+        call().__enter__(),
+        call().__exit__(None, None, None),
+        call('test.lock', 'r+'),
+        call().__enter__(),
+        call().__exit__(*last_exit_call_args),
+    ])
+
+    fcntl_flock.assert_called_once_with(lock_open(), fcntl.LOCK_EX | fcntl.LOCK_NB)
+    sleep.assert_not_called()
+    getpid.assert_not_called()
diff --git a/tests/test_podman.py b/tests/test_podman.py
new file mode 100644
index 0000000..a2501a4
--- /dev/null
+++ b/tests/test_podman.py
@@ -0,0 +1,126 @@
+import subprocess
+import os
+from unittest.mock import patch, call
+
+from spoc import podman
+
+TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')
+
+@patch('subprocess.run')
+def test_get_apps(run):
+    with open(os.path.join(TEST_DATA_DIR, 'podman_pod_ps.json')) as f:
+        run.return_value.stdout = f.read()
+
+    pods = podman.get_apps()
+
+    expected_cmd = ['podman', 'pod', 'ps', '--format', 'json']
+    run.assert_called_once_with(expected_cmd, check=True, stdout=subprocess.PIPE, text=True)
+    assert pods == {'someapp': '0.1', 'anotherapp': '0.2', 'yetanotherapp': '0.3'}
+
+@patch('subprocess.run')
+def test_get_volumes_for_app(run):
+    with open(os.path.join(TEST_DATA_DIR, 'podman_volume_ls.json')) as f:
+        run.return_value.stdout = f.read()
+
+    volumes = podman.get_volumes_for_app('someapp')
+
+    expected_cmd = ['podman', 'volume', 'ls', '--filter', 'label=spoc.app=someapp',
+                    '--format', 'json']
+    run.assert_called_once_with(expected_cmd, check=True, stdout=subprocess.PIPE, text=True)
+    assert volumes == {'someapp-conf', 'someapp-data'}
+
+@patch('subprocess.run')
+def test_start_pod(run):
+    podman.start_pod('someapp')
+
+    expected_cmd = ['podman', 'pod', 'start', 'someapp']
+    run.assert_called_once_with(expected_cmd, check=True)
+
+@patch('subprocess.run')
+def test_stop_pod(run):
+    podman.stop_pod('someapp')
+
+    expected_cmd = ['podman', 'pod', 'stop', '--ignore', 'someapp']
+    run.assert_called_once_with(expected_cmd, check=True)
+
+@patch('subprocess.run')
+def test_get_pod_status(run):
+    run.return_value.stdout = 'RESULT\n'
+    status = podman.get_pod_status('someapp')
+
+    expected_cmd = ['podman', 'pod', 'ps', '--filter', 'label=spoc.app=someapp']
+    run.assert_called_once_with(expected_cmd, check=True, stdout=subprocess.PIPE, text=True)
+    assert status == 'RESULT'
+
+@patch('subprocess.run')
+def test_get_pod_status_all(run):
+    run.return_value.stdout = 'RESULT\n'
+    status = podman.get_pod_status()
+
+    expected_cmd = ['podman', 'pod', 'ps']
+    run.assert_called_once_with(expected_cmd, check=True, stdout=subprocess.PIPE, text=True)
+    assert status == 'RESULT'
+
+@patch('subprocess.run')
+def test_create_volume(run):
+    podman.create_volume('someapp', 'someapp-vol')
+
+    expected_cmd = ['podman', 'volume', 'create', '--label', 'spoc.app=someapp', 'someapp-vol']
+    run.assert_called_once_with(expected_cmd, check=True)
+
+@patch('subprocess.run')
+def test_remove_volume(run):
+    podman.remove_volume('someapp-vol')
+
+    expected_cmd = ['podman', 'volume', 'rm', 'someapp-vol']
+    run.assert_called_once_with(expected_cmd, check=True)
+
+@patch('subprocess.run')
+def test_create_pod(run):
+    podman.create_pod('someapp', '0.1')
+
+    expected_cmd = ['podman', 'pod', 'create', '--name', 'someapp',
+                    '--label', 'spoc.app=someapp', '--label', 'spoc.version=0.1']
+    run.assert_called_once_with(expected_cmd, check=True)
+
+@patch('spoc.podman.stop_pod')
+@patch('subprocess.run')
+def test_remove_pod(run, stop_pod):
+    podman.remove_pod('someapp')
+
+    stop_pod.assert_called_once_with('someapp')
+    expected_cmd = ['podman', 'pod', 'rm', '--ignore', 'someapp']
+    run.assert_called_once_with(expected_cmd, check=True)
+
+@patch('subprocess.run')
+def test_create_container(run):
+    podman.create_container('someapp', 'someapp-cnt', 'example.com/someapp:0.23.6-210515',
+                            env_file='/var/lib/spoc/someapp.env',
+                            volumes={'someapp-srv': '/srv', 'someapp-mnt': '/mnt'},
+                            requires={'someapp-cnt3', 'someapp-cnt2'},
+                            hosts={'cnt2', 'cnt3', 'cnt'})
+
+    expected_cmd = ['podman', 'container', 'create', '--name', 'someapp-cnt', '--pod', 'someapp',
+                    '--restart', 'unless-stopped', '--env-file', '/var/lib/spoc/someapp.env',
+                    '--requires', 'someapp-cnt2,someapp-cnt3', '--volume', 'someapp-mnt:/mnt',
+                    '--volume', 'someapp-srv:/srv', '--add-host', 'cnt:127.0.0.1',
+                    '--add-host', 'cnt2:127.0.0.1', '--add-host', 'cnt3:127.0.0.1',
+                    'example.com/someapp:0.23.6-210515']
+    run.assert_called_once_with(expected_cmd, check=True)
+
+@patch('subprocess.run')
+def test_create_container_minimal(run):
+    podman.create_container('someapp', 'someapp-cnt', 'example.com/someapp:0.23.6-210515')
+
+    expected_cmd = ['podman', 'container', 'create', '--name', 'someapp-cnt', '--pod', 'someapp',
+                    '--restart', 'unless-stopped', 'example.com/someapp:0.23.6-210515']
+    run.assert_called_once_with(expected_cmd, check=True)
+
+@patch('subprocess.run')
+def test_prune(run):
+    podman.prune()
+
+    run.assert_has_calls([
+        call(['podman', 'image', 'prune', '--all', '--force'], check=True),
+        call(['podman', 'volume', 'prune', '--force'], check=True),
+    ])
diff --git a/tests/test_repo.py b/tests/test_repo.py
new file mode 100644
index 0000000..4353aaf
--- /dev/null
+++ b/tests/test_repo.py
@@ -0,0 +1,40 @@
+from unittest.mock import patch, call
+
+from spoc import config
+from spoc import repo
+
+@patch('spoc.repo._data', {})
+@patch('requests.get')
+def test_load(req_get):
+    repo.load()
+
+    req_get.assert_called_once_with(config.REPO_FILE_URL, timeout=5)
+    req_get.return_value.raise_for_status.assert_called_once()
+    req_get.return_value.json.assert_called_once()
+
+@patch('spoc.repo._data', {})
+@patch('requests.get')
+def test_load_twice_no_force(req_get):
+    repo.load()
+    repo.load()
+
+    req_get.assert_called_once_with(config.REPO_FILE_URL, timeout=5)
+    req_get.return_value.raise_for_status.assert_called_once()
+    req_get.return_value.json.assert_called_once()
+
+@patch('spoc.repo._data', {})
+@patch('requests.get')
+def test_load_twice_force(req_get):
+    repo.load()
+    repo.load(force=True)
+
+    expected_call = call(config.REPO_FILE_URL, timeout=5)
+    assert req_get.call_args_list.count(expected_call) == 2
+    assert req_get.return_value.raise_for_status.call_count == 2
+    assert req_get.return_value.json.call_count == 2
+
+@patch('spoc.repo.load')
+def test_get_apps(repo_load):
+    repo.get_apps()
+
+    repo_load.assert_called_once()
diff --git a/tests/test_spoc.py b/tests/test_spoc.py
new file mode 100644
index 0000000..a54ef9f
--- /dev/null
+++ b/tests/test_spoc.py
@@ -0,0 +1,266 @@
+import pytest
+from argparse import Namespace
+from unittest.mock import call, mock_open, patch
+
+import spoc
+
+@patch('builtins.open', new_callable=mock_open, read_data='foo\0arg1\0arg2\n')
+def test_print_lock(cmdline_open, capsys):
+    spoc.print_lock('123')
+
+    cmdline_open.assert_called_once_with('/proc/123/cmdline')
+    captured = capsys.readouterr()
+    assert captured.out == 'Waiting for lock currently held by process 123 - foo arg1 arg2\n'
+
+@patch('spoc.podman.get_apps', return_value={'anotherapp': '0.1', 'someapp': '0.1'})
+def test_listing_installed(get_apps, capsys):
+    spoc.listing('installed')
+
+    get_apps.assert_called_once()
+
+    # Order is important here
+    captured = capsys.readouterr()
+    assert captured.out == 'anotherapp 0.1\nsomeapp 0.1\n'
+
+@patch('spoc.repo.get_apps')
+def test_listing_online(get_apps):
+    spoc.listing('online')
+
+    get_apps.assert_called_once()
+
+@patch('spoc.repo.get_apps',
+       return_value={'someapp': {'version': '0.2'}, 'anotherapp': {'version': '0.1'}})
+@patch('spoc.podman.get_apps', return_value={'someapp': '0.1'})
+def test_listing_updates(podman_get_apps, repo_get_apps, capsys):
+    spoc.listing('updates')
+
+    repo_get_apps.assert_called_once()
+    podman_get_apps.assert_called_once()
+
+    captured = capsys.readouterr()
+    assert captured.out == 'someapp 0.1 -> 0.2\n'
+
+@patch('spoc.repo.get_apps')
+@patch('spoc.podman.get_apps')
+def test_listing_invalid(podman_get_apps, repo_get_apps, capsys):
+    spoc.listing('invalid')
+
+    repo_get_apps.assert_not_called()
+    podman_get_apps.assert_not_called()
+
+    captured = capsys.readouterr()
+    assert captured.out == ''
+
+@patch('spoc.app.install')
+def test_install(app_install):
+    spoc.install('someapp')
+
+    app_install.assert_called_once_with('someapp')
+
+@patch('spoc.app.update')
+def test_update(app_update):
+    spoc.update('someapp')
+
+    app_update.assert_called_once_with('someapp')
+
+@patch('spoc.app.uninstall')
+def test_uninstall(app_uninstall):
+    spoc.uninstall('someapp')
+
+    app_uninstall.assert_called_once_with('someapp')
+
+@patch('spoc.podman.start_pod')
+def test_start(start_pod):
+    spoc.start('someapp')
+
+    start_pod.assert_called_once_with('someapp')
+
+@patch('spoc.podman.stop_pod')
+def test_stop(stop_pod):
+    spoc.stop('someapp')
+
+    stop_pod.assert_called_once_with('someapp')
+
+@patch('spoc.podman.get_pod_status', return_value='RESULT')
+def test_status(get_pod_status, capsys):
+    spoc.status('someapp')
+
+    get_pod_status.assert_called_once_with('someapp')
+    captured = capsys.readouterr()
+    assert captured.out == 'RESULT\n'
+
+@pytest.mark.parametrize('value,expected',[
+    ('1', True),
+    ('on', True),
+    ('Enable', True),
+    ('TRUE', True),
+    ('whatever', False),
+])
+@patch('spoc.autostart.set_app')
+def test_set_autostart(set_app, value, expected):
+    spoc.set_autostart('someapp', value)
+
+    set_app.assert_called_once_with('someapp', expected)
+
+@patch('spoc.autostart.get_apps', return_value={'someapp', 'anotherapp'})
+@patch('spoc.podman.start_pod')
+def test_start_autostarted(start_pod, get_apps):
+    spoc.start_autostarted()
+
+    get_apps.assert_called_once()
+    start_pod.assert_has_calls([
+        call('someapp'),
+        call('anotherapp'),
+    ], any_order=True)
+
+@patch('spoc.podman.get_apps', return_value={'someapp': '0.1', 'anotherapp': '0.1'})
+@patch('spoc.podman.stop_pod')
+def test_stop_all(stop_pod, get_apps):
+    spoc.stop_all()
+
+    get_apps.assert_called_once()
+    stop_pod.assert_has_calls([
+        call('someapp'),
+        call('anotherapp'),
+    ], any_order=True)
+
+@patch('spoc.podman.prune')
+def test_prune(prune):
+    spoc.prune()
+
+    prune.assert_called_once()
+
['uninstall'], + ['start'], + ['stop'], + ['autostart'], + ['autostart', 'someapp'], + ['invalid'], +]) +def test_main_systemexit(argv): + argv.insert(0, 'foo') + with patch('sys.argv', argv): + with pytest.raises(SystemExit): + spoc.main() diff --git a/usr/bin/spoc-app b/usr/bin/spoc-app deleted file mode 100755 index 06ce2e8..0000000 --- a/usr/bin/spoc-app +++ /dev/null @@ -1,199 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- - -import argparse -import os -from pkg_resources import parse_version - -from spoc import repo_local, repo_online, repo_publish -from spoc.app import App -from spoc.cli import ActionQueue, print_lock, readable_size -from spoc.config import LOCK_FILE -from spoc.flock import locked -from spoc.image import Image - -def listing(list_type): - # Lists applications in particular state - if list_type == 'installed': - apps = repo_local.get_apps() - elif list_type == 'online': - apps = repo_online.get_apps() - elif list_type == 'updates': - online_apps = repo_online.get_apps() - apps = [a for a,d in repo_local.get_apps().items() if a in online_apps and parse_version(online_apps[a]['version']) > parse_version(d['version'])] - elif list_type == 'published': - apps = repo_publish.get_apps() - elif list_type == 'running': - apps = [app for app in repo_local.get_apps() if App(app).is_running()] - elif list_type == 'stopped': - apps = [app for app in repo_local.get_apps() if App(app).is_stopped()] - for app in apps: - print(app) - -@locked(LOCK_FILE, print_lock) -def install(app_name): - # Install application from online repository - queue = ActionQueue() - required_images = [] - for container in repo_online.get_app(app_name)['containers'].values(): - required_images.extend(repo_online.get_image(container['image'])['layers']) - local_images = repo_local.get_images() - # Layers need to be downloaded in correct order - for layer in list(dict.fromkeys(required_images)): - if layer not in local_images: - queue.download_image(Image(layer, False)) - queue.install_app(App(app_name, False, False)) - queue.process() - -@locked(LOCK_FILE, print_lock) -def update(app_name): - # Update application from online repository - queue = ActionQueue() - required_images = [] - for container in repo_online.get_app(app_name)['containers'].values(): - required_images.extend(repo_online.get_image(container['image'])['layers']) - local_images = repo_local.get_images() - # Layers need to be downloaded in correct order - for layer in list(dict.fromkeys(required_images)): - if layer not in local_images: - queue.download_image(Image(layer, False)) - queue.update_app(App(app_name, False)) - queue.process() - -@locked(LOCK_FILE, print_lock) -def uninstall(app_name): - # Remove application and its containers from local repository - queue = ActionQueue() - queue.uninstall_app(App(app_name, False)) - queue.process() - -def start(app_name): - # Start all application containers - queue = ActionQueue() - queue.start_app(App(app_name)) - queue.process() - -def stop(app_name): - # Stop all application containers - queue = ActionQueue() - queue.stop_app(App(app_name)) - queue.process() - -def status(app_name): - # Print status of all application containers - for container,status in sorted(App(app_name).status().items()): - print(f'{container}: {status.value}') - -def publish(filename, force): - app_name = os.path.basename(os.path.dirname(os.path.abspath(filename))) - # Check if publishing is needed and attempt to publish the application - if force or app_name not in repo_publish.get_apps(): - app = App(app_name, 
-        print(f'Publishing application {app_name} from file {os.path.abspath(filename)}')
-        app.unpublish()
-        size, dlsize = app.publish(filename)
-        print(f'Application {app_name} compressed from {readable_size(size)} to {readable_size(dlsize)} and published successfully')
-    else:
-        print(f'Application {app_name} already published, skipping publish task')
-
-def unpublish(app_name):
-    # Remove the application from publish repo
-    App(app_name, False, False).unpublish()
-
-def autostart(app_name, value):
-    # Set if the application should be autostarted on boot
-    value = value.lower() in ('1', 'on', 'enable', 'true')
-    App(app_name, False).set_autostart(value)
-
-def start_autostarted():
-    # Start all applications (resp. their containers) which are set to be autostarted on boot
-    apps = [App(a) for a,d in repo_local.get_apps().items() if d.get('autostart')]
-    for app in apps:
-        app.start()
-
-def stop_all():
-    # Stop all applications (resp. their containers)
-    apps = [App(a) for a,d in repo_local.get_apps().items()]
-    for app in apps:
-        app.stop()
-
-parser = argparse.ArgumentParser(description='SPOC application manager')
-parser.set_defaults(action=None)
-subparsers = parser.add_subparsers()
-
-parser_list = subparsers.add_parser('list')
-parser_list.set_defaults(action=listing)
-parser_list.add_argument('type', choices=('installed', 'online', 'updates', 'published', 'running', 'stopped'), default='installed', const='installed', nargs='?', help='Selected repository or application criteria')
-
-parser_install = subparsers.add_parser('install')
-parser_install.set_defaults(action=install)
-parser_install.add_argument('app', help='Name of the application to install')
-
-parser_update = subparsers.add_parser('update')
-parser_update.set_defaults(action=update)
-parser_update.add_argument('app', help='Name of the application to update')
-
-parser_uninstall = subparsers.add_parser('uninstall')
-parser_uninstall.set_defaults(action=uninstall)
-parser_uninstall.add_argument('app', help='Name of the application to uninstall')
-
-parser_start = subparsers.add_parser('start')
-parser_start.set_defaults(action=start)
-parser_start.add_argument('app', help='Name of the application to start')
-
-parser_stop = subparsers.add_parser('stop')
-parser_stop.set_defaults(action=stop)
-parser_stop.add_argument('app', help='Name of the application to stop')
-
-parser_status = subparsers.add_parser('status')
-parser_status.set_defaults(action=status)
-parser_status.add_argument('app', help='Name of the application to check')
-
-parser_publish = subparsers.add_parser('publish')
-parser_publish.set_defaults(action=publish)
-parser_publish.add_argument('-f', '--force', action='store_true', help='Force republish already published application')
-parser_publish.add_argument('filename', help='Path to metadata file of the application to publish')
-
-parser_unpublish = subparsers.add_parser('unpublish')
-parser_unpublish.set_defaults(action=unpublish)
-parser_unpublish.add_argument('app', help='Name of the application to unpublish')
-
-parser_autostart = subparsers.add_parser('autostart')
-parser_autostart.set_defaults(action=autostart)
-parser_autostart.add_argument('app', help='Name of the application to be automatically started')
-parser_autostart.add_argument('value', choices=('1', 'on', 'enable', 'true', '0', 'off', 'disable', 'false'), help='Set or unset the application to be automatically started after the host boots up')
-
-parser_start_autostarted = subparsers.add_parser('start-autostarted')
-parser_start_autostarted.set_defaults(action=start_autostarted)
-
-parser_stop_all = subparsers.add_parser('stop-all')
-parser_stop_all.set_defaults(action=stop_all)
-
-args = parser.parse_args()
-
-if args.action is listing:
-    listing(args.type)
-elif args.action is install:
-    install(args.app)
-elif args.action is update:
-    update(args.app)
-elif args.action is uninstall:
-    uninstall(args.app)
-elif args.action is start:
-    start(args.app)
-elif args.action is stop:
-    stop(args.app)
-elif args.action is status:
-    status(args.app)
-elif args.action is publish:
-    publish(args.filename, args.force)
-elif args.action is unpublish:
-    unpublish(args.app)
-elif args.action is autostart:
-    autostart(args.app, args.value)
-elif args.action is start_autostarted:
-    start_autostarted()
-elif args.action is stop_all:
-    stop_all()
-else:
-    parser.print_usage()
diff --git a/usr/bin/spoc-container b/usr/bin/spoc-container
deleted file mode 100755
index e7e4ff2..0000000
--- a/usr/bin/spoc-container
+++ /dev/null
@@ -1,189 +0,0 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
-
-import argparse
-import os
-import shlex
-import sys
-
-from spoc import repo_local
-from spoc.config import VOLUMES_DIR
-from spoc.container import Container
-from spoc.image import Image
-
-def listing(state):
-    # Lists containers in a particular state
-    if state == 'all':
-        containers = repo_local.get_containers().keys()
-    elif state == 'running':
-        containers = [c for c in repo_local.get_containers() if Container(c).is_running()]
-    elif state == 'stopped':
-        containers = [c for c in repo_local.get_containers() if Container(c).is_stopped()]
-    for container in containers:
-        print(container)
-
-def modify_depend(container, depend):
-    # Change container dependencies
-    if depend.startswith('!'):
-        try:
-            container.depends.remove(depend[1:])
-        except ValueError:
-            pass
-    else:
-        # Add the dependency and remove duplicates
-        container.depends.append(depend)
-        container.depends = list(set(container.depends))
-
-def modify_mount(container, mount):
-    # Change container mount points
-    volume,mountpoint = mount.split(':', 1)
-    if mountpoint:
-        container.mounts[volume] = mountpoint
-    else:
-        try:
-            del container.mounts[volume]
-        except KeyError:
-            pass
-
-def modify_env(container, env):
-    # Change container environment values
-    key,value = env.split('=', 1)
-    if value:
-        container.env[key] = value
-    else:
-        try:
-            del container.env[key]
-        except KeyError:
-            pass
-
-def modify_container(container, depends, mounts, envs, uid, gid, cmd, cwd, ready, halt):
-    # Change container definition
-    for depend in depends:
-        modify_depend(container, depend)
-    for mount in mounts:
-        modify_mount(container, mount)
-    for env in envs:
-        modify_env(container, env)
-    args = locals()
-    for member in ('uid', 'gid', 'cmd', 'cwd', 'ready', 'halt'):
-        value = args[member]
-        if value:
-            setattr(container, member, value)
-
-def create(container_name, image_name, depends, mounts, env, uid, gid, cmd, cwd, ready, halt):
-    # Create container based on image definition and extra fields
-    container = Container(container_name, False)
-    container.set_definition(Image(image_name).get_definition())
-    modify_container(container, depends, mounts, env, uid, gid, cmd, cwd, ready, halt)
-    container.create()
-
-def modify(container_name, depends, mounts, env, uid, gid, cmd, cwd, ready, halt):
-    # Change configuration of an existing container
-    container = Container(container_name)
-    modify_container(container, depends, mounts, env, uid, gid, cmd, cwd, ready, halt)
-    container.create()
-
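For illustration, a minimal sketch of how the modifier helpers above interpret their arguments (all values hypothetical; a bare stand-in object replaces a Container loaded from the local repository):

from types import SimpleNamespace

# Hypothetical container state
container = SimpleNamespace(depends=['db'], mounts={'data': '/srv/data'}, env={'TZ': 'UTC'})

modify_depend(container, 'cache')         # append a dependency -> ['db', 'cache']
modify_depend(container, '!db')           # leading '!' removes one -> ['cache']
modify_mount(container, 'logs:/var/log')  # add or update a mount
modify_mount(container, 'data:')          # empty mountpoint removes the mount
modify_env(container, 'LANG=C.UTF-8')     # set an environment variable
modify_env(container, 'TZ=')              # empty value removes it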
-def destroy(container_name):
-    # Remove container and its directory
-    container = Container(container_name, False)
-    if container.is_running():
-        container.stop()
-    container.destroy()
-
-def start(container_name, command):
-    # Start the container using init values from its definition
-    Container(container_name).start(command)
-
-def stop(container_name):
-    # Stop the container using halt signal from its definition
-    Container(container_name).stop()
-
-def status(container_name):
-    # Print the current running status of the container
-    print(Container(container_name).get_state().value)
-
-def execute(container_name, command, uid, gid):
-    # Execute a command in the container's namespace
-    result = Container(container_name).execute(command, uid, gid)
-    # Set returncode to that of the command
-    sys.exit(result.returncode)
-
-parser = argparse.ArgumentParser(description='SPOC container manager')
-parser.set_defaults(action=None)
-subparsers = parser.add_subparsers()
-
-parser_list = subparsers.add_parser('list')
-parser_list.set_defaults(action=listing)
-parser_list.add_argument('type', choices=('all', 'running', 'stopped'), default='all', const='all', nargs='?', help='Selected container criteria')
-
-parser_create = subparsers.add_parser('create')
-parser_create.set_defaults(action=create)
-parser_create.add_argument('-d', '--depends', action='append', default=[], help='Add another container as a start dependency')
-parser_create.add_argument('-m', '--mount', action='append', default=[], help='Add mount to the container - format volume:mountpoint[:file]')
-parser_create.add_argument('-e', '--env', action='append', default=[], help='Add environment variable for the container - format KEY=value')
-parser_create.add_argument('-u', '--uid', help='Sets the container init UID')
-parser_create.add_argument('-g', '--gid', help='Sets the container init GID')
-parser_create.add_argument('-c', '--cmd', help='Sets the container init command')
-parser_create.add_argument('-w', '--workdir', help='Sets the container init working directory')
-parser_create.add_argument('-r', '--ready', help='Sets the container ready command')
-parser_create.add_argument('-s', '--stopsig', help='Sets the signal to be sent to init on container shutdown')
-parser_create.add_argument('container', help='Name of the container to create')
-parser_create.add_argument('image', help='Name of the image on which the container should be based')
-
-parser_modify = subparsers.add_parser('modify')
-parser_modify.set_defaults(action=modify)
-parser_modify.add_argument('-d', '--depends', action='append', default=[], help='Add another container as a start dependency - prepend the name with ! to remove the dependency')
-parser_modify.add_argument('-m', '--mount', action='append', default=[], help='Add mount to the container - format volume:mountpoint - specify empty mountpoint to remove the mount')
-parser_modify.add_argument('-e', '--env', action='append', default=[], help='Add environment variable for the container - format KEY=value - specify empty value to remove the env')
-parser_modify.add_argument('-u', '--uid', help='Sets the container init UID')
-parser_modify.add_argument('-g', '--gid', help='Sets the container init GID')
-parser_modify.add_argument('-c', '--cmd', help='Sets the container init command')
-parser_modify.add_argument('-w', '--workdir', help='Sets the container init working directory')
-parser_modify.add_argument('-r', '--ready', help='Sets the container ready command')
-parser_modify.add_argument('-s', '--stopsig', help='Sets the signal to be sent to init on container shutdown')
-parser_modify.add_argument('container', help='Name of the container to modify')
-
-parser_destroy = subparsers.add_parser('destroy')
-parser_destroy.set_defaults(action=destroy)
-parser_destroy.add_argument('container', help='Name of the container to destroy')
-
-parser_start = subparsers.add_parser('start')
-parser_start.set_defaults(action=start)
-parser_start.add_argument('container', help='Name of the container to start')
-parser_start.add_argument('command', nargs=argparse.REMAINDER, help='Command to be run instead of the default init command')
-
-parser_stop = subparsers.add_parser('stop')
-parser_stop.set_defaults(action=stop)
-parser_stop.add_argument('container', help='Name of the container to stop')
-
-parser_status = subparsers.add_parser('status')
-parser_status.set_defaults(action=status)
-parser_status.add_argument('container', help='Name of the container to check')
-
-parser_exec = subparsers.add_parser('exec')
-parser_exec.set_defaults(action=execute)
-parser_exec.add_argument('-u', '--uid', help='Sets the command UID')
-parser_exec.add_argument('-g', '--gid', help='Sets the command GID')
-parser_exec.add_argument('container', help='Name of the container in which to run the command')
-parser_exec.add_argument('command', nargs=argparse.REMAINDER, help='The command to be run')
-
-args = parser.parse_args()
-
-if args.action is listing:
-    listing(args.type)
-elif args.action is create:
-    create(args.container, args.image, args.depends, args.mount, args.env, args.uid, args.gid, args.cmd, args.workdir, args.ready, args.stopsig)
-elif args.action is modify:
-    modify(args.container, args.depends, args.mount, args.env, args.uid, args.gid, args.cmd, args.workdir, args.ready, args.stopsig)
-elif args.action is destroy:
-    destroy(args.container)
-elif args.action is start:
-    start(args.container, args.command)
-elif args.action is stop:
-    stop(args.container)
-elif args.action is status:
-    status(args.container)
-elif args.action is execute:
-    execute(args.container, args.command, args.uid, args.gid)
-else:
-    parser.print_usage()
diff --git a/usr/bin/spoc-hook b/usr/bin/spoc-hook
deleted file mode 100755
index 084bc91..0000000
--- a/usr/bin/spoc-hook
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
-
-import os
-
-from spoc.container import Container
-
-if __name__ == '__main__':
-    hook_type = os.environ['LXC_HOOK_TYPE']
-    container = Container(os.environ['LXC_NAME'])
-    if hook_type == 'pre-start':
-        container.clean_ephemeral_layer()
-        container.mount_rootfs()
-    elif hook_type == 'post-stop':
-        container.unmount_rootfs()
-        container.clean_ephemeral_layer()
diff --git a/usr/bin/spoc-image b/usr/bin/spoc-image
deleted file mode 100755
index 9e42e30..0000000
--- a/usr/bin/spoc-image
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
-
-import argparse
-import os
-import sys
-
-from spoc import repo_local, repo_online, repo_publish
-from spoc.cli import ActionQueue, print_lock, readable_size
-from spoc.config import LOCK_FILE
-from spoc.depsolver import DepSolver
-from spoc.exceptions import ImageNotFoundError
-from spoc.flock import locked
-from spoc.image import Image
-from spoc.imagebuilder import ImageBuilder
-
-def get_image_name(file_path):
-    # Read and return image name from image file
-    with open(file_path) as f:
-        for line in f:
-            if line.startswith('IMAGE '):
-                return line.split()[1]
-    return None
-
-def listing(list_type):
-    # Lists images in a particular state
-    if list_type == 'installed':
-        images = repo_local.get_images()
-    elif list_type == 'online':
-        images = repo_online.get_images()
-    elif list_type == 'published':
-        images = repo_publish.get_images()
-    for image in images:
-        print(image)
-
-@locked(LOCK_FILE, print_lock)
-def download(image_name):
-    # Download and unpack image from online repository
-    queue = ActionQueue()
-    local_images = repo_local.get_images()
-    for layer in repo_online.get_image(image_name)['layers']:
-        if layer not in local_images:
-            queue.download_image(Image(layer, False))
-    queue.process()
-
-@locked(LOCK_FILE, print_lock)
-def delete(image_name):
-    # Remove the image including all images that have it as one of its parents
-    # Check if image is in use
-    used_by = [c for c,d in repo_local.get_containers().items() if image_name in d['layers']]
-    if used_by:
-        sys.exit(f'Error: Image {image_name} is used by container{"s" if len(used_by) > 1 else ""} {", ".join(used_by)}')
-    # Gather layers inheriting from the layer to be removed which should be removed as well
-    retained_layers = set(image for image,definition in repo_local.get_images().items() if image_name not in definition['layers'])
-    remove_layers(retained_layers)
-
-@locked(LOCK_FILE, print_lock)
-def clean():
-    # Remove images which aren't used in any locally defined containers
-    retained_layers = set()
-    for definition in repo_local.get_containers().values():
-        retained_layers.update(definition['layers'])
-    remove_layers(retained_layers)
-
-def remove_layers(retained_layers):
-    # Enqueue removal of images for cleanup
-    depsolver = DepSolver()
-    # Build dependency tree to safely remove the images in order of dependency
-    for image in set(repo_local.get_images()) - retained_layers:
-        image = Image(image)
-        depsolver.add(image.name, set(image.layers) - retained_layers, image)
-    # Enqueue and run the removal actions
-    queue = ActionQueue()
-    for image in reversed(depsolver.solve()):
-        queue.delete_image(image)
-    queue.process()
-
-@locked(LOCK_FILE, print_lock)
-def build(filename, force, do_publish):
-    # Check if a build is needed and attempt to build the image from image file
-    image_name = get_image_name(filename)
-    if force or image_name not in repo_local.get_images():
-        image = Image(image_name, False)
-        print(f'Building image {image_name} from file {os.path.abspath(filename)}')
-        image.delete()
-        image.create(ImageBuilder(), filename)
-        print(f'Image {image_name} built successfully')
-        # If publishing was requested, force publish after successful build
-        force = True
-    else:
-        print(f'Image {image_name} already built, skipping build task')
-    if do_publish:
-        publish(image_name, force)
-
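The build recipe consumed by build() above is a plain text file; the IMAGE line is what get_image_name() scans for, and the remaining directives are handled by ImageBuilder.process_directive() shown later in this diff. A hypothetical recipe (all names invented) might look like:

IMAGE myapp
FROM alpine
COPY overlay /
ENV LANG C.UTF-8
RUN EOF
apk add --no-cache python3
EOF
USER myuser myuser
WORKDIR /srv
CMD /usr/bin/myapp-daemon
READY /bin/true
HALT SIGTERM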
-def publish(image_name, force):
-    # Check if publishing is needed and attempt to publish the image
-    if force or image_name not in repo_publish.get_images():
-        image = Image(image_name)
-        print(f'Publishing image {image_name}')
-        image.unpublish()
-        size, dlsize = image.publish()
-        print(f'Image {image_name} compressed from {readable_size(size)} to {readable_size(dlsize)} and published successfully')
-    else:
-        print(f'Image {image_name} already published, skipping publish task')
-
-def unpublish(image_name):
-    # Remove the image from publish repo
-    Image(image_name, False).unpublish()
-
-parser = argparse.ArgumentParser(description='SPOC image manager')
-parser.set_defaults(action=None)
-subparsers = parser.add_subparsers()
-
-parser_list = subparsers.add_parser('list')
-parser_list.set_defaults(action=listing)
-parser_list.add_argument('type', choices=('installed', 'online', 'published'), default='installed', const='installed', nargs='?', help='Selected repository')
-
-parser_download = subparsers.add_parser('download')
-parser_download.set_defaults(action=download)
-parser_download.add_argument('image', help='Name of the image to download')
-
-parser_delete = subparsers.add_parser('delete')
-parser_delete.set_defaults(action=delete)
-parser_delete.add_argument('image', help='Name of the image to delete')
-
-parser_clean = subparsers.add_parser('clean')
-parser_clean.set_defaults(action=clean)
-
-parser_build = subparsers.add_parser('build')
-parser_build.set_defaults(action=build)
-parser_build.add_argument('-f', '--force', action='store_true', help='Force rebuild already existing image')
-parser_build.add_argument('-p', '--publish', action='store_true', help='Publish the image after successful build')
-parser_build.add_argument('filename', help='Path to the file with build recipe')
-
-parser_publish = subparsers.add_parser('publish')
-parser_publish.set_defaults(action=publish)
-parser_publish.add_argument('-f', '--force', action='store_true', help='Force republish already published image')
-parser_publish.add_argument('image', help='Name of the image to publish')
-
-parser_unpublish = subparsers.add_parser('unpublish')
-parser_unpublish.set_defaults(action=unpublish)
-parser_unpublish.add_argument('image', help='Name of the image to unpublish')
-
-args = parser.parse_args()
-
-if args.action is listing:
-    listing(args.type)
-elif args.action is download:
-    download(args.image)
-elif args.action is delete:
-    delete(args.image)
-elif args.action is clean:
-    clean()
-elif args.action is build:
-    build(args.filename, args.force, args.publish)
-elif args.action is publish:
-    publish(args.image, args.force)
-elif args.action is unpublish:
-    unpublish(args.image)
-else:
-    parser.print_usage()
diff --git a/usr/lib/python3.8/spoc/__init__.py b/usr/lib/python3.8/spoc/__init__.py
deleted file mode 100644
index 40a96af..0000000
--- a/usr/lib/python3.8/spoc/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# -*- coding: utf-8 -*-
diff --git a/usr/lib/python3.8/spoc/app.py b/usr/lib/python3.8/spoc/app.py
deleted file mode 100644
index 6cc3eb8..0000000
--- a/usr/lib/python3.8/spoc/app.py
+++ /dev/null
@@ -1,213 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import copy
-import json
-import os
-import shutil
-import subprocess
-import tarfile
-import urllib.parse
-
-from . import config, repo_local, repo_online, repo_publish
-from .container import Container
-from .image import Image
-
-DEFINITION_MEMBERS = {'version', 'meta', 'autostart', 'containers'}
-
-class App:
-    def __init__(self, name, define_containers=True, load_from_repo=True):
-        self.name = name
-        self.version = None
-        self.app_dir = os.path.join(config.APPS_DIR, name)
-        self.meta = {}
-        self.autostart = False
-        self.containers = []
-        if load_from_repo:
-            self.set_definition(repo_local.get_app(name), define_containers)
-
-    def set_definition(self, definition, define_containers):
-        # Set attributes given by definition
-        for key in DEFINITION_MEMBERS.intersection(definition):
-            setattr(self, key, definition[key])
-        # Populate containers property with actual container objects
-        self.containers = [Container(container, define_containers) for container in definition['containers']]
-
-    def get_definition(self):
-        # Return shallow copy of app definition as dictionary
-        definition = {}
-        for key in DEFINITION_MEMBERS:
-            value = getattr(self, key)
-            if value:
-                definition[key] = copy.copy(value)
-        # Overwrite containers key with list of container names
-        definition['containers'] = [container.name for container in self.containers]
-        return definition
-
-    def download(self, observer=None):
-        # Download the archive with application scripts and install data
-        os.makedirs(config.TMP_APPS_DIR, 0o700, True)
-        archive_url = urllib.parse.urljoin(config.ONLINE_APPS_URL, f'{self.name}.tar.xz')
-        archive_path = os.path.join(config.TMP_APPS_DIR, f'{self.name}.tar.xz')
-        definition = repo_online.get_app(self.name)
-        if observer:
-            observer.units_total = definition['dlsize']
-        repo_online.download_archive(archive_url, archive_path, definition['hash'], observer)
-
-    def unpack_downloaded(self, observer=None):
-        # Unpack downloaded archive with application scripts and install data
-        archive_path = os.path.join(config.TMP_APPS_DIR, f'{self.name}.tar.xz')
-        definition = repo_online.get_app(self.name)
-        if observer:
-            observer.units_total = definition['size']
-        repo_online.unpack_archive(archive_path, config.APPS_DIR, definition['hash'], observer)
-
-    def run_script(self, action):
-        # Runs script for an app, if the script is present
-        script_dir = os.path.join(self.app_dir, action)
-        script_path = os.path.join(self.app_dir, f'{script_dir}.sh')
-        if os.path.exists(script_path):
-            # Run the script in its working directory, if there is one, so it doesn't have to figure out paths to packaged files
-            env = os.environ.copy()
-            env['LAYERS_DIR'] = config.LAYERS_DIR
-            env['VOLUMES_DIR'] = config.VOLUMES_DIR
-            env['APPS_DIR'] = config.APPS_DIR
-            env['LOG_DIR'] = config.LOG_DIR
-            cwd = script_dir if os.path.exists(script_dir) else self.app_dir
-            subprocess.run(script_path, cwd=cwd, env=env, check=True)
-
-    def create_container(self, name, definition):
-        # Create container and enhance its definition (typically mounts) based on application requirements
-        container = Container(name, False)
-        container.set_definition(Image(definition['image']).get_definition())
-        if 'depends' in definition:
-            container.depends = definition['depends']
-        if 'env' in definition:
-            container.env.update(definition['env'])
-        if 'mounts' in definition:
-            container.mounts.update(definition['mounts'])
-        container.create()
-        self.containers.append(container)
-
-    def install(self, observer=None):
-        # Install the application
-        definition = repo_online.get_app(self.name)
-        self.version = definition['version']
-        self.meta = definition['meta']
-        self.run_script('uninstall')
-        # Build containers
-        for container,container_definition in definition['containers'].items():
-            self.create_container(container, container_definition)
-        # Run install script and register the app
-        try:
-            self.run_script('install')
-        except:
-            # Stop all containers if install.sh fails
-            for container in self.containers:
-                container.stop()
-            raise
-        repo_local.register_app(self.name, self.get_definition())
-
-    def update(self, observer=None):
-        # Stop and remove containers
-        for container in self.containers.copy():
-            if container.is_running():
-                container.stop()
-            container.destroy()
-            self.containers.remove(container)
-        # Load online definition
-        definition = repo_online.get_app(self.name)
-        self.version = definition['version']
-        self.meta = definition['meta']
-        # Build containers
-        for container,container_definition in definition['containers'].items():
-            self.create_container(container, container_definition)
-        # Run update script and re-register the app
-        try:
-            self.run_script('update')
-        except:
-            # Stop all containers if update.sh fails
-            for container in self.containers:
-                container.stop()
-            raise
-        repo_local.register_app(self.name, self.get_definition())
-
-    def uninstall(self, observer=None):
-        # Stop and remove containers
-        for container in self.containers:
-            if container.is_running():
-                container.stop()
-            container.destroy()
-        # Run uninstall script
-        self.run_script('uninstall')
-        # Unregister app and remove scripts
-        repo_local.unregister_app(self.name)
-        try:
-            shutil.rmtree(self.app_dir)
-        except FileNotFoundError:
-            pass
-
-    def start(self, observer=None):
-        # Start all application containers
-        if observer:
-            observer.units_total = len(self.containers)
-        for container in self.containers:
-            container.start()
-            if observer:
-                observer.units_done += 1
-
-    def stop(self, observer=None):
-        # Stop all application containers
-        if observer:
-            observer.units_total = len(self.containers)
-        for container in self.containers:
-            container.stop()
-            if observer:
-                observer.units_done += 1
-
-    def status(self):
-        # Return status for all application containers
-        return {container.name:container.get_state() for container in self.containers}
-
-    def is_running(self):
-        # Convenience method to determine if any of the application's containers are running
-        for container in self.containers:
-            if container.is_running():
-                return True
-        return False
-
-    def is_stopped(self):
-        # Convenience method to determine if all of the application's containers are stopped
-        return not self.is_running()
-
-    def set_autostart(self, autostart):
-        # Configure if the application should be automatically started after boot
-        self.autostart = autostart
-        repo_local.register_app(self.name, self.get_definition())
-
-    def publish(self, filename):
-        # Create application archive and register to publish repository
-        builddir = os.path.dirname(filename)
-        os.makedirs(config.PUB_APPS_DIR, 0o755, True)
-        files = repo_publish.TarSizeCounter()
-        archive_path = os.path.join(config.PUB_APPS_DIR, f'{self.name}.tar.xz')
-        with tarfile.open(archive_path, 'w:xz') as tar:
-            for content in ('install', 'install.sh', 'update', 'update.sh', 'uninstall', 'uninstall.sh'):
-                content_path = os.path.join(builddir, content)
-                if os.path.exists(content_path):
-                    tar.add(content_path, os.path.join(self.name, content), filter=files.add_file)
-        with open(filename) as f:
-            definition = json.load(f)
-        definition['size'] = files.size
-        definition['dlsize'] = os.path.getsize(archive_path)
-        definition['hash'] = repo_publish.sign_file(archive_path).hex()
-        repo_publish.register_app(self.name, definition)
-        return (definition['size'], definition['dlsize'])
-
-    def unpublish(self):
-        # Remove the application from publish repository
-        repo_publish.unregister_app(self.name)
-        archive_path = os.path.join(config.PUB_APPS_DIR, f'{self.name}.tar.xz')
-        try:
-            os.unlink(archive_path)
-        except FileNotFoundError:
-            pass
diff --git a/usr/lib/python3.8/spoc/cli.py b/usr/lib/python3.8/spoc/cli.py
deleted file mode 100644
index ffd5e57..0000000
--- a/usr/lib/python3.8/spoc/cli.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import os
-import time
-from concurrent.futures import ThreadPoolExecutor
-from math import floor
-
-SIZE_PREFIXES = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
-
-class ActionItem:
-    def __init__(self, text, action):
-        self.text = text
-        self.action = action
-        self.units_total = 0
-        self.units_done = 0
-
-    def run(self):
-        with ThreadPoolExecutor() as executor:
-            future = executor.submit(self.action, self)
-            while not future.done():
-                time.sleep(0.2)
-                self.print_progress()
-            # Get the result of the future and let it raise an exception, if there was one
-            future.result()
-        self.print_progress('\n')
-
-    def print_progress(self, end='\r'):
-        text = self.text
-        if self.units_total:
-            text = f'{text} ({self.units_done}/{self.units_total}) [{floor(self.units_done/self.units_total*100)} %]'
-        print(f'\x1b[K{text}', end=end)
-
-class ActionQueue:
-    def __init__(self):
-        self.queue = []
-
-    def download_image(self, image):
-        self.queue.append(ActionItem(f'Downloading image {image.name}', image.download))
-        self.queue.append(ActionItem(f'Unpacking image {image.name}', image.unpack_downloaded))
-
-    def delete_image(self, image):
-        self.queue.append(ActionItem(f'Deleting image {image.name}', image.delete))
-
-    def install_app(self, app):
-        self.queue.append(ActionItem(f'Downloading application {app.name}', app.download))
-        self.queue.append(ActionItem(f'Unpacking application {app.name}', app.unpack_downloaded))
-        self.queue.append(ActionItem(f'Installing application {app.name}', app.install))
-
-    def update_app(self, app):
-        self.queue.append(ActionItem(f'Downloading application {app.name}', app.download))
-        self.queue.append(ActionItem(f'Unpacking application {app.name}', app.unpack_downloaded))
-        self.queue.append(ActionItem(f'Updating application {app.name}', app.update))
-
-    def uninstall_app(self, app):
-        self.queue.append(ActionItem(f'Uninstalling application {app.name}', app.uninstall))
-
-    def start_app(self, app):
-        self.queue.append(ActionItem(f'Starting application {app.name}', app.start))
-
-    def stop_app(self, app):
-        self.queue.append(ActionItem(f'Stopping application {app.name}', app.stop))
-
-    def process(self):
-        index = 0
-        queue_length = len(self.queue)
-        for item in self.queue:
-            index += 1
-            item.text = f'[{index}/{queue_length}] {item.text}'
-            item.run()
-
-def readable_size(bytes):
-    i = 0
-    while bytes > 1024:
-        i += 1
-        bytes /= 1024
-    return f'{bytes:.2f} {SIZE_PREFIXES[i]}B'
-
-def print_lock(pid):
-    with open(os.path.join('/proc', pid, 'cmdline')) as f:
-        cmdline = f.read().replace('\0', ' ').strip()
-    print(f'Waiting for lock currently held by process {pid} - {cmdline}')
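A couple of worked examples for readable_size() above (input values hypothetical; the loop divides by 1024 until the value fits the current unit):

print(readable_size(512))      # 512.00 B
print(readable_size(2048))     # 2.00 kB
print(readable_size(1536000))  # 1.46 MB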
diff --git a/usr/lib/python3.8/spoc/config.py b/usr/lib/python3.8/spoc/config.py
deleted file mode 100644
index 3a8f626..0000000
--- a/usr/lib/python3.8/spoc/config.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import configparser
-import os
-import urllib.parse
-
-CONFIG_FILE = '/etc/spoc/spoc.conf'
-
-config = configparser.ConfigParser()
-config.read(CONFIG_FILE)
-
-NETWORK_INTERFACE = config.get('general', 'network-interface', fallback='spocbr0')
-RESOLV_CONF = config.get('general', 'resolv-conf', fallback='/etc/resolv.conf')
-
-DATA_DIR = config.get('general', 'data-dir', fallback='/var/lib/spoc/')
-APPS_DIR = os.path.join(DATA_DIR, 'apps/')
-CONTAINERS_DIR = os.path.join(DATA_DIR, 'containers/')
-LAYERS_DIR = os.path.join(DATA_DIR, 'layers/')
-VOLUMES_DIR = os.path.join(DATA_DIR, 'volumes/')
-HOSTS_FILE = os.path.join(DATA_DIR, 'hosts')
-REPO_FILE = os.path.join(DATA_DIR, 'repository.json')
-
-LOCK_DIR = '/run/lock'
-LOCK_FILE = os.path.join(LOCK_DIR, 'spoc.lock')
-HOSTS_LOCK_FILE = os.path.join(LOCK_DIR, 'spoc-hosts.lock')
-REPO_LOCK_FILE = os.path.join(LOCK_DIR, 'spoc-local.lock')
-
-TMP_DIR = os.path.join(DATA_DIR, 'tmp/')
-TMP_APPS_DIR = os.path.join(TMP_DIR, 'apps/')
-TMP_LAYERS_DIR = os.path.join(TMP_DIR, 'layers/')
-LOG_DIR = config.get('general', 'log-dir', fallback='/var/log/spoc')
-
-PUB_DIR = config.get('publish', 'publish-dir', fallback=os.path.join(DATA_DIR, 'publish'))
-PUB_LAYERS_DIR = os.path.join(PUB_DIR, 'layers/')
-PUB_APPS_DIR = os.path.join(PUB_DIR, 'apps/')
-PUB_REPO_FILE = os.path.join(PUB_DIR, 'repository.json')
-PUB_SIG_FILE = os.path.join(PUB_DIR, 'repository.sig')
-PUB_PRIVKEY_FILE = config.get('publish', 'signing-key', fallback='/etc/spoc/publish.key')
-PUB_LOCK_FILE = os.path.join(LOCK_DIR, 'spoc-publish.lock')
-
-# URLs which are actual directories need to end with a trailing slash
-ONLINE_BASE_URL = '{}/'.format(config.get('repo', 'url', fallback='https://localhost').rstrip('/'))
-ONLINE_LAYERS_URL = urllib.parse.urljoin(ONLINE_BASE_URL, 'layers/')
-ONLINE_APPS_URL = urllib.parse.urljoin(ONLINE_BASE_URL, 'apps/')
-ONLINE_REPO_URL = urllib.parse.urljoin(ONLINE_BASE_URL, 'repository.json')
-ONLINE_SIG_URL = urllib.parse.urljoin(ONLINE_BASE_URL, 'repository.sig')
-ONLINE_PUBKEY = config.get('repo', 'public-key', fallback='')
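The trailing-slash comment above matters because urllib.parse.urljoin() replaces the last path segment when the base URL lacks a trailing slash; a quick illustration:

from urllib.parse import urljoin

urljoin('https://repo.spotter.cz/spoc/', 'layers/')  # https://repo.spotter.cz/spoc/layers/
urljoin('https://repo.spotter.cz/spoc', 'layers/')   # https://repo.spotter.cz/layers/ - segment lost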
diff --git a/usr/lib/python3.8/spoc/container.py b/usr/lib/python3.8/spoc/container.py
deleted file mode 100644
index 86215b1..0000000
--- a/usr/lib/python3.8/spoc/container.py
+++ /dev/null
@@ -1,279 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import copy
-import enum
-import os
-import shlex
-import shutil
-import subprocess
-import time
-from concurrent.futures import ThreadPoolExecutor
-
-from . import config, net, repo_local, templates
-from .depsolver import DepSolver
-from .exceptions import InvalidContainerStateError
-
-# States taken from https://github.com/lxc/lxc/blob/master/src/lxc/state.h
-class ContainerState(enum.Enum):
-    STOPPED = 'STOPPED'
-    STARTING = 'STARTING'
-    RUNNING = 'RUNNING'
-    STOPPING = 'STOPPING'
-    ABORTING = 'ABORTING'
-    FREEZING = 'FREEZING'
-    FROZEN = 'FROZEN'
-    THAWED = 'THAWED'
-    UNKNOWN = 'UNKNOWN'
-
-DEFINITION_MEMBERS = {'build', 'depends', 'layers', 'mounts', 'env', 'uid', 'gid', 'cmd', 'cwd', 'ready', 'halt'}
-
-class Container:
-    def __init__(self, name, load_from_repo=True):
-        self.name = name
-        self.build = False
-        self.depends = []
-        self.layers = []
-        self.mounts = {}
-        self.env = {}
-        self.uid = None
-        self.gid = None
-        self.cmd = None
-        self.cwd = None
-        self.ready = None
-        self.halt = None
-        self.container_path = os.path.join(config.CONTAINERS_DIR, name)
-        self.config_path = os.path.join(self.container_path, 'config')
-        self.rootfs_path = os.path.join(self.container_path, 'rootfs')
-        self.olwork_path = os.path.join(self.container_path, 'olwork')
-        self.ephemeral_layer_path = os.path.join(self.container_path, 'ephemeral')
-        self.log_path = os.path.join(config.LOG_DIR, f'{name}.log')
-        if load_from_repo:
-            self.set_definition(repo_local.get_container(name))
-
-    def set_definition(self, definition):
-        # Set attributes given by definition
-        for key in DEFINITION_MEMBERS.intersection(definition):
-            setattr(self, key, definition[key])
-
-    def get_definition(self):
-        # Return shallow copy of container definition as dictionary
-        definition = {}
-        for key in DEFINITION_MEMBERS:
-            value = getattr(self, key)
-            if value:
-                definition[key] = copy.copy(value)
-        return definition
-
-    def get_state(self):
-        # Get current state of the container, uses LXC monitor socket accessible only in the container's namespace
-        try:
-            state = subprocess.run(['lxc-info', '-sH', '-P', config.CONTAINERS_DIR, self.name], capture_output=True, check=True)
-            return ContainerState[state.stdout.strip().decode()]
-        except subprocess.CalledProcessError:
-            return ContainerState.UNKNOWN
-
-    def is_running(self):
-        # Convenience method to determine if the container is running
-        return self.get_state() == ContainerState.RUNNING
-
-    def is_stopped(self):
-        # Convenience method to determine if the container is stopped
-        return self.get_state() == ContainerState.STOPPED
-
-    def await_state(self, awaited_state):
-        # Block execution until the container reaches the desired state or until timeout
-        try:
-            subprocess.run(['lxc-wait', '-P', config.CONTAINERS_DIR, '-s', awaited_state.value, '-t', '30', self.name], check=True)
-        except subprocess.CalledProcessError:
-            # Sometimes LXC decides to return rc 1 even on successful state change
-            actual_state = self.get_state()
-            if actual_state != awaited_state:
-                raise InvalidContainerStateError(self.name, actual_state)
-
-    def mount_rootfs(self):
-        # Prepares container rootfs
-        # Called in lxc.hook.pre-start as the standard mount options are insufficient for rootless containers (see notes for overlayfs below)
-        layers = [os.path.join(config.LAYERS_DIR, layer) for layer in self.layers]
-        if not self.build:
-            # Add ephemeral layer if the container is not created as part of build process
-            layers.append(self.ephemeral_layer_path)
-        if len(layers) > 1:
-            # Multiple layers require overlayfs, however non-root users don't normally have capability to create overlayfs mounts - https://www.spinics.net/lists/linux-fsdevel/msg105877.html
-            # Standard Linux kernels currently don't support overlay mounts in user namespaces (lxc.hook.pre-mount)
-            # The exception is Ubuntu or custom patches such as https://salsa.debian.org/kernel-team/linux/blob/master/debian/patches/debian/overlayfs-permit-mounts-in-userns.patch
-            # Possible alternative is fuse-overlayfs, which doesn't work well on Alpine (and it's FUSE anyway, so it needs an extra service and a process for each mount)
-            # Another alternative would be to mount in the namespace via -N option, but LXC doesn't expose PID or namespaces of the process during container setup
-            overlay_opts = f'upperdir={layers[-1]},lowerdir={":".join(reversed(layers[:-1]))},workdir={self.olwork_path}'
-            subprocess.run(['mount', '-t', 'overlay', '-o', overlay_opts, 'none', self.rootfs_path])
-        else:
-            # We only have a single layer, no overlay needed
-            subprocess.run(['mount', '--bind', layers[0], self.rootfs_path])
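To make the overlay construction above concrete, a small sketch with hypothetical paths - the last (ephemeral) layer becomes the writable upperdir, and the remaining layers are stacked read-only as lowerdir, topmost first:

layers = ['/var/lib/spoc/layers/alpine',
          '/var/lib/spoc/layers/alpine-python',
          '/var/lib/spoc/containers/myapp/ephemeral']  # hypothetical layer stack
overlay_opts = f'upperdir={layers[-1]},lowerdir={":".join(reversed(layers[:-1]))},workdir=/var/lib/spoc/containers/myapp/olwork'
# upperdir=/var/lib/spoc/containers/myapp/ephemeral,lowerdir=/var/lib/spoc/layers/alpine-python:/var/lib/spoc/layers/alpine,workdir=/var/lib/spoc/containers/myapp/olwork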
-    def unmount_rootfs(self):
-        # Recursively unmounts container rootfs
-        # Called in lxc.hook.post-stop
-        # For unprivileged containers it could theoretically be called already in lxc.hook.start-host, as the user namespace clones the mounts,
-        # so they are not needed in the parent namespace anymore, but removing rootfs on container stop seems more intuitive
-        subprocess.run(['umount', '-R', self.rootfs_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
-
-    def clean_ephemeral_layer(self):
-        # Cleans container ephemeral layer. Called in lxc.hook.post-stop and lxc.hook.pre-start in case of unclean shutdown
-        # This is done early in the container start process, so the inode of the ephemeral directory must remain unchanged
-        for item in os.scandir(self.ephemeral_layer_path):
-            shutil.rmtree(item.path) if item.is_dir() else os.unlink(item.path)
-
-    def get_mount_entry(self, volume, mountpoint):
-        mount_type = 'dir'
-        if mountpoint.endswith(':file'):
-            mount_type = 'file'
-            mountpoint = mountpoint[:-5]
-        return f'lxc.mount.entry = {os.path.join(config.VOLUMES_DIR, volume)} {mountpoint} none bind,create={mount_type} 0 0'
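Sample output of get_mount_entry() above, assuming the default data-dir from spoc.conf (volume and mountpoint names hypothetical):

container.get_mount_entry('data', 'srv/data')
# lxc.mount.entry = /var/lib/spoc/volumes/data srv/data none bind,create=dir 0 0
container.get_mount_entry('config', 'etc/app.conf:file')
# lxc.mount.entry = /var/lib/spoc/volumes/config etc/app.conf none bind,create=file 0 0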
-    def create(self):
-        # Create container directories
-        os.makedirs(self.rootfs_path, 0o755, True)
-        os.makedirs(self.olwork_path, 0o755, True)
-        os.makedirs(self.ephemeral_layer_path, 0o755, True)
-        os.makedirs(config.LOG_DIR, 0o750, True)
-        # Change UID/GID of the ephemeral layer directory
-        # Chown is possible only when the process is running as root, for user namespaces, see https://linuxcontainers.org/lxc/manpages/man1/lxc-usernsexec.1.html
-        os.chown(self.ephemeral_layer_path, 100000, 100000)
-        # Create container configuration file based on the container definition
-        mounts = '\n'.join([self.get_mount_entry(v, m) for v,m in self.mounts.items()])
-        env = '\n'.join([f'lxc.environment = {k}={v}' for k,v in self.env.items()])
-        uid = self.uid if self.uid else 0
-        gid = self.gid if self.gid else 0
-        cmd = self.cmd if self.cmd else '/sbin/init'
-        cwd = self.cwd if self.cwd else '/'
-        halt = self.halt if self.halt else 'SIGINT'
-        ip_address, ip_netmask, ip_gateway = net.request_ip(self.name)
-        # Write LXC configuration file
-        with open(self.config_path, 'w') as f:
-            f.write(templates.LXC_CONTAINER_TEMPLATE.format(name=self.name,
-                                                            interface=config.NETWORK_INTERFACE,
-                                                            resolv_conf=config.RESOLV_CONF,
-                                                            ip_address=ip_address,
-                                                            ip_netmask=ip_netmask,
-                                                            ip_gateway=ip_gateway,
-                                                            rootfs=self.rootfs_path,
-                                                            hosts=config.HOSTS_FILE,
-                                                            mounts=mounts,
-                                                            env=env,
-                                                            uid=uid,
-                                                            gid=gid,
-                                                            cmd=cmd,
-                                                            cwd=cwd,
-                                                            halt=halt,
-                                                            log=self.log_path))
-        repo_local.register_container(self.name, self.get_definition())
-
-    def destroy(self):
-        repo_local.unregister_container(self.name)
-        self.unmount_rootfs()
-        try:
-            shutil.rmtree(self.container_path)
-        except FileNotFoundError:
-            pass
-        try:
-            os.unlink(self.log_path)
-        except FileNotFoundError:
-            pass
-        # Release the IP address from global hosts configuration
-        net.release_ip(self.name)
-
-    def start(self, command=None):
-        # Start the container including its dependencies
-        depsolver = DepSolver()
-        self.get_start_dependencies(depsolver)
-        for dependency in depsolver.solve():
-            if not dependency.is_running():
-                # Pass start command only to the current container
-                dependency.do_start(command if dependency.name == self.name else None)
-
-    def do_start(self, command=None):
-        cmd = ['--']+command if command else []
-        # Start the current container, wait until it is reported as started and execute application readiness check
-        subprocess.Popen(['lxc-start', '-P', config.CONTAINERS_DIR, self.name]+cmd)
-        self.await_state(ContainerState.RUNNING)
-        # Launch the readiness check in a separate thread, so it can be reliably cancelled after timeout
-        with ThreadPoolExecutor(max_workers=1) as pool:
-            # Create anonymous object to pass the task cancellation information
-            guard = type('', (object,), {'cancel': False})()
-            future = pool.submit(self.check_readiness, guard)
-            future.result(timeout=30)
-            guard.cancel = True
-
-    def check_readiness(self, guard):
-        # Run spoc.init.ready until it returns return code 0 or the guard cancels the loop
-        ready_cmd = shlex.split(self.ready) if self.ready else ['/bin/true']
-        while not guard.cancel:
-            state = self.get_state()
-            if state != ContainerState.RUNNING:
-                raise InvalidContainerStateError(self.name, state)
-            check = subprocess.run(['lxc-attach', '-P', config.CONTAINERS_DIR, '--clear-env', self.name, '--']+ready_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, timeout=30)
-            if check.returncode == 0:
-                break
-            time.sleep(0.25)
-
-    def stop(self):
-        # Stop the containers depending on the current container
-        depsolver = DepSolver()
-        self.get_stop_dependencies(depsolver)
-        for dependency in depsolver.solve():
-            if not dependency.is_stopped():
-                dependency.do_stop()
-
-    def do_stop(self):
-        # Stop the current container and wait until it stops completely
-        lxc_stop = subprocess.Popen(['lxc-stop', '-P', config.CONTAINERS_DIR, self.name])
-        self.await_state(ContainerState.STOPPED)
-        # Reap the lxc-stop process
-        lxc_stop.wait()
-
-    def execute(self, cmd, uid=None, gid=None, **kwargs):
-        # If the container is starting or stopping, wait until the operation is finished
-        state = self.get_state()
-        if state == ContainerState.STARTING:
-            self.await_state(ContainerState.RUNNING)
-            state = self.get_state()
-        elif state == ContainerState.STOPPING:
-            self.await_state(ContainerState.STOPPED)
-            state = self.get_state()
-        # Resolve UID/GID, if they have been given
-        uidgid_param = []
-        uid,gid = self.get_uidgid(uid, gid)
-        if uid:
-            uidgid_param.extend(('-u', uid))
-        if gid:
-            uidgid_param.extend(('-g', gid))
-        # If the container is stopped, use lxc-execute, otherwise use lxc-attach
-        if state == ContainerState.STOPPED:
-            return subprocess.run(['lxc-execute', '-P', config.CONTAINERS_DIR]+uidgid_param+[self.name, '--']+cmd, **kwargs)
-        elif state == ContainerState.RUNNING:
-            return subprocess.run(['lxc-attach', '-P', config.CONTAINERS_DIR, '--clear-env']+uidgid_param+[self.name, '--']+cmd, **kwargs)
-        else:
-            raise InvalidContainerStateError(self.name, state)
-
-    def get_uidgid(self, user=None, group=None):
-        # Helper function to get UID/GID of a user/group from the container
-        uid,gid = None,None
-        if user:
-            uid_entry = self.execute(['/usr/bin/getent', 'passwd', user], capture_output=True, check=True).stdout.decode().split(':')
-            uid,gid = uid_entry[2],uid_entry[3]
-        if group:
-            gid = self.execute(['/usr/bin/getent', 'group', group], capture_output=True, check=True).stdout.decode().split(':')[2]
-        return (uid,gid)
-
-    def get_start_dependencies(self, depsolver):
-        depsolver.add(self.name, self.depends, self)
-        for dependency in self.depends:
-            Container(dependency).get_start_dependencies(depsolver)
-
-    def get_stop_dependencies(self, depsolver):
-        reverse_depends = []
-        for name, definition in repo_local.get_containers().items():
-            if 'depends' in definition and self.name in definition['depends']:
-                reverse_depends.append(name)
-        depsolver.add(self.name, reverse_depends, self)
-        for dependency in reverse_depends:
-            Container(dependency).get_stop_dependencies(depsolver)
diff --git a/usr/lib/python3.8/spoc/depsolver.py b/usr/lib/python3.8/spoc/depsolver.py
deleted file mode 100644
index d0587d3..0000000
--- a/usr/lib/python3.8/spoc/depsolver.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from .exceptions import CircularDependencyError
-
-class Node:
-    def __init__(self, name, depends, instance):
-        self.name = name
-        # Remove the node from its own dependencies
-        self.depends = set(depends) - {name}
-        self.instance = instance
-
-class DepSolver:
-    def __init__(self):
-        self.nodes = []
-
-    def add(self, name, depends, instance):
-        self.nodes.append(Node(name, depends, instance))
-
-    def solve(self):
-        # Returns a list of instances ordered by dependency
-        deps = {node.name: node for node in self.nodes}
-        result = []
-        while deps:
-            # Get a batch of nodes not depending on anything (or originally depending on already resolved nodes)
-            batch = {name for name, node in deps.items() if not node.depends}
-            if not batch:
-                # If there are no such nodes, we have found a circular dependency
-                raise CircularDependencyError(deps)
-            # Add instances tied to the resolved keys to the result and remove resolved keys from the dependency map
-            for name in batch:
-                result.append(deps[name].instance)
-                del deps[name]
-            # Remove resolved keys from the dependencies of yet unresolved nodes
-            for node in deps.values():
-                node.depends -= batch
-        return result
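A short usage sketch of the solver above (node names hypothetical): solve() yields dependencies before their dependents, which is why container start iterates the result forwards while teardown paths such as remove_layers() in spoc-image iterate it reversed:

solver = DepSolver()
solver.add('app', {'db', 'cache'}, 'app instance')
solver.add('cache', {'db'}, 'cache instance')
solver.add('db', set(), 'db instance')
print(solver.solve())  # ['db instance', 'cache instance', 'app instance']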
- def __init__(self, container_name, container_state): - self.container_name = container_name - self.container_state = container_state - - def __str__(self): - return f'Container "{self.container_name}" reached unexpected state {self.container_state}' - -class CircularDependencyError(Exception): - # Dependecy solver has found a circular dependency between nodes - def __init__(self, deps): - self.deps = deps - - def __str__(self): - result = ['Dependency resolution failed due to circular dependency. Dumping unresolved dependencies:'] - result.extend(f'{dep} => {node.depends}' for dep, node in self.deps.items()) - return '\n'.join(result) diff --git a/usr/lib/python3.8/spoc/image.py b/usr/lib/python3.8/spoc/image.py deleted file mode 100644 index 430228b..0000000 --- a/usr/lib/python3.8/spoc/image.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- coding: utf-8 -*- - -import copy -import os -import shutil -import tarfile -import urllib.parse - -from . import config, repo_local, repo_online, repo_publish - -DEFINITION_MEMBERS = {'layers', 'env', 'uid', 'gid', 'cmd', 'cwd', 'ready', 'halt'} - -class Image: - def __init__(self, name, load_from_repo=True): - self.name = name - self.layer_path = os.path.join(config.LAYERS_DIR, name) - self.layers = [name] - self.env = {} - self.uid = None - self.gid = None - self.cmd = None - self.cwd = None - self.ready = None - self.halt = None - if load_from_repo: - self.set_definition(repo_local.get_image(name)) - - def set_definition(self, definition): - # Set attributes given by definition - for key in DEFINITION_MEMBERS.intersection(definition): - setattr(self, key, definition[key]) - - def get_definition(self): - # Return shallow copy of image definition as dictionary - definition = {} - for key in DEFINITION_MEMBERS: - value = getattr(self, key) - if value: - definition[key] = copy.copy(value) - return definition - - def create(self, imagebuilder, filename): - # Build the container from image file and save to local repository - # Chown is possible only when the process is running as root, for user namespaces, see https://linuxcontainers.org/lxc/manpages/man1/lxc-usernsexec.1.html - os.makedirs(self.layer_path, 0o755, True) - os.chown(self.layer_path, 100000, 100000) - imagebuilder.build(self, filename) - repo_local.register_image(self.name, self.get_definition()) - - def delete(self, observer=None): - # Remove the layer from local repository and filesystem - repo_local.unregister_image(self.name) - try: - shutil.rmtree(self.layer_path) - except FileNotFoundError: - pass - - def download(self, observer=None): - # Download the archive with layer data - os.makedirs(config.TMP_LAYERS_DIR, 0o700, True) - archive_url = urllib.parse.urljoin(config.ONLINE_LAYERS_URL, f'{self.name}.tar.xz') - archive_path = os.path.join(config.TMP_LAYERS_DIR, f'{self.name}.tar.xz') - definition = repo_online.get_image(self.name) - if observer: - observer.units_total = definition['dlsize'] - repo_online.download_archive(archive_url, archive_path, definition['hash'], observer) - - def unpack_downloaded(self, observer=None): - # Unpack downloaded archive with layer data - archive_path = os.path.join(config.TMP_LAYERS_DIR, f'{self.name}.tar.xz') - definition = repo_online.get_image(self.name) - if observer: - observer.units_total = definition['size'] - repo_online.unpack_archive(archive_path, config.LAYERS_DIR, definition['hash'], observer) - self.set_definition(definition) - repo_local.register_image(self.name, definition) - - def publish(self): - # Create layer archive and register to 
publish repository - os.makedirs(config.PUB_LAYERS_DIR, 0o755, True) - files = repo_publish.TarSizeCounter() - archive_path = os.path.join(config.PUB_LAYERS_DIR, f'{self.name}.tar.xz') - with tarfile.open(archive_path, 'w:xz') as tar: - tar.add(self.layer_path, self.name, filter=files.add_file) - definition = self.get_definition() - definition['size'] = files.size - definition['dlsize'] = os.path.getsize(archive_path) - definition['hash'] = repo_publish.sign_file(archive_path).hex() - repo_publish.register_image(self.name, definition) - return (definition['size'], definition['dlsize']) - - def unpublish(self): - # Remove the layer from publish repository - repo_publish.unregister_image(self.name) - archive_path = os.path.join(config.PUB_LAYERS_DIR, f'{self.name}.tar.xz') - try: - os.unlink(archive_path) - except FileNotFoundError: - pass diff --git a/usr/lib/python3.8/spoc/imagebuilder.py b/usr/lib/python3.8/spoc/imagebuilder.py deleted file mode 100644 index 37b9322..0000000 --- a/usr/lib/python3.8/spoc/imagebuilder.py +++ /dev/null @@ -1,177 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import requests -import shutil -import stat -import tarfile -import tempfile -import zipfile - -from .container import Container -from .image import Image - -class ImageBuilder: - def build(self, image, filename): - # Reset internal state, read and process lines from filename - self.image = image - self.builddir = os.path.dirname(filename) - self.script_eof = None - self.script_lines = [] - with open(filename, 'r') as f: - for line in f: - self.process_line(line.strip()) - - def process_line(self, line): - # Parse a line from image file - if self.script_eof: - if line == self.script_eof: - self.script_eof = None - self.run_script(self.script_lines) - else: - self.script_lines.append(line) - elif line: - self.process_directive(*line.split(None, 1)) - - def process_directive(self, directive, args): - # Process a directive from image file - if 'RUN' == directive: - self.script_lines = [] - self.script_eof = args - elif 'FROM' == directive: - # Set the values of image from which this one inherits - self.image.set_definition(Image(args).get_definition()) - self.image.layers.append(self.image.name) - elif 'COPY' == directive: - srcdst = args.split() - self.copy_files(srcdst[0], srcdst[1] if len(srcdst) > 1 else '') - elif 'ENV' == directive: - # Sets/unsets environment variable - self.set_env(*args.split(None, 1)) - elif 'USER' == directive: - # Sets init UID / GID - self.set_uidgid(*args.split()) - elif 'CMD' == directive: - # Sets init command - self.image.cmd = args - elif 'WORKDIR' == directive: - # Sets init working directory - self.image.cwd = args - elif 'HALT' == directive: - # Sets signal to be sent to init when stopping the container - self.image.halt = args - elif 'READY' == directive: - # Sets a command to check readiness of the container after it has been started - self.image.ready = args - - def run_script(self, script_lines): - # Creates a temporary container, runs a script in its namespace, and stores the files modified by it as part of the layer - # Note: If USER or WORKDIR directive has already been set, the command is run under that UID/GID or working directory - script_fd, script_path = tempfile.mkstemp(suffix='.sh', dir=self.image.layer_path, text=True) - script_name = os.path.basename(script_path) - script_lines = '\n'.join(script_lines) - with os.fdopen(script_fd, 'w') as script: - script.write(f'#!/bin/sh\nset -ev\n\n{script_lines}\n') - os.chmod(script_path, 0o755) - 
diff --git a/usr/lib/python3.8/spoc/imagebuilder.py b/usr/lib/python3.8/spoc/imagebuilder.py
deleted file mode 100644
index 37b9322..0000000
--- a/usr/lib/python3.8/spoc/imagebuilder.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import os
-import requests
-import shutil
-import stat
-import tarfile
-import tempfile
-import zipfile
-
-from .container import Container
-from .image import Image
-
-class ImageBuilder:
-    def build(self, image, filename):
-        # Reset internal state, read and process lines from filename
-        self.image = image
-        self.builddir = os.path.dirname(filename)
-        self.script_eof = None
-        self.script_lines = []
-        with open(filename, 'r') as f:
-            for line in f:
-                self.process_line(line.strip())
-
-    def process_line(self, line):
-        # Parse a line from image file
-        if self.script_eof:
-            if line == self.script_eof:
-                self.script_eof = None
-                self.run_script(self.script_lines)
-            else:
-                self.script_lines.append(line)
-        elif line:
-            self.process_directive(*line.split(None, 1))
-
-    def process_directive(self, directive, args):
-        # Process a directive from image file
-        if 'RUN' == directive:
-            self.script_lines = []
-            self.script_eof = args
-        elif 'FROM' == directive:
-            # Set the values of image from which this one inherits
-            self.image.set_definition(Image(args).get_definition())
-            self.image.layers.append(self.image.name)
-        elif 'COPY' == directive:
-            srcdst = args.split()
-            self.copy_files(srcdst[0], srcdst[1] if len(srcdst) > 1 else '')
-        elif 'ENV' == directive:
-            # Sets/unsets environment variable
-            self.set_env(*args.split(None, 1))
-        elif 'USER' == directive:
-            # Sets init UID / GID
-            self.set_uidgid(*args.split())
-        elif 'CMD' == directive:
-            # Sets init command
-            self.image.cmd = args
-        elif 'WORKDIR' == directive:
-            # Sets init working directory
-            self.image.cwd = args
-        elif 'HALT' == directive:
-            # Sets signal to be sent to init when stopping the container
-            self.image.halt = args
-        elif 'READY' == directive:
-            # Sets a command to check readiness of the container after it has been started
-            self.image.ready = args
-
-    def run_script(self, script_lines):
-        # Creates a temporary container, runs a script in its namespace, and stores the files modified by it as part of the layer
-        # Note: If USER or WORKDIR directive has already been set, the command is run under that UID/GID or working directory
-        script_fd, script_path = tempfile.mkstemp(suffix='.sh', dir=self.image.layer_path, text=True)
-        script_name = os.path.basename(script_path)
-        script_lines = '\n'.join(script_lines)
-        with os.fdopen(script_fd, 'w') as script:
-            script.write(f'#!/bin/sh\nset -ev\n\n{script_lines}\n')
-        os.chmod(script_path, 0o755)
-        os.chown(script_path, 100000, 100000)
-        # Create a temporary container from the current image definition and execute the script within the container
-        container = Container(self.image.name, False)
-        container.set_definition(self.image.get_definition())
-        container.build = True
-        container.create()
-        container.execute(['/bin/sh', '-lc', os.path.join('/', script_name)], check=True)
-        container.destroy()
-        os.unlink(script_path)
-
-    def set_env(self, key, value=None):
-        # Set or unset environment variable
-        if value:
-            self.image.env[key] = value
-        else:
-            try:
-                del self.image.env[key]
-            except KeyError:
-                pass
-
-    def set_uidgid(self, uid, gid=''):
-        # Set UID/GID for init
-        if not uid.isdigit() or not gid.isdigit():
-            # Resolve the UID/GID from container if either of them is entered as string
-            container = Container(self.image.name, False)
-            container.set_definition(self.image.get_definition())
-            container.create()
-            uid, gid = container.get_uidgid(uid, gid)
-            container.destroy()
-        self.image.uid = uid
-        self.image.gid = gid
-
-    def copy_files(self, src, dst):
-        # Copy files from the host or download them from a http(s) URL
-        dst = os.path.join(self.image.layer_path, dst.lstrip('/'))
-        if src.startswith('http://') or src.startswith('https://'):
-            unpack_http_archive(src, dst)
-        else:
-            src = os.path.join(self.builddir, src)
-            if os.path.isdir(src):
-                copy_tree(src, dst)
-            else:
-                shutil.copy2(src, dst)
-        # Shift UID/GID of the files to the unprivileged range
-        shift_uid(dst, os.stat(dst, follow_symlinks=False))
-
-def unpack_http_archive(src, dst):
-    # Decompress an archive downloaded via http(s)
-    with tempfile.TemporaryFile() as tmp_archive:
-        # Download the file via http(s) and store as temporary file
-        with requests.Session() as session:
-            resource = session.get(src, stream=True)
-            resource.raise_for_status()
-            for chunk in resource.iter_content(chunk_size=None):
-                if chunk:
-                    tmp_archive.write(chunk)
-        # Check the magic bytes to determine if the file is zip
-        tmp_archive.seek(0)
-        is_zip = zipfile.is_zipfile(tmp_archive)
-        # Extract the file. If it is not zip, assume tar (bzip2, gzip or xz)
-        tmp_archive.seek(0)
-        if is_zip:
-            with zipfile.ZipFile(tmp_archive) as zip:
-                zip.extractall(dst)
-        else:
-            with tarfile.open(fileobj=tmp_archive) as tar:
-                tar.extractall(dst, numeric_owner=True)
-
-def copy_tree(src, dst):
-    # Copy directory tree from host to container, leaving the existing modes and attributes unchanged,
-    # which is crucial e.g. whenever anything is copied into /tmp
-    # This function is a stripped and customized variant of shutil.copytree()
-    for srcentry in os.scandir(src):
-        dstname = os.path.join(dst, srcentry.name)
-        is_new = not os.path.exists(dstname)
-        if srcentry.is_dir():
-            if is_new:
-                os.mkdir(dstname)
-            copy_tree(srcentry, dstname)
-        else:
-            shutil.copy2(srcentry, dstname)
-        if is_new:
-            shutil.copystat(srcentry, dstname, follow_symlinks=False)
-
-def shift_uid(path, path_stat):
-    # Shifts UID/GID of a file or a directory and its contents to the unprivileged range
-    # The function parameters could arguably be more friendly, but os.scandir() already calls stat() on the entries,
-    # so it would be wasteful not to reuse them for a considerable performance gain
-    uid = path_stat.st_uid
-    gid = path_stat.st_gid
-    do_chown = False
-    if uid < 100000:
-        uid = uid + 100000
-        do_chown = True
-    if gid < 100000:
-        gid = gid + 100000
-        do_chown = True
-    if do_chown:
-        os.chown(path, uid, gid, follow_symlinks=False)
-    if stat.S_ISDIR(path_stat.st_mode):
-        for entry in os.scandir(path):
-            shift_uid(entry.path, entry.stat(follow_symlinks=False))
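No image file ships with this hunk, so the following hypothetical one exercises every directive the parser above accepts. RUN buffers lines until its heredoc-style terminator (here EOF), and USER with non-numeric names makes set_uidgid() resolve them inside a temporary container:

    FROM alpine
    COPY overlay /
    ENV NODE_ENV production
    RUN EOF
    apk --no-cache add nodejs
    adduser -S app
    EOF
    USER app app
    WORKDIR /srv/app
    CMD node index.js
    READY pgrep -f index.js
    HALT SIGTERM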
diff --git a/usr/lib/python3.8/spoc/net.py b/usr/lib/python3.8/spoc/net.py
deleted file mode 100644
index b85aabf..0000000
--- a/usr/lib/python3.8/spoc/net.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import fcntl
-import ipaddress
-import os
-import socket
-import struct
-
-from . import config
-from .flock import locked
-
-# ioctl magic constants taken from https://git.musl-libc.org/cgit/musl/tree/include/sys/ioctl.h (same as glibc)
-IOCTL_SIOCGIFADDR = 0x8915
-IOCTL_SIOCGIFNETMASK = 0x891b
-
-leases = {}
-mtime = None
-
-@locked(config.HOSTS_LOCK_FILE)
-def load_leases():
-    # Read and parse all IP-hostname pairs from the global hosts file
-    global leases
-    global mtime
-    try:
-        file_mtime = os.stat(config.HOSTS_FILE).st_mtime
-        if mtime != file_mtime:
-            with open(config.HOSTS_FILE, 'r') as f:
-                leases = [lease.strip().split(None, 1) for lease in f]
-            leases = {ip: hostname for ip, hostname in leases}
-            mtime = file_mtime
-    except FileNotFoundError:
-        # If the file doesn't exist, create it with localhost and container host as default records
-        interface = get_bridge_interface()
-        leases = {'127.0.0.1': 'localhost', str(interface.ip): 'host'}
-
-@locked(config.HOSTS_LOCK_FILE)
-def save_leases():
-    # Write all IP-hostname pairs to the global hosts file
-    global mtime
-    with open(config.HOSTS_FILE, 'w') as f:
-        for ip, hostname in sorted(leases.items(), key=lambda lease: socket.inet_aton(lease[0])):
-            f.write(f'{ip} {hostname}\n')
-    mtime = os.stat(config.HOSTS_FILE).st_mtime
-
-def get_bridge_interface():
-    # Returns the bridge interface's IP address and netmask
-    with socket.socket(socket.AF_INET) as sock:
-        # Get IPv4Interface for given interface name
-        packed_ifname = struct.pack('256s', config.NETWORK_INTERFACE.encode())
-        ip = socket.inet_ntoa(fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFADDR, packed_ifname)[20:24])
-        netmask = socket.inet_ntoa(fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFNETMASK, packed_ifname)[20:24])
-        return ipaddress.IPv4Interface(f'{ip}/{netmask}')
-
-def get_ip(container_name):
-    load_leases()
-    for ip, hostname in leases.items():
-        if hostname == container_name:
-            return ip
-    return None
-
-def request_ip(container_name):
-    # Check whether an IP has already been leased for the hostname
-    interface = get_bridge_interface()
-    load_leases()
-    for ip in leases:
-        if leases[ip] == container_name:
-            return (ip, str(interface.network.prefixlen), str(interface.ip))
-    # If not, get the first unassigned IP from the interface's network
-    for ip in interface.network.hosts():
-        ip = str(ip)
-        if ip not in leases:
-            leases[ip] = container_name
-            save_leases()
-            return (ip, str(interface.network.prefixlen), str(interface.ip))
-
-def release_ip(container_name):
-    # Delete the lease from hosts file
-    global leases
-    load_leases()
-    leases = {ip: h for ip, h in leases.items() if h != container_name}
-    save_leases()
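save_leases() serializes the leases as a plain hosts file, one "IP hostname" pair per line, sorted numerically by address. A hypothetical file after two containers have requested IPs; the subnet depends on how the bridge named by config.NETWORK_INTERFACE is configured, so these values are made up:

    127.0.0.1 localhost
    10.0.3.1 host
    10.0.3.2 mariadb
    10.0.3.3 redmine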
diff --git a/usr/lib/python3.8/spoc/repo_local.py b/usr/lib/python3.8/spoc/repo_local.py
deleted file mode 100644
index 5ffb5ca..0000000
--- a/usr/lib/python3.8/spoc/repo_local.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import fcntl
-import json
-import os
-
-from . import config
-from .exceptions import AppNotFoundError, ContainerNotFoundError, ImageNotFoundError
-from .flock import locked
-
-TYPE_APP = 'apps'
-TYPE_CONTAINER = 'containers'
-TYPE_IMAGE = 'images'
-
-data = {TYPE_IMAGE: {}, TYPE_CONTAINER: {}, TYPE_APP: {}}
-mtime = 0
-
-def load():
-    global data
-    global mtime
-    try:
-        file_mtime = os.stat(config.REPO_FILE).st_mtime
-        if mtime != file_mtime:
-            with open(config.REPO_FILE) as f:
-                data = json.load(f)
-            mtime = file_mtime
-    except FileNotFoundError:
-        pass
-
-def save():
-    global mtime
-    with open(config.REPO_FILE, 'w') as f:
-        json.dump(data, f, sort_keys=True, indent=4)
-    mtime = os.stat(config.REPO_FILE).st_mtime
-
-@locked(config.REPO_LOCK_FILE)
-def get_entries(entry_type):
-    load()
-    return data[entry_type]
-
-def get_entry(entry_type, name, exception):
-    try:
-        return get_entries(entry_type)[name]
-    except KeyError as e:
-        raise exception(name) from e
-
-@locked(config.REPO_LOCK_FILE)
-def add_entry(entry_type, name, definition):
-    load()
-    data[entry_type][name] = definition
-    save()
-
-@locked(config.REPO_LOCK_FILE)
-def delete_entry(entry_type, name):
-    load()
-    try:
-        del data[entry_type][name]
-        save()
-    except KeyError:
-        pass
-
-def get_images():
-    return get_entries(TYPE_IMAGE)
-
-def get_image(image_name):
-    return get_entry(TYPE_IMAGE, image_name, ImageNotFoundError)
-
-def register_image(image_name, definition):
-    add_entry(TYPE_IMAGE, image_name, definition)
-
-def unregister_image(image_name):
-    delete_entry(TYPE_IMAGE, image_name)
-
-def get_containers():
-    return get_entries(TYPE_CONTAINER)
-
-def get_container(container_name):
-    return get_entry(TYPE_CONTAINER, container_name, ContainerNotFoundError)
-
-def register_container(container_name, definition):
-    add_entry(TYPE_CONTAINER, container_name, definition)
-
-def unregister_container(container_name):
-    delete_entry(TYPE_CONTAINER, container_name)
-
-def get_apps():
-    return get_entries(TYPE_APP)
-
-def get_app(app_name):
-    return get_entry(TYPE_APP, app_name, AppNotFoundError)
-
-def register_app(app_name, definition):
-    add_entry(TYPE_APP, app_name, definition)
-
-def unregister_app(app_name):
-    delete_entry(TYPE_APP, app_name)
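config.REPO_FILE is a single JSON document holding one object per entry type, mirroring the initializer of `data` above. A hypothetical minimal repository; the per-entry schemas are assumptions, since the app and container definitions are produced by modules outside this hunk:

    {
        "apps": {
            "redmine": {"version": "4.1.0-200101"}
        },
        "containers": {
            "redmine": {"image": "redmine_4.1.0-200101", "depends": ["mariadb"]}
        },
        "images": {
            "redmine_4.1.0-200101": {"layers": ["alpine", "redmine_4.1.0-200101"]}
        }
    }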
diff --git a/usr/lib/python3.8/spoc/repo_online.py b/usr/lib/python3.8/spoc/repo_online.py
deleted file mode 100644
index 4b136fc..0000000
--- a/usr/lib/python3.8/spoc/repo_online.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import json
-import os
-import requests
-import shutil
-import tarfile
-import time
-from cryptography.exceptions import InvalidSignature
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives import hashes
-from cryptography.hazmat.primitives.asymmetric import ec, utils
-from cryptography.hazmat.primitives.serialization import load_pem_public_key
-
-from . import config
-from .exceptions import AppNotFoundError, ImageNotFoundError
-
-TYPE_APP = 'apps'
-TYPE_IMAGE = 'images'
-
-public_key = None
-
-def get_public_key():
-    global public_key
-    if not public_key:
-        pem = f'-----BEGIN PUBLIC KEY-----\n{config.ONLINE_PUBKEY}\n-----END PUBLIC KEY-----'
-        public_key = load_pem_public_key(pem.encode(), default_backend())
-    return public_key
-
-def verify_fileobj(fileobj, expected_hash):
-    hasher = hashes.Hash(hashes.SHA512(), default_backend())
-    while True:
-        data = fileobj.read(64*1024)
-        if not data:
-            break
-        hasher.update(data)
-    get_public_key().verify(bytes.fromhex(expected_hash), hasher.finalize(), ec.ECDSA(utils.Prehashed(hashes.SHA512())))
-
-def download_archive(archive_url, archive_path, expected_hash, observer=None):
-    # Check if an archive needs to be downloaded via http(s)
-    do_download = True
-    # If the file already exists in the temporary directory, verify the signature
-    if os.path.exists(archive_path):
-        try:
-            with open(archive_path, 'rb') as f:
-                verify_fileobj(f, expected_hash)
-            # If the signature matches, skip download
-            if observer:
-                observer.units_done = os.path.getsize(archive_path)
-            do_download = False
-        except InvalidSignature:
-            # If the signature is invalid, redownload the file
-            pass
-    if do_download:
-        do_download_archive(archive_url, archive_path, expected_hash, observer)
-
-def do_download_archive(archive_url, archive_path, expected_hash, observer=None):
-    # Download archive via http(s) and store in temporary directory
-    with open(archive_path, 'wb') as f, requests.Session() as session:
-        resource = session.get(archive_url, stream=True)
-        resource.raise_for_status()
-        if observer:
-            for chunk in resource.iter_content(chunk_size=64*1024):
-                if chunk:
-                    observer.units_done += f.write(chunk)
-        else:
-            for chunk in resource.iter_content(chunk_size=64*1024):
-                if chunk:
-                    f.write(chunk)
-
-def unpack_archive(archive_path, destination, expected_hash, observer):
-    with open(archive_path, 'rb') as f:
-        # Verify the file object, then seek back and open it as tar without losing the handle, preventing possible malicious race conditions
-        verify_fileobj(f, expected_hash)
-        f.seek(0)
-        # Remove the target directory if it exists from a previous failed installation
-        dst_dir = os.path.join(destination, os.path.basename(archive_path)[:-7])
-        try:
-            shutil.rmtree(dst_dir)
-        except FileNotFoundError:
-            pass
-        # Extract the tar members while counting their size
-        # If this is done as non-root, extractall() from https://github.com/python/cpython/blob/master/Lib/tarfile.py needs to be reimplemented instead
-        tar = tarfile.open(fileobj=f)
-        if observer:
-            for tarinfo in tar:
-                tar.extract(tarinfo, destination, numeric_owner=True)
-                observer.units_done += tarinfo.size
-        else:
-            tar.extractall(destination, numeric_owner=True)
-    # Remove the archive
-    os.unlink(archive_path)
-
-data = None
-
-def load(force=False):
-    # Download the package manifest with its signature and verify it
-    global data
-    if not data or force:
-        with requests.Session() as session:
-            resource = session.get(config.ONLINE_REPO_URL, timeout=5)
-            resource.raise_for_status()
-            packages = resource.content
-            resource = session.get(config.ONLINE_SIG_URL, timeout=5)
-            resource.raise_for_status()
-            packages_sig = resource.content
-        # Raises cryptography.exceptions.InvalidSignature on verification failure
-        get_public_key().verify(packages_sig, packages, ec.ECDSA(hashes.SHA512()))
-        data = json.loads(packages.decode())
-
-def get_entries(entry_type):
-    load()
-    return data[entry_type]
-
-def get_entry(entry_type, name, exception):
-    try:
-        return get_entries(entry_type)[name]
-    except KeyError as e:
-        raise exception(name) from e
-
-def get_images():
-    return get_entries(TYPE_IMAGE)
-
-def get_image(image_name):
-    return get_entry(TYPE_IMAGE, image_name, ImageNotFoundError)
-
-def get_apps():
-    return get_entries(TYPE_APP)
-
-def get_app(app_name):
-    return get_entry(TYPE_APP, app_name, AppNotFoundError)
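Note the two verification modes above: load() checks the signature over the raw manifest bytes, while verify_fileobj() checks a signature over a prehashed SHA-512 digest. A minimal standalone sketch of the manifest check, assuming hypothetical local file names for the manifest, signature, and PEM public key:

    # Verify a downloaded manifest + signature pair the same way load() does.
    # Raises cryptography.exceptions.InvalidSignature on tampering.
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import ec
    from cryptography.hazmat.primitives.serialization import load_pem_public_key

    with open('packages.json', 'rb') as f:
        packages = f.read()
    with open('packages.sig', 'rb') as f:
        signature = f.read()
    with open('spoc.pub', 'rb') as f:
        public_key = load_pem_public_key(f.read(), default_backend())
    public_key.verify(signature, packages, ec.ECDSA(hashes.SHA512()))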
diff --git a/usr/lib/python3.8/spoc/repo_publish.py b/usr/lib/python3.8/spoc/repo_publish.py
deleted file mode 100644
index fe1a89b..0000000
--- a/usr/lib/python3.8/spoc/repo_publish.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import fcntl
-import json
-import os
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives import hashes
-from cryptography.hazmat.primitives.asymmetric import ec, utils
-from cryptography.hazmat.primitives.serialization import load_pem_private_key
-
-from . import config
-from .exceptions import AppNotFoundError, ImageNotFoundError
-from .flock import locked
-
-TYPE_APP = 'apps'
-TYPE_IMAGE = 'images'
-
-class TarSizeCounter:
-    def __init__(self):
-        self.size = 0
-
-    def add_file(self, tarinfo):
-        self.size += tarinfo.size
-        return tarinfo
-
-def sign_file(file_path):
-    # Generate an ECDSA SHA512 signature of a file using the EC private key
-    sha512 = hashes.SHA512()
-    hasher = hashes.Hash(sha512, default_backend())
-    with open(file_path, 'rb') as f:
-        while True:
-            data = f.read(64*1024)
-            if not data:
-                break
-            hasher.update(data)
-    with open(config.PUB_PRIVKEY_FILE, 'rb') as f:
-        private_key = load_pem_private_key(f.read(), None, default_backend())
-    return private_key.sign(hasher.finalize(), ec.ECDSA(utils.Prehashed(sha512)))
-
-data = {TYPE_IMAGE: {}, TYPE_APP: {}}
-mtime = 0
-
-def load():
-    global data
-    global mtime
-    try:
-        file_mtime = os.stat(config.PUB_REPO_FILE).st_mtime
-        if mtime != file_mtime:
-            with open(config.PUB_REPO_FILE) as f:
-                data = json.load(f)
-            mtime = file_mtime
-    except FileNotFoundError:
-        pass
-
-def save():
-    global mtime
-    # Write the repository file; callers hold the publish repository lock
-    with open(config.PUB_REPO_FILE, 'w') as f:
-        json.dump(data, f, sort_keys=True, indent=4)
-    mtime = os.stat(config.PUB_REPO_FILE).st_mtime
-    # Cryptographically sign the repository file
-    signature = sign_file(config.PUB_REPO_FILE)
-    with open(config.PUB_SIG_FILE, 'wb') as f:
-        f.write(signature)
-
-@locked(config.PUB_LOCK_FILE)
-def get_entries(entry_type):
-    load()
-    return data[entry_type]
-
-def get_entry(entry_type, name, exception):
-    try:
-        return get_entries(entry_type)[name]
-    except KeyError as e:
-        raise exception(name) from e
-
-@locked(config.PUB_LOCK_FILE)
-def add_entry(entry_type, name, definition):
-    load()
-    data[entry_type][name] = definition
-    save()
-
-@locked(config.PUB_LOCK_FILE)
-def delete_entry(entry_type, name):
-    load()
-    try:
-        del data[entry_type][name]
-        save()
-    except KeyError:
-        pass
-
-def get_images():
-    return get_entries(TYPE_IMAGE)
-
-def get_image(image_name):
-    return get_entry(TYPE_IMAGE, image_name, ImageNotFoundError)
-
-def register_image(image_name, definition):
-    add_entry(TYPE_IMAGE, image_name, definition)
-
-def unregister_image(image_name):
-    delete_entry(TYPE_IMAGE, image_name)
-
-def get_apps():
-    return get_entries(TYPE_APP)
-
-def get_app(app_name):
-    return get_entry(TYPE_APP, app_name, AppNotFoundError)
-
-def register_app(app_name, definition):
-    add_entry(TYPE_APP, app_name, definition)
-
-def unregister_app(app_name):
-    delete_entry(TYPE_APP, app_name)
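sign_file() expects an existing PEM EC private key at config.PUB_PRIVKEY_FILE, and repo_online expects the matching public key in config.ONLINE_PUBKEY. A sketch for generating such a pair; the P-384 curve and the output file names are assumptions, not something this diff prescribes:

    # Generate a hypothetical key pair compatible with load_pem_private_key()
    # above and load_pem_public_key() in repo_online.
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import ec

    private_key = ec.generate_private_key(ec.SECP384R1(), default_backend())
    with open('publish.key', 'wb') as f:
        f.write(private_key.private_bytes(
            serialization.Encoding.PEM,
            serialization.PrivateFormat.PKCS8,
            serialization.NoEncryption()))
    with open('spoc.pub', 'wb') as f:
        f.write(private_key.public_key().public_bytes(
            serialization.Encoding.PEM,
            serialization.PublicFormat.SubjectPublicKeyInfo))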
diff --git a/usr/lib/python3.8/spoc/templates.py b/usr/lib/python3.8/spoc/templates.py
deleted file mode 100644
index 34634ee..0000000
--- a/usr/lib/python3.8/spoc/templates.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-
-LXC_CONTAINER_TEMPLATE = '''# Container name
-lxc.uts.name = {name}
-
-# Network
-lxc.net.0.type = veth
-lxc.net.0.link = {interface}
-lxc.net.0.flags = up
-lxc.net.0.ipv4.address = {ip_address}/{ip_netmask}
-lxc.net.0.ipv4.gateway = {ip_gateway}
-
-# Root filesystem
-lxc.rootfs.path = {rootfs}
-
-# Mounts
-lxc.mount.entry = shm dev/shm tmpfs rw,nodev,noexec,nosuid,relatime,mode=1777,create=dir 0 0
-lxc.mount.entry = {resolv_conf} etc/resolv.conf none bind,ro,create=file 0 0
-lxc.mount.entry = {hosts} etc/hosts none bind,ro,create=file 0 0
-{mounts}
-
-# Environment
-lxc.environment = PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
-{env}
-
-# Init
-lxc.init.uid = {uid}
-lxc.init.gid = {gid}
-lxc.init.cwd = {cwd}
-lxc.init.cmd = {cmd}
-
-# Halt
-lxc.signal.halt = {halt}
-
-# Log
-lxc.console.size = 1MB
-lxc.console.logfile = {log}
-
-# ID map
-lxc.idmap = u 0 100000 65536
-lxc.idmap = g 0 100000 65536
-
-# Hooks
-lxc.hook.version = 1
-lxc.hook.pre-start = /usr/bin/spoc-hook
-lxc.hook.post-stop = /usr/bin/spoc-hook
-
-# Other
-lxc.arch = linux64
-lxc.include = /usr/share/lxc/config/common.conf
-lxc.include = /usr/share/lxc/config/userns.conf
-'''
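To make the placeholders concrete, a sketch of how the template would be rendered; the real call site lives in container.py (outside this diff), so the invocation and every value here are hypothetical:

    # Fill in all 16 placeholders of LXC_CONTAINER_TEMPLATE with made-up values.
    config_text = LXC_CONTAINER_TEMPLATE.format(
        name='redmine',
        interface='lxcbr0',
        ip_address='10.0.3.3',
        ip_netmask='24',
        ip_gateway='10.0.3.1',
        rootfs='/var/lib/spoc/containers/redmine/rootfs',
        resolv_conf='/etc/resolv.conf',
        hosts='/var/lib/spoc/hosts',
        mounts='lxc.mount.entry = /var/lib/spoc/apps/redmine srv/redmine none bind,create=dir 0 0',
        env='lxc.environment = RAILS_ENV=production',
        uid=100,
        gid=101,
        cwd='/srv/redmine',
        cmd='/usr/bin/ruby bin/rails server',
        halt='SIGINT',
        log='/var/log/spoc/redmine.log')
    with open('/var/lib/spoc/containers/redmine/config', 'w') as f:
        f.write(config_text)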