Don't import separate config constants; import the whole module in case the constants are not so constant
commit 42bdace8f6
parent 794c46969b
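
The rationale in practice: `from .config import APPS_DIR` copies the value bound at import time into the importing module, so a later rebind of the attribute on the config module (a reload, a test override) is never seen by code that imported the name directly. Attribute access through the module (`config.APPS_DIR`) is looked up on every use and always reflects the current value. A minimal sketch of the difference, using hypothetical module names, not files from this codebase:

# settings.py (hypothetical stand-in for the config module)
APPS_DIR = '/var/lib/apps'

# consumer.py -- imports the name directly, snapshotting the binding
from settings import APPS_DIR

def report():
    return APPS_DIR  # whatever the value was when the import ran

# main.py
import settings
import consumer

print(consumer.report())         # /var/lib/apps
settings.APPS_DIR = '/tmp/apps'  # rebind the "constant" on the module
print(consumer.report())         # still /var/lib/apps -- a stale copy
print(settings.APPS_DIR)         # /tmp/apps -- attribute lookup stays current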
@@ -8,10 +8,7 @@ import subprocess
 import tarfile
 import urllib.parse
 
-from . import repo_local
-from . import repo_online
-from . import repo_publish
-from .config import APPS_DIR, ONLINE_APPS_URL, PUB_APPS_DIR, TMP_APPS_DIR, LAYERS_DIR, VOLUMES_DIR
+from . import config, repo_local, repo_online, repo_publish
 from .container import Container
 from .image import Image
 
@@ -21,7 +18,7 @@ class App:
     def __init__(self, name, define_containers=True, load_from_repo=True):
         self.name = name
         self.version = None
-        self.app_dir = os.path.join(APPS_DIR, name)
+        self.app_dir = os.path.join(config.APPS_DIR, name)
         self.meta = {}
         self.autostart = False
         self.containers = []
@@ -48,9 +45,9 @@ class App:
 
     def download(self, observer=None):
         # Download the archive with application scripts and install data
-        os.makedirs(TMP_APPS_DIR, 0o700, True)
-        archive_url = urllib.parse.urljoin(ONLINE_APPS_URL, f'{self.name}.tar.xz')
-        archive_path = os.path.join(TMP_APPS_DIR, f'{self.name}.tar.xz')
+        os.makedirs(config.TMP_APPS_DIR, 0o700, True)
+        archive_url = urllib.parse.urljoin(config.ONLINE_APPS_URL, f'{self.name}.tar.xz')
+        archive_path = os.path.join(config.TMP_APPS_DIR, f'{self.name}.tar.xz')
         definition = repo_online.get_app(self.name)
         if observer:
             observer.units_total = definition['dlsize']
@@ -58,11 +55,11 @@ class App:
 
     def unpack_downloaded(self, observer=None):
         # Unpack downloaded archive with application scripts and install data
-        archive_path = os.path.join(TMP_APPS_DIR, f'{self.name}.tar.xz')
+        archive_path = os.path.join(config.TMP_APPS_DIR, f'{self.name}.tar.xz')
         definition = repo_online.get_app(self.name)
         if observer:
             observer.units_total = definition['size']
-        repo_online.unpack_archive(archive_path, APPS_DIR, definition['hash'], observer)
+        repo_online.unpack_archive(archive_path, config.APPS_DIR, definition['hash'], observer)
 
     def run_script(self, action):
         # Runs script for an app, if the script is present
@@ -71,8 +68,10 @@ class App:
         if os.path.exists(script_path):
             # Run the script in its working directory, if there is one, so it doesn't have to figure out paths to packaged files
             env = os.environ.copy()
-            env['LAYERS_DIR'] = LAYERS_DIR
-            env['VOLUMES_DIR'] = VOLUMES_DIR
+            env['LAYERS_DIR'] = config.LAYERS_DIR
+            env['VOLUMES_DIR'] = config.VOLUMES_DIR
+            env['APPS_DIR'] = config.APPS_DIR
+            env['LOG_DIR'] = config.LOG_DIR
             cwd = script_dir if os.path.exists(script_dir) else self.app_dir
             subprocess.run(script_path, cwd=cwd, env=env, check=True)
 
@@ -163,9 +162,9 @@ class App:
     def publish(self, filename):
         # Create application archive and register to publish repository
         builddir = os.path.dirname(filename)
-        os.makedirs(PUB_APPS_DIR, 0o755, True)
+        os.makedirs(config.PUB_APPS_DIR, 0o755, True)
         files = repo_publish.TarSizeCounter()
-        archive_path = os.path.join(PUB_APPS_DIR, f'{self.name}.tar.xz')
+        archive_path = os.path.join(config.PUB_APPS_DIR, f'{self.name}.tar.xz')
         with tarfile.open(archive_path, 'w:xz') as tar:
             for content in ('install', 'install.sh', 'update', 'update.sh', 'uninstall', 'uninstall.sh'):
                 content_path = os.path.join(builddir, content)
@@ -182,7 +181,7 @@ class App:
     def unpublish(self):
         # Remove the application from publish repository
         repo_publish.unregister_app(self.name)
-        archive_path = os.path.join(PUB_APPS_DIR, f'{self.name}.tar.xz')
+        archive_path = os.path.join(config.PUB_APPS_DIR, f'{self.name}.tar.xz')
         try:
             os.unlink(archive_path)
         except FileNotFoundError:
@@ -43,10 +43,4 @@ ONLINE_LAYERS_URL = urllib.parse.urljoin(ONLINE_BASE_URL, 'layers/')
 ONLINE_APPS_URL = urllib.parse.urljoin(ONLINE_BASE_URL, 'apps/')
 ONLINE_REPO_URL = urllib.parse.urljoin(ONLINE_BASE_URL, 'repository.json')
 ONLINE_SIG_URL = urllib.parse.urljoin(ONLINE_BASE_URL, 'repository.sig')
-ONLINE_REPO_FILE = os.path.join(TMP_DIR, 'online.json')
 ONLINE_PUBKEY = config.get('repo', 'public-key', fallback='')
-
-# Repo entry types constants
-TYPE_APP = 'apps'
-TYPE_CONTAINER = 'containers'
-TYPE_IMAGE = 'images'
@@ -9,12 +9,9 @@ import subprocess
 import time
 from concurrent.futures import ThreadPoolExecutor
 
-from . import network
-from . import repo_local
+from . import config, network, repo_local, templates
 from .depsolver import DepSolver
 from .exceptions import InvalidContainerStateError
-from .config import CONTAINERS_DIR, LAYERS_DIR, LOG_DIR, HOSTS_FILE, VOLUMES_DIR
-from .templates import LXC_CONTAINER_TEMPLATE
 
 # States taken from https://github.com/lxc/lxc/blob/master/src/lxc/state.h
 class ContainerState(enum.Enum):
@@ -43,12 +40,12 @@ class Container:
         self.cwd = None
         self.ready = None
         self.halt = None
-        self.container_path = os.path.join(CONTAINERS_DIR, name)
+        self.container_path = os.path.join(config.CONTAINERS_DIR, name)
         self.config_path = os.path.join(self.container_path, 'config')
         self.rootfs_path = os.path.join(self.container_path, 'rootfs')
         self.olwork_path = os.path.join(self.container_path, 'olwork')
         self.ephemeral_layer_path = os.path.join(self.container_path, 'ephemeral')
-        self.log_path = os.path.join(LOG_DIR, f'{name}.log')
+        self.log_path = os.path.join(config.LOG_DIR, f'{name}.log')
         if load_from_repo:
             self.set_definition(repo_local.get_container(name))
 
@@ -68,20 +65,20 @@ class Container:
 
     def get_state(self):
         # Get current state of the container, uses LXC monitor socket accessible only in container's namespace
-        state = subprocess.run(['lxc-info', '-sH', '-P', CONTAINERS_DIR, self.name], capture_output=True, check=True)
+        state = subprocess.run(['lxc-info', '-sH', '-P', config.CONTAINERS_DIR, self.name], capture_output=True, check=True)
         return ContainerState[state.stdout.strip().decode()]
 
     def await_state(self, awaited_state):
         # Block execution until the container reaches the desired state or until timeout
         try:
-            subprocess.run(['lxc-wait', '-P', CONTAINERS_DIR, '-s', awaited_state.value, '-t', '30', self.name], check=True)
+            subprocess.run(['lxc-wait', '-P', config.CONTAINERS_DIR, '-s', awaited_state.value, '-t', '30', self.name], check=True)
         except subprocess.CalledProcessError:
             raise InvalidContainerStateError(self.name, self.get_state())
 
     def mount_rootfs(self):
         # Prepares container rootfs
         # Called in lxc.hook.pre-start as the standard mount options are insufficient for rootless containers (see notes for overlayfs below)
-        layers = [os.path.join(LAYERS_DIR, layer) for layer in self.layers]
+        layers = [os.path.join(config.LAYERS_DIR, layer) for layer in self.layers]
         if not self.build:
             # Add ephemeral layer if the container is not created as part of build process
             layers.append(self.ephemeral_layer_path)
@@ -115,14 +112,14 @@ class Container:
         if mountpoint.endswith(':file'):
             mount_type = 'file'
             mountpoint = mountpoint[:-5]
-        return f'lxc.mount.entry = {os.path.join(VOLUMES_DIR, volume)} {mountpoint} none bind,create={mount_type} 0 0'
+        return f'lxc.mount.entry = {os.path.join(config.VOLUMES_DIR, volume)} {mountpoint} none bind,create={mount_type} 0 0'
 
     def create(self):
         # Create container directories
         os.makedirs(self.rootfs_path, 0o755, True)
         os.makedirs(self.olwork_path, 0o755, True)
         os.makedirs(self.ephemeral_layer_path, 0o755, True)
-        os.makedirs(LOG_DIR, 0o750, True)
+        os.makedirs(config.LOG_DIR, 0o750, True)
         # Change UID/GID of the ephemeral layer directory
         # Chown is possible only when the process is running as root, for user namespaces, see https://linuxcontainers.org/lxc/manpages/man1/lxc-usernsexec.1.html
         os.chown(self.ephemeral_layer_path, 100000, 100000)
@@ -137,9 +134,7 @@ class Container:
         ip_address, ip_netmask, ip_gateway = network.request_ip(self.name)
         # Write LXC configuration file
         with open(self.config_path, 'w') as f:
-            f.write(LXC_CONTAINER_TEMPLATE.format(name=self.name, ip_address=ip_address, ip_netmask=ip_netmask, ip_gateway=ip_gateway,
-                    rootfs=self.rootfs_path, hosts=HOSTS_FILE, mounts=mounts, env=env,
-                    uid=uid, gid=gid, cmd=cmd, cwd=cwd, halt=halt, log=self.log_path))
+            f.write(templates.LXC_CONTAINER_TEMPLATE.format(name=self.name, ip_address=ip_address, ip_netmask=ip_netmask, ip_gateway=ip_gateway, rootfs=self.rootfs_path, hosts=config.HOSTS_FILE, mounts=mounts, env=env, uid=uid, gid=gid, cmd=cmd, cwd=cwd, halt=halt, log=self.log_path))
         repo_local.register_container(self.name, self.get_definition())
 
     def destroy(self):
@@ -166,7 +161,7 @@ class Container:
 
     def do_start(self):
         # Start the current container, wait until it is reported as started and execute application readiness check
-        subprocess.Popen(['lxc-start', '-P', CONTAINERS_DIR, self.name])
+        subprocess.Popen(['lxc-start', '-P', config.CONTAINERS_DIR, self.name])
         self.await_state(ContainerState.RUNNING)
         # Launch the readiness check in a separate thread, so it can be reliably cancelled after timeout
         with ThreadPoolExecutor(max_workers=1) as pool:
@@ -183,7 +178,7 @@ class Container:
             state = self.get_state()
             if state != ContainerState.RUNNING:
                 raise InvalidContainerStateError(self.name, state)
-            check = subprocess.run(['lxc-attach', '-P', CONTAINERS_DIR, '--clear-env', self.name, '--']+ready_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, timeout=30)
+            check = subprocess.run(['lxc-attach', '-P', config.CONTAINERS_DIR, '--clear-env', self.name, '--']+ready_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, timeout=30)
             if check.returncode == 0:
                 break
             time.sleep(0.25)
@@ -198,7 +193,7 @@ class Container:
 
    def do_stop(self):
        # Stop the current container and wait until it stops completely
-       subprocess.Popen(['lxc-stop', '-P', CONTAINERS_DIR, self.name])
+       subprocess.Popen(['lxc-stop', '-P', config.CONTAINERS_DIR, self.name])
        self.await_state(ContainerState.STOPPED)
 
    def execute(self, cmd, uid=None, gid=None, **kwargs):
@@ -219,9 +214,9 @@ class Container:
             uidgid_param.extend(('-g', gid))
         # If the container is stopped, use lxc-execute, otherwise use lxc-attach
         if state == ContainerState.STOPPED:
-            return subprocess.run(['lxc-execute', '-P', CONTAINERS_DIR]+uidgid_param+[self.name, '--']+cmd, **kwargs)
+            return subprocess.run(['lxc-execute', '-P', config.CONTAINERS_DIR]+uidgid_param+[self.name, '--']+cmd, **kwargs)
         elif state == ContainerState.RUNNING:
-            return subprocess.run(['lxc-attach', '-P', CONTAINERS_DIR, '--clear-env']+uidgid_param+[self.name, '--']+cmd, **kwargs)
+            return subprocess.run(['lxc-attach', '-P', config.CONTAINERS_DIR, '--clear-env']+uidgid_param+[self.name, '--']+cmd, **kwargs)
         else:
             raise InvalidContainerStateError(self.name, state)
 
@@ -6,17 +6,14 @@ import shutil
 import tarfile
 import urllib.parse
 
-from . import repo_local
-from . import repo_online
-from . import repo_publish
-from .config import LAYERS_DIR, ONLINE_LAYERS_URL, PUB_LAYERS_DIR, TMP_LAYERS_DIR
+from . import config, repo_local, repo_online, repo_publish
 
 DEFINITION_MEMBERS = {'layers', 'env', 'uid', 'gid', 'cmd', 'cwd', 'ready', 'halt'}
 
 class Image:
     def __init__(self, name, load_from_repo=True):
         self.name = name
-        self.layer_path = os.path.join(LAYERS_DIR, name)
+        self.layer_path = os.path.join(config.LAYERS_DIR, name)
         self.layers = [name]
         self.env = {}
         self.uid = None
@@ -60,9 +57,9 @@ class Image:
 
     def download(self, observer=None):
         # Download the archive with layer data
-        os.makedirs(TMP_LAYERS_DIR, 0o700, True)
-        archive_url = urllib.parse.urljoin(ONLINE_LAYERS_URL, f'{self.name}.tar.xz')
-        archive_path = os.path.join(TMP_LAYERS_DIR, f'{self.name}.tar.xz')
+        os.makedirs(config.TMP_LAYERS_DIR, 0o700, True)
+        archive_url = urllib.parse.urljoin(config.ONLINE_LAYERS_URL, f'{self.name}.tar.xz')
+        archive_path = os.path.join(config.TMP_LAYERS_DIR, f'{self.name}.tar.xz')
         definition = repo_online.get_image(self.name)
         if observer:
             observer.units_total = definition['dlsize']
@@ -70,19 +67,19 @@ class Image:
 
     def unpack_downloaded(self, observer=None):
         # Unpack downloaded archive with layer data
-        archive_path = os.path.join(TMP_LAYERS_DIR, f'{self.name}.tar.xz')
+        archive_path = os.path.join(config.TMP_LAYERS_DIR, f'{self.name}.tar.xz')
         definition = repo_online.get_image(self.name)
         if observer:
             observer.units_total = definition['size']
-        repo_online.unpack_archive(archive_path, LAYERS_DIR, definition['hash'], observer)
+        repo_online.unpack_archive(archive_path, config.LAYERS_DIR, definition['hash'], observer)
         self.set_definition(definition)
         repo_local.register_image(self.name, definition)
 
     def publish(self):
         # Create layer archive and register to publish repository
-        os.makedirs(PUB_LAYERS_DIR, 0o755, True)
+        os.makedirs(config.PUB_LAYERS_DIR, 0o755, True)
         files = repo_publish.TarSizeCounter()
-        archive_path = os.path.join(PUB_LAYERS_DIR, f'{self.name}.tar.xz')
+        archive_path = os.path.join(config.PUB_LAYERS_DIR, f'{self.name}.tar.xz')
         with tarfile.open(archive_path, 'w:xz') as tar:
             tar.add(self.layer_path, self.name, filter=files.add_file)
         definition = self.get_definition()
@@ -95,7 +92,7 @@ class Image:
     def unpublish(self):
         # Remove the layer from publish repository
         repo_publish.unregister_image(self.name)
-        archive_path = os.path.join(PUB_LAYERS_DIR, f'{self.name}.tar.xz')
+        archive_path = os.path.join(config.PUB_LAYERS_DIR, f'{self.name}.tar.xz')
         try:
             os.unlink(archive_path)
         except FileNotFoundError:
@@ -10,7 +10,6 @@ import zipfile
 
 from .container import Container
 from .image import Image
-from .config import LAYERS_DIR
 
 class ImageBuilder:
     def build(self, image, filename):
@@ -6,7 +6,7 @@ import os
 import socket
 import struct
 
-from .config import HOSTS_FILE, HOSTS_LOCK_FILE, NETWORK_INTERFACE
+from . import config
 from .flock import locked
 
 # ioctl magic constants taken from https://git.musl-libc.org/cgit/musl/tree/include/sys/ioctl.h (same as glibc)
@@ -16,15 +16,15 @@ IOCTL_SIOCGIFNETMASK = 0x891b
 leases = {}
 mtime = None
 
-@locked(HOSTS_LOCK_FILE)
+@locked(config.HOSTS_LOCK_FILE)
 def load_leases():
     # Read and parse all IP-hostname pairs from the global hosts file
     global leases
     global mtime
     try:
-        file_mtime = os.stat(HOSTS_FILE).st_mtime
+        file_mtime = os.stat(config.HOSTS_FILE).st_mtime
         if mtime != file_mtime:
-            with open(HOSTS_FILE, 'r') as f:
+            with open(config.HOSTS_FILE, 'r') as f:
                 leases = [lease.strip().split(None, 1) for lease in f]
                 leases = {ip: hostname for ip, hostname in leases}
             mtime = file_mtime
@@ -32,20 +32,20 @@ def load_leases():
         interface = get_bridge_interface()
         leases = {str(interface.ip): 'host'}
 
-@locked(HOSTS_LOCK_FILE)
+@locked(config.HOSTS_LOCK_FILE)
 def save_leases():
     # write all IP-hostname pairs to the global hosts file
     global mtime
-    with open(HOSTS_FILE, 'w') as f:
+    with open(config.HOSTS_FILE, 'w') as f:
         for ip, hostname in sorted(leases.items(), key=lambda lease: socket.inet_aton(lease[0])):
             f.write(f'{ip} {hostname}\n')
-    mtime = os.stat(HOSTS_FILE).st_mtime
+    mtime = os.stat(config.HOSTS_FILE).st_mtime
 
 def get_bridge_interface():
     # Returns bridge interface's IP address and netmask
     with socket.socket(socket.AF_INET) as sock:
         # Get IPv4Interface for given interface name
-        packed_ifname = struct.pack('256s', NETWORK_INTERFACE.encode())
+        packed_ifname = struct.pack('256s', config.NETWORK_INTERFACE.encode())
         ip = socket.inet_ntoa(fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFADDR, packed_ifname)[20:24])
         netmask = socket.inet_ntoa(fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFNETMASK, packed_ifname)[20:24])
         return ipaddress.IPv4Interface(f'{ip}/{netmask}')
@@ -4,10 +4,14 @@ import fcntl
 import json
 import os
 
+from . import config
 from .exceptions import AppNotFoundError, ContainerNotFoundError, ImageNotFoundError
-from .config import REPO_FILE, REPO_LOCK_FILE, TYPE_APP, TYPE_CONTAINER, TYPE_IMAGE
 from .flock import locked
 
+TYPE_APP = 'apps'
+TYPE_CONTAINER = 'containers'
+TYPE_IMAGE = 'images'
+
 data = {TYPE_IMAGE: {}, TYPE_CONTAINER: {}, TYPE_APP: {}}
 mtime = 0
 
@@ -15,9 +19,9 @@ def load():
     global data
     global mtime
     try:
-        file_mtime = os.stat(REPO_FILE).st_mtime
+        file_mtime = os.stat(config.REPO_FILE).st_mtime
         if mtime != file_mtime:
-            with open(REPO_FILE) as f:
+            with open(config.REPO_FILE) as f:
                 data = json.load(f)
             mtime = file_mtime
     except FileNotFoundError:
@@ -25,11 +29,11 @@ def load():
 
 def save():
     global mtime
-    with open(REPO_FILE, 'w') as f:
+    with open(config.REPO_FILE, 'w') as f:
         json.dump(data, f, sort_keys=True, indent=4)
-    mtime = os.stat(REPO_FILE).st_mtime
+    mtime = os.stat(config.REPO_FILE).st_mtime
 
-@locked(REPO_LOCK_FILE)
+@locked(config.REPO_LOCK_FILE)
 def get_entries(entry_type):
     load()
     return data[entry_type]
@@ -40,13 +44,13 @@ def get_entry(entry_type, name, exception):
     except KeyError as e:
         raise exception(name) from e
 
-@locked(REPO_LOCK_FILE)
+@locked(config.REPO_LOCK_FILE)
 def add_entry(entry_type, name, definition):
     load()
     data[entry_type][name] = definition
     save()
 
-@locked(REPO_LOCK_FILE)
+@locked(config.REPO_LOCK_FILE)
 def delete_entry(entry_type, name):
     load()
     try:
@@ -12,15 +12,18 @@ from cryptography.hazmat.primitives import hashes
 from cryptography.hazmat.primitives.asymmetric import ec, utils
 from cryptography.hazmat.primitives.serialization import load_pem_public_key
 
+from . import config
 from .exceptions import AppNotFoundError, ImageNotFoundError
-from .config import ONLINE_PUBKEY, ONLINE_REPO_URL, ONLINE_SIG_URL, TYPE_APP, TYPE_IMAGE
 
+TYPE_APP = 'apps'
+TYPE_IMAGE = 'images'
+
 public_key = None
 
 def get_public_key():
     global public_key
     if not public_key:
-        pem = f'-----BEGIN PUBLIC KEY-----\n{ONLINE_PUBKEY}\n-----END PUBLIC KEY-----'
+        pem = f'-----BEGIN PUBLIC KEY-----\n{config.ONLINE_PUBKEY}\n-----END PUBLIC KEY-----'
         public_key = load_pem_public_key(pem.encode(), default_backend())
     return public_key
 
@@ -91,10 +94,10 @@ def load(force=False):
     global data
     if not data or force:
         with requests.Session() as session:
-            resource = session.get(ONLINE_REPO_URL, timeout=5)
+            resource = session.get(config.ONLINE_REPO_URL, timeout=5)
             resource.raise_for_status()
             packages = resource.content
-            resource = session.get(ONLINE_SIG_URL, timeout=5)
+            resource = session.get(config.ONLINE_SIG_URL, timeout=5)
             resource.raise_for_status()
             packages_sig = resource.content
             get_public_key().verify(packages_sig, packages, ec.ECDSA(hashes.SHA512()))
@@ -8,10 +8,13 @@ from cryptography.hazmat.primitives import hashes
 from cryptography.hazmat.primitives.asymmetric import ec, utils
 from cryptography.hazmat.primitives.serialization import load_pem_private_key
 
+from . import config
 from .exceptions import AppNotFoundError, ImageNotFoundError
-from .config import PUB_LOCK_FILE, PUB_PRIVKEY_FILE, PUB_REPO_FILE, PUB_SIG_FILE, TYPE_APP, TYPE_IMAGE
 from .flock import locked
 
+TYPE_APP = 'apps'
+TYPE_IMAGE = 'images'
+
 class TarSizeCounter:
     def __init__(self):
         self.size = 0
@@ -30,7 +33,7 @@ def sign_file(file_path):
             if not data:
                 break
             hasher.update(data)
-    with open(PUB_PRIVKEY_FILE, 'rb') as f:
+    with open(config.PUB_PRIVKEY_FILE, 'rb') as f:
         private_key = load_pem_private_key(f.read(), None, default_backend())
     return private_key.sign(hasher.finalize(), ec.ECDSA(utils.Prehashed(sha512)))
 
@@ -41,9 +44,9 @@ def load():
     global data
     global mtime
     try:
-        file_mtime = os.stat(PUB_REPO_FILE).st_mtime
+        file_mtime = os.stat(config.PUB_REPO_FILE).st_mtime
         if mtime != file_mtime:
-            with open(PUB_REPO_FILE) as f:
+            with open(config.PUB_REPO_FILE) as f:
                 data = json.load(f)
             mtime = file_mtime
     except FileNotFoundError:
@@ -52,15 +55,15 @@ def load():
 def save():
     global mtime
     # Open the repository file in read + write mode using exclusive lock
-    with open(PUB_REPO_FILE, 'w') as f:
+    with open(config.PUB_REPO_FILE, 'w') as f:
         json.dump(data, f, sort_keys=True, indent=4)
-    mtime = os.stat(PUB_REPO_FILE).st_mtime
+    mtime = os.stat(config.PUB_REPO_FILE).st_mtime
     # Cryptographically sign the repository file
-    signature = sign_file(PUB_REPO_FILE)
-    with open(PUB_SIG_FILE, 'wb') as f:
+    signature = sign_file(config.PUB_REPO_FILE)
+    with open(config.PUB_SIG_FILE, 'wb') as f:
         f.write(signature)
 
-@locked(PUB_LOCK_FILE)
+@locked(config.PUB_LOCK_FILE)
 def get_entries(entry_type):
     load()
     return data[entry_type]
@@ -71,13 +74,13 @@ def get_entry(entry_type, name, exception):
     except KeyError as e:
         raise exception(name) from e
 
-@locked(PUB_LOCK_FILE)
+@locked(config.PUB_LOCK_FILE)
 def add_entry(entry_type, name, definition):
     load()
     data[entry_type][name] = definition
     save()
 
-@locked(PUB_LOCK_FILE)
+@locked(config.PUB_LOCK_FILE)
 def delete_entry(entry_type, name):
     load()
     try: