From 200354703be4c1a7d3179acacc9a64a84f740aed Mon Sep 17 00:00:00 2001 From: Devon Bautista Date: Mon, 10 Feb 2025 11:44:38 -0700 Subject: [PATCH 01/12] fix: use temporary dir for package manager files Instead of relying on a fixed directory in /tmp that may or may not exist to write package manager logs to, use the tempfile library to create a temporary directory within the container for these at runtime. Also, move the functions in installer.py to a new Installer class in order to store and reuse the buildah container and mount names, the package manager, as well as the temporary directory which is created on init. --- dockerfiles/dnf/Dockerfile | 4 - src/installer.py | 374 +++++++++++++++++++------------------ src/layer.py | 28 ++- src/pathmod.py | 10 + 4 files changed, 225 insertions(+), 191 deletions(-) create mode 100644 src/pathmod.py diff --git a/dockerfiles/dnf/Dockerfile b/dockerfiles/dnf/Dockerfile index ad8691e..0727284 100644 --- a/dockerfiles/dnf/Dockerfile +++ b/dockerfiles/dnf/Dockerfile @@ -20,10 +20,6 @@ RUN pip3.11 install ansible ansible-base ansible-bender boto3 dnspython requests COPY src/ /usr/local/bin/ RUN chmod -R 0755 /usr/local/bin/ -RUN mkdir -p /tmp/dnf/log && \ - mkdir /tmp/dnf/cache && \ - mkdir /tmp/dnf/repos.d - # Allow non-root to run buildah commands RUN setcap cap_setuid=ep "$(command -v newuidmap)" && \ setcap cap_setgid=ep "$(command -v newgidmap)" &&\ diff --git a/src/installer.py b/src/installer.py index f305285..b2833c0 100755 --- a/src/installer.py +++ b/src/installer.py @@ -1,194 +1,210 @@ import subprocess import logging import os +import pathmod +import tempfile # Written Modules from utils import cmd -def install_repos(mname, cname, repos, repo_dest, pkg_man, proxy): - # check if there are repos passed for install - if len(repos) == 0: - logging.info("REPOS: no repos passed to install\n") - return +class Installer: + def __init__(self, pkg_man, cname, mname): + self.pkg_man = pkg_man + self.cname = cname + self.mname = 
mname + + # Create temporary directory for logs, cache, etc. for package manager + os.makedirs(os.path.join(mname, "tmp")) + self.tdir = tempfile.mkdtemp(dir=os.path.join(self.mname, "tmp"), prefix="image-build-") + + if pkg_man == "dnf": + # DNF complains if the log directory is not present + os.makedirs(os.path.join(self.tdir, "dnf/log")) + + def install_repos(self, repos, repo_dest, proxy): + # check if there are repos passed for install + if len(repos) == 0: + logging.info("REPOS: no repos passed to install\n") + return + + logging.info(f"REPOS: Installing these repos to {self.cname}") + for r in repos: + args = [] + logging.info(r['alias'] + ': ' + r['url']) + if self.pkg_man == "zypper": + args.append("-D") + args.append(os.path.join(self.mname, pathmod.sep_strip(repo_dest))) + args.append("addrepo") + args.append("-f") + args.append("-p") + if 'priority' in r: + args.append(r['priority']) + else: + args.append('99') + args.append(r['url']) + args.append(r['alias']) + elif self.pkg_man == "dnf": + args.append("--setopt=reposdir="+os.path.join(self.mname, pathmod.sep_strip(repo_dest))) + args.append("--setopt=logdir="+os.path.join(self.tdir, self.pkg_man, "log")) + args.append("--setopt=cachedir="+os.path.join(self.tdir, self.pkg_man, "cache")) + if proxy != "": + args.append("--setopt=proxy="+proxy) + args.append("config-manager") + args.append("--save") + args.append("--add-repo") + args.append(r['url']) + + rc = cmd([self.pkg_man] + args) + if rc != 0: + raise Exception("Failed to install repo", r['alias'], r['url']) + + if proxy != "": + if r['url'].endswith('.repo'): + repo_name = r['url'].split('/')[-1].split('.repo')[0] + "*" + elif r['url'].startswith('https'): + repo_name = r['url'].split('https://')[1].replace('/','_') + elif r['url'].startswith('http'): + repo_name = r['url'].split('http://')[1].replace('/','_') + args = [] + args.append('config-manager') + args.append('--save') + args.append("--setopt=reposdir="+os.path.join(self.mname, 
pathmod.sep_strip(repo_dest))) + args.append("--setopt=logdir="+os.path.join(self.tdir, self.pkg_man, "log")) + args.append("--setopt=cachedir="+os.path.join(self.tdir, self.pkg_man, "cache")) + args.append('--setopt=*.proxy='+proxy) + args.append(repo_name) + + rc = cmd([self.pkg_man] + args) + if rc != 0: + raise Exception("Failed to set proxy for repo", r['alias'], r['url'], proxy) + + if "gpg" in r: + # Using rpm apparently works for both Yum- and Zypper-based distros. + args = [] + if proxy != "": + arg_env = os.environ.copy() + arg_env['https_proxy'] = proxy + args.append("--root="+self.mname) + args.append("--import") + args.append(r["gpg"]) + + rc = cmd(["rpm"] + args) + if rc != 0: + raise Exception("Failed to install gpg key for", r['alias'], "at URL", r['gpg']) + + def install_base_packages(self, packages, registry_loc, proxy): + # check if there are packages to install + if len(packages) == 0: + logging.warn("PACKAGES: no packages passed to install\n") + return + + logging.info(f"PACKAGES: Installing these packages to {self.cname}") + logging.info("\n".join(packages)) - logging.info(f"REPOS: Installing these repos to {cname}") - for r in repos: args = [] - logging.info(r['alias'] + ': ' + r['url']) - if pkg_man == "zypper": + if self.pkg_man == "zypper": + args.append("-n") args.append("-D") - args.append(repo_dest) - args.append("addrepo") - args.append("-f") - args.append("-p") - if 'priority' in r: - args.append(r['priority']) - else: - args.append('99') - args.append(r['url']) - args.append(r['alias']) - elif pkg_man == "dnf": - args.append("--setopt=reposdir="+repo_dest) - args.append("--setopt=logdir=/tmp/dnf_test/log") - args.append("--setopt=cachedir=/tmp/dnf_test/cache") + args.append(os.path.join(self.mname, pathmod.sep_strip(registry_loc))) + args.append("-C") + args.append(self.tdir) + args.append("--no-gpg-checks") + args.append("--installroot") + args.append(self.mname) + args.append("install") + args.append("-l") + args.extend(packages) + 
elif self.pkg_man == "dnf": + args.append("--setopt=reposdir="+os.path.join(self.mname, pathmod.sep_strip(registry_loc))) + args.append("--setopt=logdir="+os.path.join(self.tdir, self.pkg_man, "log")) + args.append("--setopt=cachedir="+os.path.join(self.tdir, self.pkg_man, "cache")) if proxy != "": args.append("--setopt=proxy="+proxy) - args.append("config-manager") - args.append("--save") - args.append("--add-repo") - args.append(r['url']) - - rc = cmd([pkg_man] + args) - if rc != 0: - raise Exception("Failed to install repo", r['alias'], r['url']) - - if proxy != "": - if r['url'].endswith('.repo'): - repo_name = r['url'].split('/')[-1].split('.repo')[0] + "*" - elif r['url'].startswith('https'): - repo_name = r['url'].split('https://')[1].replace('/','_') - elif r['url'].startswith('http'): - repo_name = r['url'].split('http://')[1].replace('/','_') - args = [] - args.append('config-manager') - args.append('--save') - args.append("--setopt=reposdir="+repo_dest) - args.append("--setopt=logdir=/tmp/dnf_test/log") - args.append("--setopt=cachedir=/tmp/dnf_test/cache") - args.append('--setopt=*.proxy='+proxy) - args.append(repo_name) - - rc = cmd([pkg_man] + args) - if rc != 0: - raise Exception("Failed to set proxy for repo", r['alias'], r['url'], proxy) + args.append("install") + args.append("-y") + args.append("--nogpgcheck") + args.append("--installroot") + args.append(self.mname) + args.extend(packages) + + rc = cmd([self.pkg_man] + args) + if rc == 104: + raise Exception("Installing base packages failed") + + if rc == 107: + logging.warn("one or more RPM postscripts failed to run") + + def remove_base_packages(self, remove_packages): + # check if there are packages to remove + if len(remove_packages) == 0: + logging.warn("REMOVE PACKAGES: no package passed to remove\n") + return + + logging.info(f"REMOVE PACKAGES: removing these packages from container {self.cname}") + logging.info("\n".join(remove_packages)) + for p in remove_packages: + args = [self.cname, 
'--', 'rpm', '-e', '--nodeps', p] + cmd(["buildah","run"] + args) + + def install_base_package_groups(self, package_groups, registry_loc, proxy): + # check if there are packages groups to install + if len(package_groups) == 0: + logging.warn("PACKAGE GROUPS: no package groups passed to install\n") + return + + logging.info(f"PACKAGE GROUPS: Installing these package groups to {self.cname}") + logging.info("\n".join(package_groups)) + args = [] - if "gpg" in r: - # Using rpm apparently works for both Yum- and Zypper-based distros. - args = [] + if self.pkg_man == "zypper": + logging.warn("zypper does not support package groups") + elif self.pkg_man == "dnf": + args.append("--setopt=reposdir="+os.path.join(self.mname, pathmod.sep_strip(registry_loc))) + args.append("--setopt=logdir="+os.path.join(self.tdir, self.pkg_man, "log")) + args.append("--setopt=cachedir="+os.path.join(self.tdir, self.pkg_man, "cache")) if proxy != "": - arg_env = os.environ.copy() - arg_env['https_proxy'] = proxy - args.append("--root="+mname) - args.append("--import") - args.append(r["gpg"]) - - rc = cmd(["rpm"] + args) - if rc != 0: - raise Exception("Failed to install gpg key for", r['alias'], "at URL", r['gpg']) - -def install_base_packages(cname, packages, registry_loc, package_dest, pkg_man, proxy): - # check if there are packages to install - if len(packages) == 0: - logging.warn("PACKAGES: no packages passed to install\n") - return - - logging.info(f"PACKAGES: Installing these packages to {cname}") - logging.info("\n".join(packages)) - - args = [] - if pkg_man == "zypper": - args.append("-n") - args.append("-D") - args.append(registry_loc) - args.append("-C") - args.append("/tmp/image-build") - args.append("--no-gpg-checks") - args.append("--installroot") - args.append(package_dest) - args.append("install") - args.append("-l") - args.extend(packages) - elif pkg_man == "dnf": - args.append("--setopt=reposdir="+package_dest+"/etc/yum.repos.d") - 
args.append("--setopt=logdir=/tmp/dnf_test/log") - args.append("--setopt=cachedir=/tmp/dnf_test/cache") - if proxy != "": - args.append("--setopt=proxy="+proxy) - args.append("install") - args.append("-y") - args.append("--nogpgcheck") - args.append("--installroot") - args.append(package_dest) - args.extend(packages) - - rc = cmd([pkg_man] + args) - if rc == 104: - raise Exception("Installing base packages failed") - - if rc == 107: - logging.warn("one or more RPM postscripts failed to run") - -def remove_base_packages(cname, remove_packages): - # check if there are packages to remove - if len(remove_packages) == 0: - logging.warn("REMOVE PACKAGES: no package passed to remove\n") - return - - logging.info(f"REMOVE PACKAGES: removing these packages from container {cname}") - logging.info("\n".join(remove_packages)) - for p in remove_packages: - args = [cname, '--', 'rpm', '-e', '--nodeps', p] - cmd(["buildah","run"] + args) - -def install_base_package_groups(cname, package_groups, registry_loc, package_dest, pkg_man, proxy): - # check if there are packages groups to install - if len(package_groups) == 0: - logging.warn("PACKAGE GROUPS: no package groups passed to install\n") - return - - logging.info(f"PACKAGE GROUPS: Installling these package groups to {cname}") - logging.info("\n".join(package_groups)) - args = [] - - if pkg_man == "zypper": - logging.warn("zypper does not support package groups") - elif pkg_man == "dnf": - args.append("--setopt=reposdir="+package_dest+"/etc/yum.repos.d") - args.append("--setopt=logdir=/tmp/dnf_test/log") - args.append("--setopt=cachedir=/tmp/dnf_test/cache") - if proxy != "": - args.append("--setopt=proxy="+proxy) - args.append("groupinstall") - args.append("-y") - args.append("--nogpgcheck") - args.append("--installroot") - args.append(package_dest) - args.extend(package_groups) - - rc = cmd([pkg_man] + args) - if rc == 104: - raise Exception("Installing base packages failed") - -def install_base_commands(cname, commands): - # 
check if there are commands to install - if len(commands) == 0: - logging.warn("COMMANDS: no commands passed to run\n") - return - - logging.info(f"COMMANDS: running these commands in {cname}") - for c in commands: - logging.info(c['cmd']) - args = [cname, '--', 'bash', '-c', c['cmd']] - if 'loglevel' in c: - if c['loglevel'].upper() == "INFO": - loglevel = logging.info - elif c['loglevel'].upper() == "WARN": - loglevel = logging.warn + args.append("--setopt=proxy="+proxy) + args.append("groupinstall") + args.append("-y") + args.append("--nogpgcheck") + args.append("--installroot") + args.append(self.mname) + args.extend(package_groups) + + rc = cmd([self.pkg_man] + args) + if rc == 104: + raise Exception("Installing base packages failed") + + def install_base_commands(self, commands): + # check if there are commands to install + if len(commands) == 0: + logging.warn("COMMANDS: no commands passed to run\n") + return + + logging.info(f"COMMANDS: running these commands in {self.cname}") + for c in commands: + logging.info(c['cmd']) + args = [self.cname, '--', 'bash', '-c', c['cmd']] + if 'loglevel' in c: + if c['loglevel'].upper() == "INFO": + loglevel = logging.info + elif c['loglevel'].upper() == "WARN": + loglevel = logging.warn + else: + loglevel = logging.error else: loglevel = logging.error - else: - loglevel = logging.error - out = cmd(["buildah","run"] + args, stderr_handler=loglevel) - -def install_base_copyfiles(cname, copyfiles): - if len(copyfiles) == 0: - logging.warn("COPYFILES: no files to copy\n") - return - logging.info(f"COPYFILES: copying these files to {cname}") - for f in copyfiles: - args = [] - if 'opts' in f: - for o in f['opts']: - args.extend(o.split()) - logging.info(f['src'] + ' -> ' + f['dest']) - args += [ cname, f['src'], f['dest'] ] - out=cmd(["buildah","copy"] + args) + out = cmd(["buildah","run"] + args, stderr_handler=loglevel) + + def install_base_copyfiles(self, copyfiles): + if len(copyfiles) == 0: + logging.warn("COPYFILES: no 
files to copy\n") + return + logging.info(f"COPYFILES: copying these files to {self.cname}") + for f in copyfiles: + args = [] + if 'opts' in f: + for o in f['opts']: + args.extend(o.split()) + logging.info(f['src'] + ' -> ' + f['dest']) + args += [ self.cname, f['src'], f['dest'] ] + out=cmd(["buildah","copy"] + args) diff --git a/src/layer.py b/src/layer.py index de71cb8..ed27b32 100755 --- a/src/layer.py +++ b/src/layer.py @@ -37,15 +37,27 @@ def buildah_handler(line): self.logger.info(f"Container: {cname} mounted at {mname}") if self.args['pkg_man'] == "zypper": - repo_dest = mname+"/etc/zypp/repos.d" + repo_dest = "/etc/zypp/repos.d" elif self.args['pkg_man'] == "dnf": - repo_dest = mname+"/etc/yum.repos.d" + repo_dest = "/etc/yum.repos.d" else: self.logger.error("unsupported package manager") + inst = None + try: + inst = installer.Installer(self.args['pkg_man'], cname, mname) + except Exception as e: + self.logger.error(f"Error preparing installer: {e}") + cmd(["buildah","rm"] + [cname]) + sys.exit("Exiting now ...") + except KeyboardInterrupt: + self.logger.error(f"Keyboard Interrupt") + cmd(["buildah","rm"] + [cname]) + sys.exit("Exiting now ...") + # Install Repos try: - installer.install_repos(mname, cname, repos, repo_dest, self.args['pkg_man'], self.args['proxy']) + inst.install_repos(repos, repo_dest, self.args['proxy']) except Exception as e: self.logger.error(f"Error installing repos: {e}") cmd(["buildah","rm"] + [cname]) @@ -58,11 +70,11 @@ def buildah_handler(line): # Install Packages try: # Base Package Groups - installer.install_base_package_groups(cname, package_groups, repo_dest, mname, self.args['pkg_man'], self.args['proxy']) + inst.install_base_package_groups(package_groups, repo_dest, self.args['proxy']) # Packages - installer.install_base_packages(cname, packages, repo_dest, mname, self.args['pkg_man'], self.args['proxy']) + inst.install_base_packages(packages, repo_dest, self.args['proxy']) # Remove Packages - 
installer.remove_base_packages(cname, remove_packages) + inst.remove_base_packages(remove_packages) except Exception as e: self.logger.error(f"Error installing packages: {e}") cmd(["buildah","rm"] + [cname]) @@ -74,7 +86,7 @@ def buildah_handler(line): # Copy Files try: - installer.install_base_copyfiles(cname, copyfiles) + inst.install_base_copyfiles(copyfiles) except Exception as e: self.logger.error(f"Error running commands: {e}") cmd(["buildah","rm"] + [cname]) @@ -86,7 +98,7 @@ def buildah_handler(line): # Run Commands try: - installer.install_base_commands(cname, commands) + inst.install_base_commands(commands) if os.path.islink(mname + '/etc/resolv.conf'): self.logger.info("removing resolv.conf link (this link breaks running a container)") os.unlink(mname + '/etc/resolv.conf') diff --git a/src/pathmod.py b/src/pathmod.py new file mode 100644 index 0000000..d9aaab5 --- /dev/null +++ b/src/pathmod.py @@ -0,0 +1,10 @@ +import os + +def sep_strip(path): + """Strips the leading path separator from a path, if present.""" + if not path: + return path + + while path.startswith(os.sep): + path = path[1:] + return path From 832c27015fb7c6232e5d9b05d9ce9d73928769a2 Mon Sep 17 00:00:00 2001 From: Devon Bautista Date: Tue, 11 Feb 2025 09:33:34 -0700 Subject: [PATCH 02/12] fix: create tempdir outside of container image-build creates If an error occurs in image-build, the buildah container it creates is unmounted and deleted. This can be rather unhelpful if one wants to examine package manager logs to find out more about any errors that may have occurred. Thus, this commit causes the installer to create the image-build tempdir (where the package manager cache/log files get stored) on the machine (host or container) where image-build is running. That way even if/when the buildah container is destroyed, the tempdir with the logs, etc. remains for inspection. 
--- src/installer.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/installer.py b/src/installer.py index b2833c0..f00ea25 100755 --- a/src/installer.py +++ b/src/installer.py @@ -14,7 +14,8 @@ def __init__(self, pkg_man, cname, mname): # Create temporary directory for logs, cache, etc. for package manager os.makedirs(os.path.join(mname, "tmp")) - self.tdir = tempfile.mkdtemp(dir=os.path.join(self.mname, "tmp"), prefix="image-build-") + self.tdir = tempfile.mkdtemp(prefix="image-build-") + logging.info(f'Installer: Temporary directory for {self.pkg_man} created at {self.tdir}') if pkg_man == "dnf": # DNF complains if the log directory is not present From 23ee92860340fd409c47cd10732c91680d0be813 Mon Sep 17 00:00:00 2001 From: Devon Bautista Date: Fri, 21 Feb 2025 13:44:13 -0700 Subject: [PATCH 03/12] fix: do not err if /tmp already exists --- src/installer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/installer.py b/src/installer.py index f00ea25..2e8a1f6 100755 --- a/src/installer.py +++ b/src/installer.py @@ -13,7 +13,7 @@ def __init__(self, pkg_man, cname, mname): self.mname = mname # Create temporary directory for logs, cache, etc. 
for package manager - os.makedirs(os.path.join(mname, "tmp")) + os.makedirs(os.path.join(mname, "tmp"), exist_ok=True) self.tdir = tempfile.mkdtemp(prefix="image-build-") logging.info(f'Installer: Temporary directory for {self.pkg_man} created at {self.tdir}') From c305800869c6e936a17be6e20e1635911ad6afcd Mon Sep 17 00:00:00 2001 From: Devon Bautista Date: Fri, 21 Feb 2025 13:32:35 -0700 Subject: [PATCH 04/12] build: use requirements.txt in dnf Dockerfile --- dockerfiles/dnf/Dockerfile | 3 ++- requirements.txt | 5 +++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/dockerfiles/dnf/Dockerfile b/dockerfiles/dnf/Dockerfile index 0727284..bdd3a64 100644 --- a/dockerfiles/dnf/Dockerfile +++ b/dockerfiles/dnf/Dockerfile @@ -15,7 +15,8 @@ RUN dnf install -y \ squashfs-tools \ fuse-overlayfs -RUN pip3.11 install ansible ansible-base ansible-bender boto3 dnspython requests jinja2_ansible_filters +COPY requirements.txt / +RUN pip3.11 install -r /requirements.txt COPY src/ /usr/local/bin/ RUN chmod -R 0755 /usr/local/bin/ diff --git a/requirements.txt b/requirements.txt index 5dd9421..47bef45 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,8 @@ ansible +ansible-base +ansible-bender boto3 +dnspython +jinja2_ansible_filters PyYAML +requests From fe550c40cdc8d70f483f3acea389197338d550a5 Mon Sep 17 00:00:00 2001 From: Devon Bautista Date: Fri, 21 Feb 2025 13:37:08 -0700 Subject: [PATCH 05/12] build: use fully-qualified container path in dnf Dockerfile --- dockerfiles/dnf/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dockerfiles/dnf/Dockerfile b/dockerfiles/dnf/Dockerfile index bdd3a64..361aa6d 100644 --- a/dockerfiles/dnf/Dockerfile +++ b/dockerfiles/dnf/Dockerfile @@ -1,4 +1,4 @@ -FROM almalinux:8.8 +FROM docker.io/library/almalinux:8.8 RUN dnf clean all && \ dnf update --nogpgcheck -y && \ From 837df74a930017db34f19171bb280f8ec589b776 Mon Sep 17 00:00:00 2001 From: Devon Bautista Date: Fri, 21 Feb 2025 10:21:18 -0700 
Subject: [PATCH 06/12] chore: add .gitignore to ignore __pycache__/ --- .gitignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..61f2dc9 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +**/__pycache__/ From 1f0f25d9ed8436cd5ce164011fd88cc8936dd4d9 Mon Sep 17 00:00:00 2001 From: Devon Bautista Date: Fri, 21 Feb 2025 10:22:15 -0700 Subject: [PATCH 07/12] chore: remove execute permissions from *.py files --- src/arguments.py | 0 src/image_config.py | 0 src/installer.py | 0 src/layer.py | 0 src/publish.py | 0 src/s3.py | 0 src/utils.py | 0 7 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 src/arguments.py mode change 100755 => 100644 src/image_config.py mode change 100755 => 100644 src/installer.py mode change 100755 => 100644 src/layer.py mode change 100755 => 100644 src/publish.py mode change 100755 => 100644 src/s3.py mode change 100755 => 100644 src/utils.py diff --git a/src/arguments.py b/src/arguments.py old mode 100755 new mode 100644 diff --git a/src/image_config.py b/src/image_config.py old mode 100755 new mode 100644 diff --git a/src/installer.py b/src/installer.py old mode 100755 new mode 100644 diff --git a/src/layer.py b/src/layer.py old mode 100755 new mode 100644 diff --git a/src/publish.py b/src/publish.py old mode 100755 new mode 100644 diff --git a/src/s3.py b/src/s3.py old mode 100755 new mode 100644 diff --git a/src/utils.py b/src/utils.py old mode 100755 new mode 100644 From f7ee264e565fba9787f169af5836f1e34f7e3455 Mon Sep 17 00:00:00 2001 From: Devon Bautista Date: Fri, 21 Feb 2025 11:44:33 -0700 Subject: [PATCH 08/12] doc: correct example config; use actual repo URLs --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 86dd369..ec78962 100644 --- a/README.md +++ b/README.md @@ -55,13 +55,13 @@ An example config file: ``` repos: - alias: 'Rock_BaseOS' - url: 
'http:///repo/pub/rocky/8/BaseOS/x86_64/os' + url: 'http://dl.rockylinux.org/pub/rocky/8/BaseOS/x86_64/os' - alias: 'Rock_AppStream' - url: 'http:///repo/pub/rocky/8/AppStream/x86_64/os' + url: 'http://dl.rockylinux.org/pub/rocky/8/AppStream/x86_64/os' - alias: 'Rock_PowerTools' - url: 'http:///repo/pub/rocky/8/PowerTools/x86_64/os' + url: 'http://dl.rockylinux.org/pub/rocky/8/PowerTools/x86_64/os' - alias: 'Epel' - url: 'http:///repo/pub/rocky/epel/8/Everything/x86_64/' + url: 'http://dl.fedoraproject.org/pub/epel/8/Everything/x86_64/' package_groups: - 'Minimal Install' @@ -72,7 +72,7 @@ packages: - wget cmds: - - 'echo hello' + - cmd: 'echo hello' ``` Then you can use this config file to build an "base" layer: From 71f73106eb6663c3fe427f3757f7cbe1811de559 Mon Sep 17 00:00:00 2001 From: Devon Bautista Date: Fri, 21 Feb 2025 13:21:54 -0700 Subject: [PATCH 09/12] doc: elaborate on publishing mechanisms --- README.md | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index ec78962..3ee3a5c 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ podman run \ image-build --config config.yaml ``` -If the config.yaml pushes to S3, specify the credentials by adding `-e S3_ACCESS=` and `-e S3_SECRET=` to the command above. +If the config.yaml pushes to S3, specify the credentials by adding `-e S3_ACCESS=` and `-e S3_SECRET=` to the command above. See [S3](#s3) below. ## Bare Metal @@ -87,6 +87,8 @@ image-build --name base-os \ You can then build on top of this base os with a new config file, just point the `--parent` flag at the base os container image +See [Publishing Images](#publishing-images) below for more explanation on how `image-build` publishes images. + ## Ansible Type Layer @@ -111,13 +113,16 @@ The `image-build` tool can publish the image layers to a few kinds of endpoints ## S3 -using the `--publish-s3 ` option will push to an s3 endpoint defined in an ENV variable: `S3_URL`. 
-You can also set the access and secret values with `S3_ACCESS` and `S3_SECRET` respectively +Using the `--publish-s3 ` flag or `publish-s3` config key will push to an S3 endpoint. + +Credentials for S3 can be set via environment variables. Use `S3_ACCESS` for the username and `S3_SECRET` for the password. ## Registry -Using the `--publish-registry ` option will push to a docker registry defined in an ENV variable: `REGISTRY_EP`. You can point to a certs directory by setting `REGISTRY_CERTS_DIR`. +Using the `--publish-registry ` flag or `publish-registry` config key will push to the passed registry base URL (not including image tag). Use `--registry-opts-push`/`registry-opts-push` to specify flags/args to pass to the `buildah push` command to push. + +There is an equivalent flag/config option `--registry-opts-pull`/`registry-opts-pull` whose value is passed to the `buildah push` command to pull the parent OCI image. ## Local -Using the `--publish-local` option will squash the layer and copy it to a destination defined in `--publish-dest`. +Using the `--publish-local` flag or `publish-local` config key will push the resulting OCI image to the local podman registry using `buildah commit`. From 211f9e5930c048af7771d96b37cd0c285cbe56f0 Mon Sep 17 00:00:00 2001 From: Devon Bautista Date: Fri, 21 Feb 2025 13:22:14 -0700 Subject: [PATCH 10/12] doc: elaborate on example config for base layer; use container to build --- README.md | 98 ++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 83 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 3ee3a5c..d5c05dd 100644 --- a/README.md +++ b/README.md @@ -51,44 +51,112 @@ buildah bud -t ghcr.io/openchami/image-buildi:latest -f src/dockerfiles/Dockerfi The premise here is very simple. The `image-build` tool builds a base layer by starting a container, then using the provided package manager to install repos and packages. 
 There is limited support for running basic commands inside the container. These settings are provided in a config file and command line options
 
-An example config file:
-```
+An example config file that builds a base OS image based on Rocky 8.10:
+
+```yaml
+# Example image-build config for a base-type image.
+
+# Global image-build options for this image
+options:
+  # Build a "normal" layer (as opposed to an Ansible-type layer)
+  layer_type: 'base'
+
+  # Name and tag for this image, used in publishing to OCI registries
+  # and S3 for identification.
+  name: 'rocky-base'
+  publish_tags: '8.10'
+
+  # Distribution flavor of image.
+  pkg_manager: 'dnf'
+
+  # Starting filesystem of image. 'scratch' means to start with a blank
+  # filesystem. Currently, only OCI images can be used as parents. In
+  # this example, the image is pushed to:
+  #
+  #   registry.mysite.tld/my-images/rocky-base:8.10
+  #
+  # This value can be used as the value to 'parent' if one wished to use
+  # the 'rocky-base:8.10' image as a parent.
+  parent: 'scratch'
+
+  # Publish OCI image to local podman registry. Note that if running
+  # the image-build container, this option will not be a benefit if
+  # the container is removed after running, since the container gets
+  # deleted after the build process exits.
+  #publish_local: true
+
+  # Publish OCI image to container registry. This image can be used
+  # as a parent for child images. Use this when this image should
+  # be used as a parent for subsequent images.
+  #
+  # The below config, combined with 'name' and 'publish_tags', will
+  # publish this OCI image to:
+  #
+  #   registry.mysite.tld/my-images/rocky-base:8.10
+  #
+  publish_registry: 'registry.mysite.tld/my-images'
+  registry_opts_push:
+    - '--tls-verify=false'
+
+  # Publish to S3 instance. This image can be used for booting. Use this
+  # if an image is to be used for booting.
+  #
+  # The below config, combined with 'name' and 'publish_tags', will
+  # publish this SquashFS image to:
+  #
+  #   http://s3.mysite.tld/boot-images/compute/base/rocky8.10-rocky-base-8.10
+  #
+  publish_s3: 'http://s3.mysite.tld'
+  s3_prefix: 'compute/base/'
+  s3_bucket: 'boot-images'
+
+# Package repositories to add. This example uses YUM/DNF repositories.
 repos:
-  - alias: 'Rock_BaseOS'
+  - alias: 'rocky-baseos'
     url: 'http://dl.rockylinux.org/pub/rocky/8/BaseOS/x86_64/os'
-  - alias: 'Rock_AppStream'
+  - alias: 'rock_appstream'
     url: 'http://dl.rockylinux.org/pub/rocky/8/AppStream/x86_64/os'
-  - alias: 'Rock_PowerTools'
+  - alias: 'rock_powertools'
     url: 'http://dl.rockylinux.org/pub/rocky/8/PowerTools/x86_64/os'
-  - alias: 'Epel'
+  - alias: 'epel'
     url: 'http://dl.fedoraproject.org/pub/epel/8/Everything/x86_64/'
 
+# Package groups to install, in this example YUM/DNF package groups.
 package_groups:
   - 'Minimal Install'
   - 'Development Tools'
 
+# List of packages to install after repos get added. These names get passed
+# straight to the package manager.
 packages:
   - kernel
   - wget
 
+# List of commands to run after package management steps get run. Each
+# command gets passed to the shell, so redirection can be used. Besides
+# 'cmd', an optional 'loglevel' can be passed (e.g. 'INFO', 'DEBUG') to
+# control command verbosity. By default, it is 'INFO'.
cmds: - cmd: 'echo hello' ``` -Then you can use this config file to build an "base" layer: +Then you can use this config file to build a "base" layer (make sure the `S3_ACCESS` and `S3_SECRET` environment variables are set to the S3 credentials if being used): + ``` -image-build --name base-os \ - --config base.yaml \ - --pkg-manager dnf \ - --parent scratch \ - --publish-tags 8.8 \ - --layer-type base +podman run \ + --rm \ + --device /dev/fuse \ + -v /path/to/config.yaml:/home/builder/config.yaml:Z \ + -e "S3_ACCESS=${S3_ACCESS}" \ + -e "S3_SECRET=${S3_SECRET}" \ + ghcr.io/openchami/image-build \ + image-build --config config.yaml --log-level DEBUG ``` -You can then build on top of this base os with a new config file, just point the `--parent` flag at the base os container image - See [Publishing Images](#publishing-images) below for more explanation on how `image-build` publishes images. +You can then build on top of this base os with a new config file, just point the `parent` key at the base os container image, in the above example, `registry.mysite.tld/my-images/rocky-base:8.10`. + ## Ansible Type Layer From 311b34d820a2943eaf3943e93c652c1f13660006 Mon Sep 17 00:00:00 2001 From: Devon Bautista Date: Fri, 21 Feb 2025 14:09:57 -0700 Subject: [PATCH 11/12] docs: remove bare-metal support; explicitly use Podman --- README.md | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index d5c05dd..47d2cfe 100644 --- a/README.md +++ b/README.md @@ -5,11 +5,8 @@ There are two supported modes at the moment, a "base" type layer and an "ansible # Running -The recommended way to run `image-build` is through the container as it avoids any Python dependency troubles. +The recommended and official way to run `image-build` is using the `ghcr.io/openchami/image-build` container (specifically using [Podman](https://podman.io)) as it avoids Python versioning/dependency troubles. 
Running bare-metal is not officially supported, though it is possible to do at one's own risk. Using Docker has caused issues and is not officially supported, though it is probably possible (again, at one's own risk) with some tweaking. -## Container - -The supported way for running the container is via [Podman](https://podman.io/). To build an image using the container, the config file needs to be mapped into the container, as well as the FUSE filesystem device: ``` @@ -23,26 +20,11 @@ podman run \ If the config.yaml pushes to S3, specify the credentials by adding `-e S3_ACCESS=` and `-e S3_SECRET=` to the command above. See [S3](#s3) below. -## Bare Metal - -> [!WARNING] -> Python >= 3.7 is required! - -Install the Python package dependencies: -``` -pip install -r requirements.txt -``` - -Run the tool: -``` -image-build --config /path/to/config.yaml -``` - # Building Container From the root of the repository: ``` -buildah bud -t ghcr.io/openchami/image-buildi:latest -f src/dockerfiles/Dockerfile . +buildah bud -t ghcr.io/openchami/image-build:latest -f src/dockerfiles/Dockerfile . ``` # Configuration From c8811c0f658d6aaf3c982fef93b4753db66dac43 Mon Sep 17 00:00:00 2001 From: Devon Bautista Date: Fri, 21 Feb 2025 14:50:52 -0700 Subject: [PATCH 12/12] docs: add config example and podman command for Ansible layer --- README.md | 72 +++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 57 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 47d2cfe..50ede2f 100644 --- a/README.md +++ b/README.md @@ -46,6 +46,8 @@ options: # Name and tag for this image, used in publishing to OCI registries # and S3 for identification. name: 'rocky-base' + # One or more tags to publish image with. If one, value is a string. + # If multiple, the value is a YAML array of strings. publish_tags: '8.10' # Distribution flavor of image. @@ -55,7 +57,7 @@ options: # filesystem. Currently, only OCI images can be used as parents. 
In # this example, the image is pushed to: # - # registry.mysite.tld/my-images/rocky-base:8.10 + # registry.mysite.tld/openchami/rocky-base:8.10 # # This value can be used as the value to 'parent' if one wished to use # the 'rocky-base:8.10' image as a parent. @@ -74,9 +76,9 @@ options: # The below config, combined with 'name' and 'publish_tags', will # publish this OCI image to: # - # registry.mysite.tld/my-images/rocky-base:8.10 + # registry.mysite.tld/openchami/rocky-base:8.10 # - publish_registry: 'registry.mysite.tld/my-images' + publish_registry: 'registry.mysite.tld/openchami' registry_opts_push: - '--tls-verify=false' @@ -137,25 +139,65 @@ podman run \ See [Publishing Images](#publishing-images) below for more explanation on how `image-build` publishes images. -You can then build on top of this base os with a new config file, just point the `parent` key at the base os container image, in the above example, `registry.mysite.tld/my-images/rocky-base:8.10`. +You can then build on top of this base os with a new config file, just point the `parent` key at the base os container image, in the above example, `registry.mysite.tld/openchami/rocky-base:8.10`. ## Ansible Type Layer -You can also run an ansible playbook against a buildah container. This type using the Buildah connection plugin in ansible to treat the container as a host. -``` -image-build \ - --name ansible-layer \ - --parent base-os \ - --groups compute \ - --pb playbook.yaml \ - --inventory my_inventory/ \ - --publish-tags v1 \ - --layer-type ansible +You can also run an Ansible playbook against a buildah container. This type of layer uses the Buildah connection plugin in Ansible to treat the container as a host. + +Configuration for an Ansible-type layer is largely the same as a base-type layer configuration with a few differences. + +```yaml +# An Ansible-type layer only needs the global options block. 
+options:
+  # Layer type is 'ansible' instead of 'base'
+  layer_type: 'ansible'
+
+  # Ansible-specific options.
+  #
+  # 'groups' defines the Ansible groups in the passed inventory to run the
+  # playbook(s) on.
+  groups:
+    - 'img_ochami_compute'
+    - 'img_ochami'
+  #
+  # The playbook(s) to run against the image.
+  playbooks: 'playbooks/images/compute.yaml'
+  #
+  # The Ansible inventory to pass, corresponding to the playbook(s).
+  inventory: 'inventory/'
+
+  # Everything else is the same format as base layer.
+  name: 'ansible-layer'
+  publish_tags: '8.10'
+  parent: 'registry.mysite.tld/openchami/rocky-base:8.10'
+  publish_registry: 'registry.mysite.tld/openchami'
+  registry_opts_push:
+    - '--tls-verify=false'
+  publish_s3: 'http://s3.mysite.tld'
+  s3_prefix: 'compute/ansible/'
+  s3_bucket: 'boot-images'
 ```
 
-This requires the parent to be setup to run ansible tasks
+Build the image with:
+
+```
+podman run \
+  --rm \
+  --device /dev/fuse \
+  -v /path/to/config.yaml:/home/builder/config.yaml:Z \
+  -v /path/to/ansible/inventory/:/home/builder/inventory/:Z \
+  -v /path/to/ansible/playbooks/:/home/builder/playbooks/:Z \
+  -e "S3_ACCESS=${S3_ACCESS}" \
+  -e "S3_SECRET=${S3_SECRET}" \
+  ghcr.io/openchami/image-build \
+  image-build --config config.yaml --log-level DEBUG
+```
+> [!NOTE]
+> In order to be able to use Ansible on the image, the parent must be set up to
+> use Ansible (e.g. Ansible must be installed, etc.).
 
 # Publishing Images