Workflow file for this run

name: Flow-IPC pipeline
on:
push:
branches:
- main
pull_request:
branches:
- main
# Enables the manual-run button in the UI (the workflow file has to be in `main`, at least, for the button to appear):
workflow_dispatch:
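# (Aside, a hypothetical usage sketch: besides the UI button, such a workflow can also be kicked off
# from a terminal, assuming the GitHub CLI is available, with something like:
#   gh workflow run 'Flow-IPC pipeline' --ref main
# Not used by the pipeline itself; listed only for reference.)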
jobs:
# Impetus behind this is to set up at least one magic string used in 2+ places.
# Unfortunately simply using `env:` does not work, as jobs.<id>.if refuses to access the workflow-global `env.<id>`.
# Ridiculous. TODO: Revisit.
#
# Folding this into `setup` would have been fine, but as of this writing `setup` needs at least one constant
# from here, and getting the order of operations correct within that one job is non-trivial at best.
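# (For the record, the tempting-but-rejected approach would look roughly like the hypothetical sketch
# below; it fails because the `env` context is not available in job-level `if:` expressions:
#   env:
#     doc-commit-message: (Commit by workflow script) Update generated documentation.
#   jobs:
#     build:
#       if: github.event.head_commit.message != env.doc-commit-message  # <-- `env` not usable here
# )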
set-vars:
runs-on: ubuntu-latest
steps:
- name: Set variables/constants for subsequent use
run: |
# Set variables/constants for subsequent use.
outputs:
doc-commit-message: (Commit by workflow script) Update generated documentation.
# Impetus behind this is to gate whether the real jobs below actually need to run. Can of course also compute
# other things as needed.
setup:
needs: set-vars
runs-on: ubuntu-latest
steps:
- id: compute-proceed-else-not
name: Compute whether the main jobs should proceed or not
# For checking whether github.event.head_commit.message starts with needs.set-vars.outputs.doc-commit-message
# it is tempting to just use:
# '${{ github.event.head_commit.message }}' != '${{ needs.set-vars.outputs.doc-commit-message }}'*
# This usually works but is unsafe: if the head commit message contains, for example, a single-quote,
# the shell syntax breaks down. One approach would be to use the pipeline expression function startsWith()
# before `run`; but apparently it cannot be done directly inside `run`, so it is a pain. Staying with
# shell scripting, we can instead use here-doc syntax and a temp file; as long as the commit message does
# not contain the here-doc terminator token, we are fine.
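# (For reference, the expression-based alternative -- a hypothetical sketch only, not what we use --
# would look roughly like the following `if:` on the downstream jobs:
#   if: |
#     (github.ref != 'refs/heads/main') || (github.event_name != 'push')
#       || (!startsWith(github.event.head_commit.message, needs.set-vars.outputs.doc-commit-message))
# )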
run: |
# Compute whether the main jobs should proceed or not.
TMP_MSG=/tmp/flow-ipc-pipeline-head-cmt-msg.txt
cat <<'FLOW_IPC_PIPELINE_HEAD_CMD_MSG_EOF' > $TMP_MSG
${{ github.event.head_commit.message }}
FLOW_IPC_PIPELINE_HEAD_CMD_MSG_EOF
if [ '${{ github.ref }}' != 'refs/heads/main' ] || \
[ '${{ github.event_name }}' != 'push' ] || \
! { head --lines=1 $TMP_MSG | grep -Fxq '${{ needs.set-vars.outputs.doc-commit-message }}'; }; then
echo 'proceed-else-not=true' >> $GITHUB_OUTPUT
else
echo 'proceed-else-not=false' >> $GITHUB_OUTPUT
echo 'The real jobs will not run: earlier `doc` job checked-in generated docs to `main`.'
echo 'That is not a source change and requires no actual pipeline to execute.'
fi
outputs:
proceed-else-not: ${{ steps.compute-proceed-else-not.outputs.proceed-else-not }}
build:
needs: [setup, set-vars]
if: |
needs.setup.outputs.proceed-else-not == 'true'
strategy:
fail-fast: false
matrix:
compiler:
- id: gcc-9
name: gcc
version: 9
c-path: /usr/bin/gcc-9
cpp-path: /usr/bin/g++-9
- id: gcc-10
name: gcc
version: 10
c-path: /usr/bin/gcc-10
cpp-path: /usr/bin/g++-10
- id: gcc-11
name: gcc
version: 11
c-path: /usr/bin/gcc-11
cpp-path: /usr/bin/g++-11
- id: gcc-13
name: gcc
version: 13
c-path: /usr/bin/gcc-13
cpp-path: /usr/bin/g++-13
- id: clang-13
name: clang
version: 13
c-path: /usr/bin/clang-13
cpp-path: /usr/bin/clang++-13
- id: clang-15
name: clang
version: 15
c-path: /usr/bin/clang-15
cpp-path: /usr/bin/clang++-15
- id: clang-16
name: clang
version: 16
c-path: /usr/bin/clang-16
cpp-path: /usr/bin/clang++-16
install: True
- id: clang-17
name: clang
version: 17
c-path: /usr/bin/clang-17
cpp-path: /usr/bin/clang++-17
install: True
build-test-cfg:
- id: debug
conan-profile-build-type: Debug
conan-profile-jemalloc-build-type: Debug
# In any case Debug, at the CMake script (in meta-project ./, and in flow/, ipc_*/) level,
# means LTO will be ignored (only *Rel* build-types enable LTO, if so instructed).
# Still keeping this here to make that clear to the reader/maintainer. Could remove it though.
no-lto: True
- id: release
conan-profile-build-type: Release
conan-profile-jemalloc-build-type: Release
# Leaving no-lto at default (false); full-on-optimized-no-debug is the quintessential LTO use case.
- id: relwithdebinfo
conan-profile-build-type: RelWithDebInfo
conan-profile-jemalloc-build-type: Release
# As of this writing RelWithDebInfo (in CMake, and Conan in our case at least doesn't override it)
# defaults to -O2 (not -O3), which isn't a super-effective way of deploying LTO. Plus
# this gives us a test of non-LTO building.
# TODO: Perhaps this should be a separate matrix dimension (LTO on, LTO off). Number of configs will
# jump up, but it is more methodical and nice.
no-lto: True
- id: minsizerel
conan-profile-build-type: MinSizeRel
conan-profile-jemalloc-build-type: Release
# Leaving no-lto at default (false); -Os (size-optimizing) with LTO on is pretty realistic.
- id: relwithdebinfo-asan
conan-profile-build-type: RelWithDebInfo
conan-profile-jemalloc-build-type: Release
conan-profile-custom-conf: |
# no-omit-frame-pointer recommended in (A|UB|M)SAN docs for nice stack traces.
tools.build:cflags = ["-fsanitize=address", "-fno-omit-frame-pointer"]
tools.build:cxxflags = ["-fsanitize=address", "-fno-omit-frame-pointer"]
tools.build:sharedlinkflags = ["-fsanitize=address"]
tools.build:exelinkflags = ["-fsanitize=address"]
# jemalloc recipe needs this as of this writing (even though it dupes the stuff just-above conceptually).
conan-profile-custom-buildenv: |
CXXFLAGS = -fsanitize=address -fno-omit-frame-pointer
CFLAGS = -fsanitize=address -fno-omit-frame-pointer
LDFLAGS = -fsanitize=address
conan-profile-custom-settings: |
compiler.sanitizer = address
conan-custom-settings-defs: | # Could we not copy/paste these 4x?
data['compiler']['gcc']['sanitizer'] = ['None', 'address', 'thread', 'memory', 'undefined']
data['compiler']['clang']['sanitizer'] = ['None', 'address', 'thread', 'memory', 'undefined']
sanitizer-name: asan # Used as internal enum of sorts + name of sanitizer-related dirs.
# At least ASAN with clang + LTO => cryptic link error.
# Regardless regular RelWithDebInfo already has no-lto=true; so we would follow suit. Just be aware
# that changing it to false for whatever reason => ASAN probably breaks.
no-lto: True
- id: relwithdebinfo-ubsan
conan-profile-build-type: RelWithDebInfo
conan-profile-jemalloc-build-type: Release
conan-profile-custom-conf: |
tools.build:cflags = ["-fsanitize=undefined", "-fno-omit-frame-pointer"]
tools.build:cxxflags = ["-fsanitize=undefined", "-fno-omit-frame-pointer"]
tools.build:sharedlinkflags = ["-fsanitize=undefined"]
tools.build:exelinkflags = ["-fsanitize=undefined"]
conan-profile-custom-buildenv: |
CXXFLAGS = -fsanitize=undefined -fno-omit-frame-pointer
CFLAGS = -fsanitize=undefined -fno-omit-frame-pointer
LDFLAGS = -fsanitize=undefined
conan-profile-custom-settings: |
compiler.sanitizer = undefined
conan-custom-settings-defs: |
data['compiler']['gcc']['sanitizer'] = ['None', 'address', 'thread', 'memory', 'undefined']
data['compiler']['clang']['sanitizer'] = ['None', 'address', 'thread', 'memory', 'undefined']
sanitizer-name: ubsan
# While UBSAN might work with LTO, I do not want the aggravation/entropy. Turn it off.
# Plus inheriting from regular RelWithDebInfo anyway.
no-lto: True
- id: relwithdebinfo-tsan
conan-profile-build-type: RelWithDebInfo
conan-profile-jemalloc-build-type: Release
conan-profile-custom-conf: |
# no-omit-frame-pointer recommended in (A|UB|M)SAN docs for nice stack traces; TSAN docs do not
# mention it. Given the various symbolizer problems mentioned elsewhere in comments in this file,
# it seemed prudent to keep this consistent with those other *SAN.
tools.build:cflags = ["-fsanitize=thread", "-fno-omit-frame-pointer"]
tools.build:cxxflags = ["-fsanitize=thread", "-fno-omit-frame-pointer"]
tools.build:sharedlinkflags = ["-fsanitize=thread", "-fno-omit-frame-pointer"]
tools.build:exelinkflags = ["-fsanitize=thread", "-fno-omit-frame-pointer"]
conan-profile-custom-buildenv: |
CXXFLAGS = -fsanitize=thread -fno-omit-frame-pointer
CFLAGS = -fsanitize=thread -fno-omit-frame-pointer
LDFLAGS = -fsanitize=thread -fno-omit-frame-pointer
conan-profile-custom-settings: |
compiler.sanitizer = thread
conan-custom-settings-defs: |
data['compiler']['gcc']['sanitizer'] = ['None', 'address', 'thread', 'memory', 'undefined']
data['compiler']['clang']['sanitizer'] = ['None', 'address', 'thread', 'memory', 'undefined']
sanitizer-name: tsan
# While TSAN might work with LTO, I do not want the aggravation/entropy. Turn it off.
# Also, for some clangs, there are TSAN WARNINGs at times about too-small symbolizer buffer or something;
# throwing LTO into the mix seems like unnecessary entropy.
# Plus inheriting from regular RelWithDebInfo anyway.
no-lto: True
- id: relwithdebinfo-msan
conan-profile-build-type: RelWithDebInfo
conan-profile-jemalloc-build-type: Release
conan-profile-custom-conf: |
tools.build:cflags = ["-fsanitize=memory", "-fno-omit-frame-pointer", "-fsanitize-ignorelist=/tmp/msan_ignore_list.cfg"]
tools.build:cxxflags = ["-fsanitize=memory", "-fno-omit-frame-pointer", "-fsanitize-ignorelist=/tmp/msan_ignore_list.cfg"]
tools.build:sharedlinkflags = ["-fsanitize=memory"]
tools.build:exelinkflags = ["-fsanitize=memory"]
conan-profile-custom-buildenv: |
CXXFLAGS = -fsanitize=memory -fno-omit-frame-pointer -fsanitize-ignorelist=/tmp/msan_ignore_list.cfg
CFLAGS = -fsanitize=memory -fno-omit-frame-pointer -fsanitize-ignorelist=/tmp/msan_ignore_list.cfg
LDFLAGS = -fsanitize=memory
conan-profile-custom-settings: |
compiler.sanitizer = memory
conan-custom-settings-defs: |
data['compiler']['gcc']['sanitizer'] = ['None', 'address', 'thread', 'memory', 'undefined']
data['compiler']['clang']['sanitizer'] = ['None', 'address', 'thread', 'memory', 'undefined']
sanitizer-name: msan
# While MSAN might work with LTO, I do not want the aggravation/entropy. Turn it off.
# Plus inheriting from regular RelWithDebInfo anyway.
no-lto: True
# We concentrate on clang sanitizers; they are newer/nicer; also MSAN is clang-only. So gcc ones excluded.
# Attention! Excluding some sanitizer job(s) (with these reasons):
# - MSAN: MSAN protects against reads of uninitialized memory; it is clang-only (not gcc), unlike the other
# *SAN. Its mission overlaps at least partially with UBSAN's; for example for sure there were a couple of
# uninitialized reads in test code which UBSAN caught. Its current state -- if not excluded -- is as
# follows: 1, due to (as of this writing) building dependencies, including the capnp compiler binary used
# during our build process, with the same compiler build config as the real code, ignore-list entries had
# to be added to get past these problems. 2, before main() in *all* our demos/tests Boost was doing some
# global init which MSAN did not like and hence aborted before main(); these are now ignored as well.
# 3, this got us into main() at least, but immediately cryptic aborts started, seemingly again originating
# in Boost (but requires detailed investigation to really understand). At this point I (ygoldfel)
# disabled MSAN and filed a ticket. The overall status: MSAN is said to be useful, even with UBSAN active,
# but all resources including official docs indicate that it is a high-maintenance tool:
# *All* linked code, including libc and libstdc++/libc++ (the former in our case as of this writing),
# must be instrumented to avoid cryptic false positives. The good news is we do build other things
# instrumented (Boost libs, jemalloc, capnp/kj, gtest); but not libstdc++ (which official docs recommend);
# this can be done but requires more work (I would suggest switching to libc++ in that case from the
# start, as the clang people made it and themselves build it instrumented for their testing).
# So the bottom line: MSAN is a tough cookie and requires more work before enabling. In the meantime we
# have many layers of protection, including ASAN and UBSAN and lots of unit and integration tests.
# So this status quo is pretty good. TODO: Do the work/get MSAN functional/useful; un-exclude it then.
# - *SAN with gcc: We concentrate on clang sanitizers; they are newer/nicer; having to worry about differences
# between them is an excessive burden. So excluding gcc ASAN/UBSAN/TSAN.
# - TODO: Consider reducing to the newest or most stable clang. Generally they just keep improving; the
# chances of something being uncaught (incorrectly) with a later version are slim. Not impossible though.
# Look into it. Update: As of this writing transport_test exercise mode/SHM-jemalloc sub-mode is
# disabled for TSAN clang-17 due to instability of TSAN itself. So clearly when it comes to TSAN
# (which is officially in beta as of clang-17), higher version does not mean everything is better.
# Hence perhaps this to-do is more of a longer-term thing, when all the sanitizers stabilize, or when
# we find a single very-stable config. Just remember we aren't testing the sanitizer or our code's
# ability to be sanitized; we just want to detect any problems in the code -- however we get there.
# - *SAN with clang-13 (but not 15+): As of this writing clang-13 produces some additional warnings,
# at least in TSAN, which appear to be related to nearby non-race messages like
# `==75454==WARNING: Symbolizer buffer too small`. As a result, we either have to eliminate the
# latter problem (TODO: look into it) or suppress false-positive race warnings that might only look
# like added problems due to the incomplete stack traces. In general scanning through clang-13-produced
# TSAN output, there is a chaotic feel to it, when it comes to stack output. Meanwhile so far
# there has been zero evidence that a lower-version clang has actual added problems versus the other
# compiler-versions, due to generating code differently. Since we have at least 3 other compiler-versions
# running all the *SAN, I (ygoldfel) decided to exclude clang-13 *SAN, until the chaotic *SAN messages can
# be at least reduced. In the meantime our *SAN coverage is still quite good.
exclude:
- build-test-cfg: { id: relwithdebinfo-msan }
- compiler: { id: gcc-9 }
build-test-cfg: { id: relwithdebinfo-asan }
- compiler: { id: gcc-10 }
build-test-cfg: { id: relwithdebinfo-asan }
- compiler: { id: gcc-11 }
build-test-cfg: { id: relwithdebinfo-asan }
- compiler: { id: gcc-13 }
build-test-cfg: { id: relwithdebinfo-asan }
- compiler: { id: clang-13 }
build-test-cfg: { id: relwithdebinfo-asan }
- compiler: { id: gcc-9 }
build-test-cfg: { id: relwithdebinfo-ubsan }
- compiler: { id: gcc-10 }
build-test-cfg: { id: relwithdebinfo-ubsan }
- compiler: { id: gcc-11 }
build-test-cfg: { id: relwithdebinfo-ubsan }
- compiler: { id: gcc-13 }
build-test-cfg: { id: relwithdebinfo-ubsan }
- compiler: { id: clang-13 }
build-test-cfg: { id: relwithdebinfo-ubsan }
- compiler: { id: gcc-9 }
build-test-cfg: { id: relwithdebinfo-tsan }
- compiler: { id: gcc-10 }
build-test-cfg: { id: relwithdebinfo-tsan }
- compiler: { id: gcc-11 }
build-test-cfg: { id: relwithdebinfo-tsan }
- compiler: { id: gcc-13 }
build-test-cfg: { id: relwithdebinfo-tsan }
- compiler: { id: clang-13 }
build-test-cfg: { id: relwithdebinfo-tsan }
# Not using ubuntu-latest, so as to avoid surprises with OS upgrades and such.
runs-on: ubuntu-22.04
name: ${{ matrix.compiler.id }}-${{ matrix.build-test-cfg.id }}
env:
build-dir: ${{ github.workspace }}/build/${{ matrix.build-test-cfg.conan-profile-build-type }}
install-root-dir: ${{ github.workspace }}/install/${{ matrix.build-test-cfg.conan-profile-build-type }}
# (Unfortunately cannot refer to earlier-assigned `env.` entries within subsequent ones.)
install-dir: ${{ github.workspace }}/install/${{ matrix.build-test-cfg.conan-profile-build-type }}/usr/local
# Target file, as read by sanitized executable being invoked.
san-suppress-cfg-file: ${{ github.workspace }}/install/${{ matrix.build-test-cfg.conan-profile-build-type }}/usr/local/bin/san_suppressions.cfg
# Relative path (including file name) to suppressions file in a given context (which is given as dir name off
# which this path works). Possible contexts as of this writing: ${{ github.workspace }}/<module>/src
# (suppressions endemic to lib<module>); ${{ github.workspace }}/.../<test's source code dir>
# (suppressions endemic to that test specifically). This suppressions file holds the compiler-version-independent
# entries. (Malformed and ignored if matrix.build-test-cfg is not `*san`, meaning not sanitizer-enabled.)
san-suppress-cfg-in-file1: sanitize/${{ matrix.build-test-cfg.sanitizer-name }}/suppressions_${{ matrix.compiler.name }}.cfg
# Same but contains compiler version-specific entries (on top of those in san-suppress-cfg-file).
san-suppress-cfg-in-file2: sanitize/${{ matrix.build-test-cfg.sanitizer-name }}/suppressions_${{ matrix.compiler.name }}_${{ matrix.compiler.version }}.cfg
# Run-time controls for various sanitizers. Invoke before running test but *after* assembling suppressions
# file ${{ env.san-suppress-cfg-file }} if any (clear it if needed, as it might be left over from a previous
# test). The proper technique is: 1, which of the suppression contexts (see above) are relevant?
# (Safest is to specify all contexts; as you'll see just below, it's fine if there are no files in a given
# context. However, specifying it that way everywhere would make the code tedious; so it's fine to skip contexts where
# we know that these days there are no suppressions.) Let the contexts' dirs be $DIR_A, $DIR_B, .... Then:
# 2, `{ cat $DIR_A/${{ env.san-suppress-cfg-in-file1 }} $DIR_A/${{ env.san-suppress-cfg-in-file2 }} \
# $DIR_B/${{ env.san-suppress-cfg-in-file1 }} $DIR_B/${{ env.san-suppress-cfg-in-file2 }} \
# ... \
# > ${{ env.san-suppress-cfg-file }} 2> /dev/null; } || true`
# 3, {{ env.setup-tests-env }}.
#
# Notes about items in *SAN_OPTIONS:
#
# Unclear if we need disable_coredump=0; it might be a gcc thing due to historic
# issues with core size with ASAN; it is not documented in the clang *SAN docs for any
# sanitizers; but it is accepted for ASAN and TSAN, seemingly, and maybe is at worst
# harmless with our compiler versions. TODO: Maybe revisit.
#
# print_stacktrace=1 for UBSAN is recommended by documentation for nice stack traces
# (also that is why we apply sanitizers to RelWithDebInfo; DebInfo part for the nice
# stack traces/etc.; Rel part to reduce the considerable slowdown from some of the
# sanitizers).
#
# second_deadlock_stack=1 for TSAN: TODO: Explain. Don't see it in clang TSAN docs.
#
# Caution! Setting multiple *SAN_OPTIONS (while invoking only 1 sanitizer, as we do)
# seems like it would be harmless and avoid the `if/elif`s... but is actually a
# nightmare; there is some interaction in clang (at least 13-17) which causes the
# wrong sanitizer to read another sanitizer's suppressions => suppression parse error =>
# none of the demos/tests get anywhere at all. So set just the right one!
setup-tests-env: |
if [ '${{ matrix.build-test-cfg.sanitizer-name }}' = asan ]; then
export ASAN_OPTIONS='disable_coredump=0'
echo "ASAN_OPTIONS = [$ASAN_OPTIONS]."
elif [ '${{ matrix.build-test-cfg.sanitizer-name }}' = ubsan ]; then
export SAN_SUPP=1
export SAN_SUPP_CFG=${{ github.workspace }}/install/${{ matrix.build-test-cfg.conan-profile-build-type }}/usr/local/bin/san_suppressions.cfg
export UBSAN_OPTIONS="disable_coredump=0 print_stacktrace=1 suppressions=$SAN_SUPP_CFG"
echo "UBSAN_OPTIONS = [$UBSAN_OPTIONS]."
elif [ '${{ matrix.build-test-cfg.sanitizer-name }}' = tsan ]; then
export SAN_SUPP=1
export SAN_SUPP_CFG=${{ github.workspace }}/install/${{ matrix.build-test-cfg.conan-profile-build-type }}/usr/local/bin/san_suppressions.cfg
export TSAN_OPTIONS="disable_coredump=0 second_deadlock_stack=1 suppressions=$SAN_SUPP_CFG"
echo "TSAN_OPTIONS = [$TSAN_OPTIONS]."
fi
if [ "$SAN_SUPP" != '' ]; then
echo 'Sanitizer [${{ matrix.build-test-cfg.sanitizer-name }}] suppressions cfg contents:'
echo '[[[ file--'
cat $SAN_SUPP_CFG
echo '--file ]]]'
fi
# TODO: Ideally would go, like other things, under github.workspace somewhere (maybe build/),
# but that var isn't available in `strategy` up above, where we specify the compiler option
# pointing to this file. Surely we could cook something up; but meanwhile /tmp works fine.
msan-ignore-list-cfg-file: /tmp/msan_ignore_list.cfg
steps:
- name: Update available software list for apt-get
run: |
# Update available software list for apt-get.
lsb_release -a
sudo apt-get update
- name: Checkout `ipc` repository and submodules (`flow`, `ipc_*`)
uses: actions/checkout@v4
with:
submodules: true
- name: Install clang compiler
if: |
matrix.compiler.install && (matrix.compiler.name == 'clang')
run: |
# Install clang compiler.
wget https://apt.llvm.org/llvm.sh
chmod u+x llvm.sh
sudo ./llvm.sh ${{ matrix.compiler.version }}
- name: Install gcc compiler
if: |
matrix.compiler.install && (matrix.compiler.name == 'gcc')
run: |
# Install gcc compiler.
sudo apt-get install -y software-properties-common
sudo add-apt-repository ppa:ubuntu-toolchain-r/test -y
sudo apt-get update
sudo apt-get install -y gcc-${{ matrix.compiler.version }} g++-${{ matrix.compiler.version }}
# We get the highest 1.x, instead of 2+, because there are certain dependency recipe issues (namely for jemalloc)
# with 2+. TODO: 1, expound more clearly here or elsewhere what those issues are; 2, solve those issues and
# thus move to Conan 2+; Conan 1.x is officially on "deprecation path." This will also help resolve some other
# to-dos more easily (targeting different build settings at built product source versus tools used to build it
# = prime example).
- name: Install the latest version of Conan which is less than 2
run: pip install 'conan<2'
- name: Add custom settings for Conan packages
if: |
matrix.build-test-cfg.conan-custom-settings-defs
run: |
# Add custom settings for Conan packages.
conan config init
pip install PyYAML
CONAN_SETTINGS_PATH=$(conan config home)/settings.yml
python -c "
import yaml
with open('$CONAN_SETTINGS_PATH', 'r') as file:
data = yaml.safe_load(file)
${{ matrix.build-test-cfg.conan-custom-settings-defs }}
with open('$CONAN_SETTINGS_PATH', 'w') as file:
yaml.dump(data, file)
"
# Important info/somewhat important TODO: The C[XX]FLAGS and linker-flags are supplied via
# ${{ matrix.build-test-cfg.conan-profile-custom-conf }} and
# ${{ matrix.build-test-cfg.conan-profile-custom-buildenv }}. These affect not just our objects/libs/executables;
# but also at least: 3rd party libs (jemalloc, capnp/kj, boost, gtest), 3rd party executables
# (capnp compiler binary; temp binaries created by auto-tools configure scripts to test features).
# In the case of the libs that's usually good; in particular things like
# -fsanitize=address (ASAN) (for build-type relwithdebinfo-asan) ideally are applied to all built code
# uniformly. (There are other libs being built, one can see: at least openssl and zlib; they're not really
# involved in our actual product, at least not in a core way, so for those it arguably matters less either way.)
# In the case of the binaries it is usually bad; we want the fastest, most normal tools possible, not ones
# built weirdly with whatever settings we're trying to test with our own software. For one it makes those
# tools slower -- perhaps not the hugest deal in practice here -- but it also generally increases entropy; and
# it has also resulted in various pain:
# - Cannot use -fno-sanitize-recover with UBSAN (abort program on warning) if we wanted to: it causes some
# hidden configure-script-generated binary abort => capnp binary build fails.
# - capnp binary fails MSAN at startup; hence capnp-compilation of our .capnp schemas fails.
# We have worked around all these, so the thing altogether works. It's just somewhat odd and entropy-ridden;
# and might cause maintainability problems over time, as it has already in the past.
# The TODO is to be more judicious about it
# and only apply these things to the libs/executables to which we want them applied. It is probably not so simple;
# but worst-case it should be possible to use something like build-type-cflags-override to target our code;
# and per-recipe (Boost, jemalloc, gtest...) techniques to target others; and not use the aggressive
# conan-profile-custom-conf and conan-profile-custom-buildenv. That said, there will be interesting subtleties
# like: libcapnp/kj are linked into capnp compiler binary; *and* to our code. So we should instrument it with
# sanitizer (when applicable); now some of the capnp-compiler-binary is instrumented; some isn't. Would that
# work, or would it be better to build capnp twice then (once for the binary, once for the linked libraries)?
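# (Purely as a hypothetical sketch of where that TODO could go -- exact per-package [conf] scoping
# syntax varies by Conan version, so treat this as an assumption -- the profile could target flags at
# individual dependencies rather than globally, e.g. something like:
#   [conf]
#   boost*:tools.build:cxxflags = ["-fsanitize=address", "-fno-omit-frame-pointer"]
# while our own code would get its flags via the `ipc:build_type_cflags_override` option instead.)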
- name: Create Conan profile
run: |
# Create Conan profile.
cat <<'EOF' > conan_profile
[settings]
compiler = ${{ matrix.compiler.name }}
compiler.version = ${{ matrix.compiler.version }}
compiler.cppstd = 17
# TODO: Consider testing with LLVM-libc++ also (with clang anyway).
compiler.libcxx = libstdc++11
arch = x86_64
os = Linux
jemalloc:build_type = ${{ matrix.build-test-cfg.conan-profile-jemalloc-build-type }}
build_type = ${{ matrix.build-test-cfg.conan-profile-build-type }}
${{ matrix.build-test-cfg.conan-profile-custom-settings }}
[conf]
tools.env.virtualenv:auto_use = True
tools.build:compiler_executables = {"c": "${{ matrix.compiler.c-path }}", "cpp": "${{ matrix.compiler.cpp-path }}"}
${{ matrix.build-test-cfg.conan-profile-custom-conf }}
[buildenv]
CC = ${{ matrix.compiler.c-path }}
CXX = ${{ matrix.compiler.cpp-path }}
${{ matrix.build-test-cfg.conan-profile-custom-buildenv }}
[options]
flow:doc = False
flow:build = True
ipc:doc = False
ipc:build = True
EOF
# (Oddly enough `no-lto: True` and `...: False` still evaluate as `true` and `false` respectively;
# that is why the comparison below is against `false` and not `False`.)
if [ '${{ matrix.build-test-cfg.no-lto }}' != '' ] && [ '${{ matrix.build-test-cfg.no-lto }}' != 'false' ]; then
echo 'ipc:build_no_lto = True' >> conan_profile
fi
if [ '${{ matrix.build-test-cfg.build-type-cflags-override }}' != '' ]; then
echo 'ipc:build_type_cflags_override = ${{ matrix.build-test-cfg.build-type-cflags-override }}' >> conan_profile
fi
# We need to prepare a sanitizer ignore-list in MSAN mode. Background for this is subtle and annoying:
# As it stands, whatever matrix compiler/build-type is chosen applies not just to our code (correct)
# and 3rd party libraries we link like lib{boost_*|capnp|kj|jemalloc} (semi-optional but good) but also
# unfortunately any items built from source during "Install Flow-IPC dependencies" step that we then
# use during the build step for our own code subsequently. At a minimum this will slow down
# such programs. (For the time being we accept this as not-so-bad; to target this config at some things
# but not others is hard/a ticket.) In particular, though, the capnp compiler binary is built this way;
# and our "Build targets" step uses it to build key things (namely convert .capnp schemas into .c++
# and .h sources which are then themselves compiled/used in compilation). In the case of MSAN, this
# version of capnp compiler happens to trigger several MSAN failures (presumably they are not a true
# problem, and it's not our job really to sanitize capnp -- though we can file tickets for them and/or
# issue PRs; but I digress). So in MSAN mode our build step fails, when capnp compiler itself aborts
# with MSAN failures. One approach is to not apply MSAN to capnp compiler (no-go for now; it's a ticket
# as mentioned); another is to use Conan to patch the capnp package for them (not a bad idea; ticket filed);
# and lastly we can put the specific failures on the ignore-list for MSAN. For now we do that; while
# unpleasant it does get the job done. TODO: Revisit/resolve tickets/improve (see above).
- name: Prepare MSAN sanitizer compile-time config file(s)
if: |
(!cancelled()) && (matrix.build-test-cfg.sanitizer-name == 'msan')
run: |
# Prepare MSAN sanitizer compile-time config file(s).
cat <<'EOF' > ${{ env.msan-ignore-list-cfg-file }}
[memory]
# Warning: In clang-18 there are breaking changes in how globs/regexes are interpreted. See docs.
# Currently assuming clang-17 or lower.
#
# capnp compiler MSAN failures suppressed:
src:*/kj/filesystem-disk-unix.*
src:*/bits/stl_tree.h
EOF
# Append to that the suppressed items from the source tree. These apply to the real code being
# built below, so just stylistically it is nicer to keep them there along with other
# sanitizers' suppressions. (Note, though, that MSAN does not have a run-time suppression
# system; only these ignore-lists. The other sanitizers have ignore-lists too, in addition to their
# run-time suppressions; the format is totally different between the 2 mechanisms.)
# Our MSAN support is budding compared to UBSAN/ASAN/TSAN; so just specify the one ignore-list file
# we have now. TODO: If/when MSAN support gets filled out like the others', then use a context system
# a-la env.setup-tests-env.
cat ${{ github.workspace }}/flow/src/sanitize/msan/ignore_list_${{ matrix.compiler.name }}.cfg \
>> ${{ env.msan-ignore-list-cfg-file }}
echo 'The combined MSAN ignore-list config file follows:'
cat ${{ env.msan-ignore-list-cfg-file }}
- name: Install Flow-IPC dependencies with Conan using the profile
run: |
# Install Flow-IPC dependencies with Conan using the profile.
conan editable add flow flow/1.0
conan install . \
--profile:build conan_profile --profile:host conan_profile --build missing
- name: Build libraries and demos/tests with Conan
run: conan build .
- name: Install built targets with Makefile
run: |
make install \
--directory ${{ env.build-dir }} DESTDIR=${{ env.install-root-dir }}
# Save runner space: blow away build dir after install.
rm -rf ${{ env.build-dir }}
# From now on use !cancelled() to try to run any test/demo that exists regardless
# of preceding failures if any. Same-ish (always()) with the log-upload at the end.
# Worst-case they'll all fail in super-basic ways and immediately; no extra harm done.
# From now on save logs in install-dir/bin/logs. They will be tarred up and uploaded
# as artifacts at the end. For those tests below that produce separate logs as files
# to begin with, this is a no-brainer. For those that use stdout/stderr (as of this writing
# mainly unit_test, though the link tests also produce short logs),
# the reason to do this is 2-fold.
# - UBSAN in particular produces non-fatal error output about problems it detects.
# We need to analyze them after the fact and fail a step below in that case to
# alert developers who would then fix such new problems.
# - One can also build UBSAN-mode things with -fno-sanitize-recover which would
# cause abnormal program exit on *first* error. We do not do this for these
# reasons:
# - It is more convenient to let it continue and thus show all problems in one
# shot.
# - There is a side problem: At this time compile settings, including
# in UBSAN case `-fsanitize=undefined -fno-sanitize-recover`, are applied not
# just to our code or 3rd party libraries but also any other stuff built
# (due to setting C[XX]FLAGS in Conan profile). Targeting just the exact
# stuff we want with those is hard and a separate project/ticket.
# In the meantime -fno-sanitize-recover causes completely unrelated program
# halts during the very-early step, when building dependencies including
# capnp; some autotools configure.sh fails crazily, and nothing can work
# from that point on due to dependencies-install step failing. So at this
# time -fno-sanitize-recover is a no-go; it affects too much unrelated stuff.
# - It is more consistent/convenient to get all the bulky logs in one place as an
# artifact, rather than some in the pipeline output, others in the artifacts.
# (This preference is subjective, yes.)
# Here, as in all other tests below, we assemble a suppressions file in case this is a sanitized
# run; and we follow the procedure explained near setup-tests-env definition. To reiterate: to avoid
# tedium, but at the cost of maintainability of this file (meaning if a suppressions context is added then
# a few lines would need to be added here), we only list those contexts where *any* sanitizer has
# *any* suppression; otherwise we skip it for brevity. `find . -name 'suppressions*.cfg'` is pretty useful
# to determine their presence in addition to whether the test itself has its specific suppressions of any kind.
- name: Run link test [`ipc_core` - Flow-IPC Core]
if: |
!cancelled()
run: |
# Run link test [`ipc_core` - Flow-IPC Core].
cd ${{ env.install-dir }}/bin
mkdir -p logs/ipc_core_link_test
SUPP_DIR_A=${{ github.workspace }}/flow/src
{ cat $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file2 }} \
> ${{ env.san-suppress-cfg-file }} 2> /dev/null; } || true
${{ env.setup-tests-env }}
./ipc_core_link_test.exec > logs/ipc_core_link_test/console.log 2>&1
- name: Run link test [`ipc_transport_structured` - Flow-IPC Structured Transport]
if: |
!cancelled()
run: |
# Run link test [`ipc_transport_structured` - Flow-IPC Structured Transport].
cd ${{ env.install-dir }}/bin
mkdir -p logs/ipc_transport_structured_link_test
SUPP_DIR_A=${{ github.workspace }}/flow/src
{ cat $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file2 }} \
> ${{ env.san-suppress-cfg-file }} 2> /dev/null; } || true
${{ env.setup-tests-env }}
./ipc_transport_structured_link_test.exec > logs/ipc_transport_structured_link_test/console.log 2>&1
- name: Run link test [`ipc_session` - Flow-IPC Sessions]
if: |
!cancelled()
run: |
# Run link test [`ipc_session` - Flow-IPC Sessions].
cd ${{ env.install-dir }}/bin
mkdir -p logs/ipc_session_link_test
SUPP_DIR_A=${{ github.workspace }}/flow/src
SUPP_DIR_B=${{ github.workspace }}/ipc_session/src
{ cat $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file2 }} \
$SUPP_DIR_B/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_B/${{ env.san-suppress-cfg-in-file2 }} \
> ${{ env.san-suppress-cfg-file }} 2> /dev/null; } || true
${{ env.setup-tests-env }}
./ipc_session_link_test_srv.exec > logs/ipc_session_link_test/srv.console.log 2>&1 &
sleep 1
./ipc_session_link_test_cli.exec > logs/ipc_session_link_test/cli.console.log 2>&1
- name: Run link test [`ipc_shm` - Flow-IPC Shared Memory]
if: |
!cancelled()
run: |
# Run link test [`ipc_shm` - Flow-IPC Shared Memory].
cd ${{ env.install-dir }}/bin
mkdir -p logs/ipc_shm_link_test
SUPP_DIR_A=${{ github.workspace }}/flow/src
SUPP_DIR_B=${{ github.workspace }}/ipc_session/src
{ cat $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file2 }} \
$SUPP_DIR_B/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_B/${{ env.san-suppress-cfg-in-file2 }} \
> ${{ env.san-suppress-cfg-file }} 2> /dev/null; } || true
${{ env.setup-tests-env }}
./ipc_shm_link_test_srv.exec > logs/ipc_shm_link_test/srv.console.log 2>&1 &
sleep 1
./ipc_shm_link_test_cli.exec > logs/ipc_shm_link_test/cli.console.log 2>&1
- name: Run link test [`ipc_shm_arena_lend` - Flow-IPC SHM-jemalloc]
if: |
!cancelled()
run: |
# Run link test [`ipc_shm_arena_lend` - Flow-IPC SHM-jemalloc].
cd ${{ env.install-dir }}/bin
mkdir -p logs/ipc_shm_arena_lend_link_test
SUPP_DIR_A=${{ github.workspace }}/flow/src
SUPP_DIR_B=${{ github.workspace }}/ipc_session/src
SUPP_DIR_C=${{ github.workspace }}/ipc_shm_arena_lend/src
{ cat $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file2 }} \
$SUPP_DIR_B/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_B/${{ env.san-suppress-cfg-in-file2 }} \
$SUPP_DIR_C/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_C/${{ env.san-suppress-cfg-in-file2 }} \
> ${{ env.san-suppress-cfg-file }} 2> /dev/null; } || true
${{ env.setup-tests-env }}
./ipc_shm_arena_lend_link_test_srv.exec > logs/ipc_shm_arena_lend_link_test/srv.console.log 2>&1 &
sleep 1
./ipc_shm_arena_lend_link_test_cli.exec > logs/ipc_shm_arena_lend_link_test/cli.console.log 2>&1
- name: Run unit tests
if: |
!cancelled()
run: |
# Run unit tests.
cd ${{ env.install-dir }}/bin
# There are some newline issues with the possible additional args; so we need to make a wrapper script
# and then redirect its output as desired.
cat <<'EOF' > ${{ env.install-dir }}/bin/run_unit_test.sh
./libipc_unit_test.exec "$@"
EOF
RUN_IT='/usr/bin/bash -e ${{ env.install-dir }}/bin/run_unit_test.sh'
OUT_DIR=logs/libipc_unit_test
mkdir -p $OUT_DIR
SUPP_DIR_A=${{ github.workspace }}/flow/src
SUPP_DIR_B=${{ github.workspace }}/ipc_session/src
SUPP_DIR_C=${{ github.workspace }}/ipc_shm_arena_lend/src
# As of this writing there are TSAN suppressions for this test specifically. TODO: Revisit them; and then this.
SUPP_DIR_OWN=${{ github.workspace }}/test/suite/unit_test
{ cat $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file2 }} \
$SUPP_DIR_B/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_B/${{ env.san-suppress-cfg-in-file2 }} \
$SUPP_DIR_C/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_C/${{ env.san-suppress-cfg-in-file2 }} \
$SUPP_DIR_OWN/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_OWN/${{ env.san-suppress-cfg-in-file2 }} \
> ${{ env.san-suppress-cfg-file }} 2> /dev/null; } || true
${{ env.setup-tests-env }}
if [ '${{ matrix.build-test-cfg.sanitizer-name }}' != tsan ]; then
# This is what we want to do normally.
$RUN_IT > $OUT_DIR/console.log 2>&1
else # if sanitizer-name is tsan:
# With TSAN -- which is officially in beta as of even clang-18 (as of this writing we are on 17 at best) --
# our tests hit some kind of limitation (possibly bug). What happens is the console prints a-la
# ThreadSanitizer: CHECK failed: sanitizer_deadlock_detector.h:67
# "((n_all_locks_)) < (((sizeof(all_locks_with_contexts_)/sizeof((all_locks_with_contexts_)[0]))))"
# (0x40, 0x40) (tid=4111122)
# and the program hangs indefinitely. This seems to be saying the # of locks reached 64, so TSAN gave up.
# A ticket has been filed to look into this; but in the meantime we work around it. Namely, we have
# observed that:
# - A particular test, even if run by itself, always triggers it. So we are forced to skip that one.
# - In clang-17 yet another test results in a different problem:
# LLVM ERROR: Sections with relocations should have an address of 0
# ...
# That one is also observed for transport_test exercise mode SHM-jemalloc sub-mode, which as of this
# writing is skipped for that compiler+TSAN for that reason. We do something similar here in skipping
# that particular test too in that situation. TODO: It's a test that starts tons of threads.
# Consider other approaches; maybe reduce number of threads or...? Ticket filed in any case.
# As for the other tests -- the vast majority -- outside of those 1-2:
# - A particular small subset of tests, if involved, triggers it. However if run separately from the
# others -- even as a group -- then it is OK. So we run all but that group; then just that
# group.
LOCK_FATAL_TESTS=Jemalloc_shm_pool_collection_test.Multiprocess
if [ '${{ matrix.compiler.id }}' = 'clang-17' ]; then
LOCK_FATAL_TESTS=$LOCK_FATAL_TESTS:Jemalloc_shm_pool_collection_test.Multithread_load
fi
LOCK_HEAVY_TESTS='Shm_session_test.External_process_array:\
Shm_session_test.External_process_vector_offset_ptr:\
Shm_session_test.External_process_string_offset_ptr:\
Shm_session_test.External_process_list_offset_ptr:\
Shm_session_test.Multisession_external_process:\
Shm_session_test.Disconnected_external_process:\
Borrower_shm_pool_collection_test.Multiprocess:\
Shm_pool_collection_test.Multiprocess'
# Get rid of newlines/backslashes....
LOCK_HEAVY_TESTS="$(echo "$LOCK_HEAVY_TESTS" | tr -d '[:space:]\\')"
if $RUN_IT --gtest_filter=-$LOCK_HEAVY_TESTS:$LOCK_FATAL_TESTS > $OUT_DIR/console.minus-lock-heavy.log 2>&1; \
then EC_ALL=0; else EC_ALL=$?; fi
if $RUN_IT --gtest_filter=$LOCK_HEAVY_TESTS > $OUT_DIR/console.lock-heavy.log 2>&1; \
then EC_LH=0; else EC_LH=$?; fi
echo "Exit codes: all-minus-lock-heavy = $EC_ALL; lock-heavy = $EC_LH."
[ $EC_ALL -eq 0 ] && [ $EC_LH -eq 0 ]
fi
# For tests below where on failure it can be very helpful to look at higher-verbosity logs,
# any failing step is repeated with logging hiked up. Note that formally (and practically)
# higher-than-INFO verbosity is allowed to affect performance and thus is *not* right for
# the main integration test run. However on failure all bets are off, and we just want the
# info we can get. (Also higher-verbosity runs can take significantly longer; no one wants that.)
#
# As of this writing unit_test already runs with high verbosity, so we do not re-run on failure
# like these. The `link_test`s above are very basic tests, essentially there to ensure all built okay
# and run a sanity-check of a core feature of each of our libs; so no re-run on failure there either.
# This follows the instructions in bin/transport_test/README.txt.
- name: Prepare run script for [transport_test - Scripted mode] variations below
if: |
!cancelled()
run: |
# Prepare run script for [transport_test - Scripted mode] variations below.
cat <<'EOF' > ${{ env.install-dir }}/bin/run_transport_test_sc.sh
echo "Log level: [$1]."
cd ${{ env.install-dir }}/bin/transport_test
OUT_DIR_NAME=log_level_$1
OUT_DIR=../logs/transport_test/scripted/$OUT_DIR_NAME
mkdir -p $OUT_DIR
SUPP_DIR_A=${{ github.workspace }}/flow/src
# In scripted mode ipc_session, while linked, is not invoked in any way; so skip its suppressions.
{ cat $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file2 }} \
> ${{ env.san-suppress-cfg-file }} 2> /dev/null; } || true
${{ env.setup-tests-env }}
./transport_test.exec scripted $OUT_DIR/srv.log info $1 \
< srv-script.txt > $OUT_DIR/srv.console.log 2>&1 &
SRV_PID=$!
sleep 1
./transport_test.exec scripted $OUT_DIR/cli.log info $1 \
< cli-script.txt > $OUT_DIR/cli.console.log 2>&1 &
CLI_PID=$!
if wait $SRV_PID; then SRV_EC=0; else SRV_EC=$?; fi
echo "Server finished with code [$SRV_EC]."
if wait $CLI_PID; then CLI_EC=0; else CLI_EC=$?; fi
echo "Client finished with code [$CLI_EC]."
[ $SRV_EC -eq 0 ] && [ $CLI_EC -eq 0 ]
EOF
- name: Run integration test [transport_test - Scripted mode]
id: transport_test_scripted
if: |
!cancelled()
run: /usr/bin/bash -e ${{ env.install-dir }}/bin/run_transport_test_sc.sh info
- name: Re-run with increased logging, on failure only
if: |
(!cancelled()) && (steps.transport_test_scripted.outcome == 'failure')
run: /usr/bin/bash -e ${{ env.install-dir }}/bin/run_transport_test_sc.sh data
# The following [Exercise mode] tests follow the instructions in bin/transport_test/README.txt.
# Note that the creation of ~/bin/ex_..._run and placement of executables there, plus
# /tmp/var/run for run-time files (PID files and similar), is a necessary consequence of
# the ipc::session safety model for establishing IPC conversations (sessions).
- name: Prepare IPC-session safety-friendly run-time environment for [transport_test - Exercise mode]
if: |
!cancelled()
run: |
# Prepare IPC-session safety-friendly run-time environment for [transport_test - Exercise mode].
mkdir -p ~/bin/ex_srv_run ~/bin/ex_cli_run
mkdir -p /tmp/var/run
cp -v ${{ env.install-dir }}/bin/transport_test/transport_test.exec \
~/bin/ex_srv.exec
cp -v ~/bin/ex_srv.exec ~/bin/ex_cli.exec
- name: Prepare run script for [transport_test - Exercise mode] variations below
if: |
!cancelled()
run: |
# Prepare run script for [transport_test - Exercise mode] variations below.
cat <<'EOF' > ${{ env.install-dir }}/bin/run_transport_test_ex.sh
# Script created by pipeline during job.
echo "Log level: [$1]."
echo "Exercise sub-mode: [$2]."
echo "Sub-mode snippet (none or '-shm-?'): [$3]."
OUT_DIR=${{ env.install-dir }}/bin/logs/transport_test/exercise/$2
mkdir -p $OUT_DIR
SUPP_DIR_A=${{ github.workspace }}/flow/src
if [ "$3" == '-shm-j' ]; then
# SHM-jemalloc mode => jemalloc is in fact exercised => suppress libjemalloc stuff.
# But, nicely, no transport_test-specific suppressions needed as of this writing.
SUPP_DIR_B=${{ github.workspace }}/ipc_shm_arena_lend/src
elif [ "$3" == '-shm-c' ]; then
SUPP_DIR_B=${{ github.workspace }}/test/suite/transport_test/shm-c
else # if [ "$3" == '' ]; then
SUPP_DIR_B=${{ github.workspace }}/test/suite/transport_test/heap
fi
SUPP_DIR_C=${{ github.workspace }}/ipc_session/src # Session stuff invoked regardless of sub-mode.
{ cat $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file2 }} \
$SUPP_DIR_B/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_B/${{ env.san-suppress-cfg-in-file2 }} \
$SUPP_DIR_C/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_C/${{ env.san-suppress-cfg-in-file2 }} \
> ${{ env.san-suppress-cfg-file }} 2> /dev/null; } || true
${{ env.setup-tests-env }}
~/bin/ex_srv.exec exercise-srv$3 $OUT_DIR/srv.log info $1 \
> $OUT_DIR/srv.console.log 2>&1 &
SRV_PID=$!
sleep 5
~/bin/ex_cli.exec exercise-cli$3 $OUT_DIR/cli.log info $1 \
> $OUT_DIR/cli.console.log 2>&1 &
CLI_PID=$!
if wait $SRV_PID; then SRV_EC=0; else SRV_EC=$?; fi
echo "Server finished with code [$SRV_EC]."
if wait $CLI_PID; then CLI_EC=0; else CLI_EC=$?; fi
echo "Client finished with code [$CLI_EC]."
[ $SRV_EC -eq 0 ] && [ $CLI_EC -eq 0 ]
EOF
- name: Run integration test [transport_test - Exercise mode - Heap sub-mode]
id: transport_test_ex_heap
if: |
!cancelled()
run: /usr/bin/bash -e ${{ env.install-dir }}/bin/run_transport_test_ex.sh info heap
- name: Re-run with increased logging, on failure only
if: |
(!cancelled()) && (steps.transport_test_ex_heap.outcome == 'failure')
run: /usr/bin/bash -e ${{ env.install-dir }}/bin/run_transport_test_ex.sh data heap_log_level_data
- name: Run integration test [transport_test - Exercise mode - SHM-classic sub-mode]
id: transport_test_ex_shm_c
if: |
!cancelled()
run: /usr/bin/bash -e ${{ env.install-dir }}/bin/run_transport_test_ex.sh info shm_classic -shm-c
- name: Re-run with increased logging, on failure only
if: |
(!cancelled()) && (steps.transport_test_ex_shm_c.outcome == 'failure')
run: /usr/bin/bash -e ${{ env.install-dir }}/bin/run_transport_test_ex.sh data shm_classic_log_level_data -shm-c
# Disabling this particular test run for the specific case of clang-17 in TSAN (thread sanitizer) config
# (in particular at least 2 other clangs+TSAN are exercised, so the TSAN coverage is still good).
# First the reason in detail: This run semi-reliably (50%+) fails at this point in the server binary:
# 2023-12-20 11:36:11.322479842 +0000 [info]: Tguy: ex_srv.hpp:send_req_b(1428): App_session [0x7b3800008180]:
# Chan B[0]: Filling/send()ing payload (description = [reuse out-message + SHM-handle to modified (unless
# SHM-jemalloc) existing STL data]; alt-payload? = [0]; reusing msg? = [1]; reusing SHM payload? = [1]).
# LLVM ERROR: Sections with relocations should have an address of 0
# PLEASE submit a bug report to https://github.com/llvm/llvm-project/issues/ and include the crash backtrace.
# Stack dump:
# 0. Program arguments: /usr/bin/llvm-symbolizer-17 --demangle --inlines --default-arch=x86_64
# Stack dump without symbol names (ensure you have llvm-symbolizer in your PATH or set the environment var `LLVM_SYMBOLIZER_PATH` to point to it):
# ...
# ==77990==WARNING: Can't read from symbolizer at fd 599
# 2023-12-20 11:36:31.592293322 +0000 [info]: Tguy: ex_srv.hpp:send_req_b(1547): App_session [0x7b3800008180]: Chan B[0]: Filling done. Now to send.
# Sometimes the exact point is different, depending on timing; but in any case it is always the above
# TSAN/LLVM error, at which point the thread gets stuck for a long time (10+ seconds); but eventually gets
# unstuck; however transport_test happens to be testing a feature in a certain way so that a giant blocking
# operation in this thread delays certain processing, causes an internal timeout, and the test exits/fails.
# Sure, we could make some changes to the test for that to not happen, but that's beside the point: TSAN
# at run-time is trying to do something and fails terribly; I have no wish to try to work around that situation;
# literally it says "PLEASE submit a bug report [to clang devs]."
#
# TODO: Revisit; figure out how to not trigger this; re-enable. For the record, I (ygoldfel) cannot reproduce
# in a local clang-17, albeit with libc++ (LLVM STL) instead of libstdc++ (GNU STL). I've also tried to
# reduce optimization to -O1, as well as with and without LTO, and with and without -fno-omit-frame-pointer;
# same result.
- name: Run integration test [transport_test - Exercise mode - SHM-jemalloc sub-mode]
id: transport_test_ex_shm_j
if: |
(!cancelled()) && ((matrix.compiler.id != 'clang-17') || (matrix.build-test-cfg.sanitizer-name != 'tsan'))
run: /usr/bin/bash -e ${{ env.install-dir }}/bin/run_transport_test_ex.sh info shm_jemalloc -shm-j
- name: Re-run with increased logging, on failure only
if: |
(!cancelled()) && (steps.transport_test_ex_shm_j.outcome == 'failure')
run: /usr/bin/bash -e ${{ env.install-dir }}/bin/run_transport_test_ex.sh data shm_jemalloc_log_level_data -shm-j
# See earlier comment block about why we saved all the logs including for console-output-only tests/demos.
- name: Check test/demo logs for non-fatal sanitizer error(s)
if: |
(!cancelled()) && (matrix.build-test-cfg.sanitizer-name == 'ubsan')
run: |
cd ${{ env.install-dir }}/bin/logs
# grep returns 0 if 1+ found, 1 if none found, 2+ on error. So check results explicitly instead of -e.
# Namely, for us, the only OK result is an empty stdout and empty stderr.
# Otherwise either grep failed (unlikely) or found 1+ problems; either way redirection target will
# be not-empty, and we will force failure.
{ grep 'SUMMARY: UndefinedBehaviorSanitizer:' `find . -type f` > san_failure_summaries.txt 2>&1; } || true
if [ -s san_failure_summaries.txt ]; then
echo 'Error(s) found. Pipeline will fail. Failures summarized below.'
echo 'Please peruse uploaded log artifacts, resolve the issues, and run pipeline again.'
echo '[[[ file--'
cat san_failure_summaries.txt
echo '--file ]]]'
false
fi
echo 'No errors found in logs.'
- name: Package test/demo logs tarball
if: |
always()
run: |
# Package test/demo logs tarball.
cd ${{ env.install-dir }}/bin
tar cvzf logs.tgz logs
rm -rf logs # Save runner space.
- name: Upload test/demo logs (please inspect if failure(s) seen above)
if: |
always()
uses: actions/upload-artifact@v3
with:
name: ipc-test-logs-${{ matrix.compiler.id }}-${{ matrix.build-test-cfg.id }}
path: ${{ env.install-dir }}/bin/logs.tgz
# TODO: Look into the topic of debuggability in case of a crash. Is a core generated? Is it saved?
# Do we need to manually save it as an artifact? For that matter we would then need the binary and
# ideally the source. For now we save any logs that are not printed to console, and where possible
# our tests/demos keep it the console exclusively (but in some cases, such as transport_test, it
# is not practical due to parallel execution and other aspects). In general it is always better that
# logs are sufficient; but look into situations where they are not.
#
# Don't forget situation where program exits due to a sanitizer reporting problem (ASAN, etc.); is
# there a core? Do we need one? Etc.
#
# Possibly this is all handled beautifully automatically; then this should be deleted.
doc:
needs: [setup, set-vars]
if: |
needs.setup.outputs.proceed-else-not == 'true'
strategy:
fail-fast: false
matrix:
compiler:
# Pick a reasonably modern but pre-installed compiler for building Doxygen/etc.
- id: clang-15
name: clang
version: 15
c-path: /usr/bin/clang-15
cpp-path: /usr/bin/clang++-15
build-cfg:
- id: release
conan-profile-build-type: Release
conan-preset: release
runs-on: ubuntu-22.04
name: doc-${{ matrix.compiler.id }}-${{ matrix.build-cfg.id }}
steps:
- name: Update available software list for apt-get
run: sudo apt-get update
- name: Checkout `ipc` repository and submodules (like `flow`)
uses: actions/checkout@v4
with:
submodules: true
- name: Install Flow-IPC dependencies (like Graphviz) with apt-get
run: sudo apt-get install -y graphviz
- name: Install the latest version of Conan which is less than 2
run: pip install 'conan<2'
- name: Create Conan profile
run: |
# Create Conan profile.
cat <<'EOF' > conan_profile
[settings]
compiler = ${{ matrix.compiler.name }}
compiler.version = ${{ matrix.compiler.version }}
compiler.cppstd = 17
compiler.libcxx = libstdc++11
arch = x86_64
os = Linux
build_type = ${{ matrix.build-cfg.conan-profile-build-type }}
[conf]
tools.build:compiler_executables = {"c": "${{ matrix.compiler.c-path }}", "cpp": "${{ matrix.compiler.cpp-path }}"}
[buildenv]
CC = ${{ matrix.compiler.c-path }}
CXX = ${{ matrix.compiler.cpp-path }}
[options]
ipc:doc = True
ipc:build = False
EOF
- name: Install Flow-IPC dependencies (like Doxygen) with Conan using the profile
run: |
conan install . --profile:build conan_profile --profile:host conan_profile --build missing
- name: Generate code documentation using Conan and Doxygen
run: conan build .
- name: Create documentation tarball (full docs, API-only docs, landing page)
run: |
# Create documentation tarball (full docs, API-only docs, landing page).
cd ${{ github.workspace }}/doc/ipc_doc
${{ github.workspace }}/tools/doc/stage_generated_docs.sh \
${{ github.workspace }}/build/${{ matrix.build-cfg.conan-profile-build-type }}
- name: Upload documentation tarball
uses: actions/upload-artifact@v3
with:
name: ipc-doc
path: ${{ github.workspace }}/doc/ipc_doc.tgz
- name: (`main` branch only) Check-in generated documentation directly into source control
id: doc_check_in
if: success() && (github.ref == 'refs/heads/main')
run: |
# (`main` branch only) Check-in generated documentation directly into source control.
echo 'generated/ docs have been added or replaced locally; mirroring this into checked-in tree.'
# These values informally recommended in:
# https://github.com/actions/checkout#push-a-commit-using-the-built-in-token
git config user.name github-actions
git config user.email [email protected]
# We are forced to use a Personal Access Token attached to a special bot user such that in repo Settings
# we've configured that "guy" as allowed to bypass the requirement to merge via PR. As of this writing
# there's no way to configure the default token to be able to do this.
# TODO: Keep an eye on that in case they provide for a better way:
# https://github.com/orgs/community/discussions/25305
git config --local http.https://github.com/.extraheader \
"AUTHORIZATION: basic $(echo -n x-access-token:${{ secrets.GIT_BOT_PAT }} | base64)"
cd ${{ github.workspace }}/doc/ipc_doc
git rm -r --cached generated || echo 'No generated/ currently checked in; no problem.'
git add generated
git commit -m '${{ needs.set-vars.outputs.doc-commit-message }}'
git push origin main
- name: Signal Pages if we have updated the generated documentation in source control (then web site should update)
if: success() && (steps.doc_check_in.outcome != 'skipped')
run: |
# Signal Pages that we have updated the generated documentation in source control (web site should update).
curl -X POST \
-H 'Accept: application/vnd.github.v3+json' \
-H 'Authorization: token ${{ secrets.GIT_BOT_PAT }}' \
'https://api.github.com/repos/Flow-IPC/flow-ipc.github.io/dispatches' \
-d '{"event_type": "flow-ipc-sync-doc-event"}'