feat(RHIDP-3671): introduce different RBAC policies #107

Merged
30 changes: 26 additions & 4 deletions ci-scripts/rhdh-setup/create_resource.sh
@@ -186,11 +186,33 @@ create_group() {
fi
}

export RBAC_POLICY_ALL_GROUPS_ADMIN="all_groups_admin" #default
export RBAC_POLICY_STATIC="static"

create_rbac_policy() {
policy="${1:-$RBAC_POLICY_ALL_GROUPS_ADMIN}"
log_info "Generating RBAC policy: $policy"
case $policy in
"$RBAC_POLICY_ALL_GROUPS_ADMIN")
for i in $(seq 1 "$GROUP_COUNT"); do
echo " g, group:default/g${i}, role:default/a" >>"$TMP_DIR/group-rbac.yaml"
done
;;
"$RBAC_POLICY_STATIC")
for i in $(seq 1 "${RBAC_POLICY_SIZE:-$GROUP_COUNT}"); do
echo " g, group:default/g${i}, role:default/a" >>"$TMP_DIR/group-rbac.yaml"
done
;;
*)
log_error "Invalid RBAC policy: ${policy}"
exit 1
;;
esac

}

create_groups() {
log_info "Creating Groups in Keycloak"
for i in $(seq 1 "$GROUP_COUNT"); do
echo " g, group:default/g${i}, role:default/a" >>"$TMP_DIR/group-rbac.yaml"
done
sleep 5
seq 1 "${GROUP_COUNT}" | xargs -n1 -P"${POPULATION_CONCURRENCY}" bash -c 'create_group'
}
@@ -356,5 +378,5 @@ get_token() {
rm -rf "$token_lockfile"
}

export -f keycloak_url backstage_url get_token keycloak_token rhdh_token create_group create_user log log_info log_warn log_error log_token log_token_info log_token_err
export -f keycloak_url backstage_url get_token keycloak_token rhdh_token create_rbac_policy create_group create_user log log_info log_warn log_error log_token log_token_info log_token_err
export kc_lockfile bs_lockfile token_lockfile
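
For a quick feel for the difference between the two policies, the sketch below mimics what create_rbac_policy appends to "$TMP_DIR/group-rbac.yaml", using made-up values; it is illustrative only and not part of the PR (TMP_DIR, GROUP_COUNT, RBAC_POLICY_SIZE, and the extra group-rbac-static.yaml file are stand-ins for this sketch).

# Illustrative only: reproduce the loop bodies of create_rbac_policy with small values.
TMP_DIR="$(mktemp -d)"   # stand-in scratch dir
GROUP_COUNT=5            # stand-in group count
RBAC_POLICY_SIZE=2       # only consulted by the "static" policy

# RBAC_POLICY=all_groups_admin (default): every group g1..gN is granted role:default/a.
for i in $(seq 1 "$GROUP_COUNT"); do
  echo " g, group:default/g${i}, role:default/a"
done >"$TMP_DIR/group-rbac.yaml"

# RBAC_POLICY=static: only the first RBAC_POLICY_SIZE groups get the role, no matter
# how many groups exist in Keycloak. The separate file name is just for this sketch;
# the real function always writes to group-rbac.yaml.
for i in $(seq 1 "${RBAC_POLICY_SIZE:-$GROUP_COUNT}"); do
  echo " g, group:default/g${i}, role:default/a"
done >"$TMP_DIR/group-rbac-static.yaml"

cat "$TMP_DIR/group-rbac-static.yaml"
#  g, group:default/g1, role:default/a
#  g, group:default/g2, role:default/a
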
2 changes: 2 additions & 0 deletions ci-scripts/rhdh-setup/deploy.sh
@@ -54,6 +54,7 @@ export KEYCLOAK_USER_PASS=${KEYCLOAK_USER_PASS:-$(mktemp -u XXXXXXXXXX)}
export AUTH_PROVIDER="${AUTH_PROVIDER:-''}"
export ENABLE_RBAC="${ENABLE_RBAC:-false}"
export ENABLE_PROFILING="${ENABLE_PROFILING:-false}"
export RBAC_POLICY="${RBAC_POLICY:-all_groups_admin}"

export PSQL_LOG="${PSQL_LOG:-true}"
export RHDH_METRIC="${RHDH_METRIC:-true}"
@@ -235,6 +236,7 @@ backstage_install() {
until $clin create configmap app-config-rhdh --from-file "app-config.rhdh.yaml=$TMP_DIR/app-config.yaml"; do $clin delete configmap app-config-rhdh --ignore-not-found=true; done
if ${ENABLE_RBAC}; then
cp template/backstage/rbac-config.yaml "${TMP_DIR}"
create_rbac_policy "$RBAC_POLICY"
cat "$TMP_DIR/group-rbac.yaml" >>"$TMP_DIR/rbac-config.yaml"
until $clin create -f "$TMP_DIR/rbac-config.yaml"; do $clin delete configmap rbac-policy --ignore-not-found=true; done
fi
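
One possible way to drive the new knob at deploy time is shown below; only ENABLE_RBAC, RBAC_POLICY, and RBAC_POLICY_SIZE come from this diff, while the entrypoint and the concrete values are assumptions.

# Hypothetical invocation: cap the generated policy at 2500 grouping lines.
export ENABLE_RBAC=true            # gates the rbac-config.yaml ConfigMap in backstage_install()
export RBAC_POLICY=static          # defaults to all_groups_admin when unset
export RBAC_POLICY_SIZE=2500       # defaults to GROUP_COUNT when unset (see create_resource.sh)
./ci-scripts/rhdh-setup/deploy.sh  # assumed entrypoint; adjust to how you normally run the setup
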
8 changes: 8 additions & 0 deletions ci-scripts/runs-to-csv.sh
@@ -29,6 +29,8 @@ API_COUNT,\
COMPONENT_COUNT,\
BACKSTAGE_USER_COUNT,\
GROUP_COUNT,\
RBAC_POLICY,\
RBAC_POLICY_SIZE,\
RHDH_DEPLOYMENT_REPLICAS,\
RHDH_RESOURCES_CPU_LIMITS,\
RHDH_RESOURCES_MEMORY_LIMITS,\
@@ -39,6 +41,8 @@ RHDH_CPU_Avg,\
RHDH_CPU_Max,\
RHDH_Memory_Avg,\
RHDH_Memory_Max,\
RHDH_Heap_Avg,\
RHDH_Heap_Max,\
RHDH_DB_Pods,\
RHDH_DB_CPU_Avg,\
RHDH_DB_CPU_Max,\
@@ -94,6 +98,8 @@ find "${1:-.}" -name benchmark.json -print0 | while IFS= read -r -d '' filename;
.metadata.env.COMPONENT_COUNT,
.metadata.env.BACKSTAGE_USER_COUNT,
.metadata.env.GROUP_COUNT,
.metadata.env.RBAC_POLICY,
.metadata.env.RBAC_POLICY_SIZE,
.metadata.env.RHDH_DEPLOYMENT_REPLICAS,
.metadata.env.RHDH_RESOURCES_CPU_LIMITS,
.metadata.env.RHDH_RESOURCES_MEMORY_LIMITS,
@@ -104,6 +110,8 @@ find "${1:-.}" -name benchmark.json -print0 | while IFS= read -r -d '' filename;
.measurements."rhdh-developer-hub".cpu.max,
.measurements."rhdh-developer-hub".memory.mean,
.measurements."rhdh-developer-hub".memory.max,
.measurements.nodejs.test.nodejs_heap_size_used_bytes.mean,
.measurements.nodejs.test.nodejs_heap_size_used_bytes.max,
.measurements."rhdh-postgresql".count_ready.mean,
.measurements."rhdh-postgresql".cpu.mean,
.measurements."rhdh-postgresql".cpu.max,
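
To spot-check the newly exported fields on a single run before regenerating the whole CSV, a jq call along these lines should do; the file path is a placeholder, and the field layout is assumed to match the paths used above.

# Hypothetical spot-check of the columns added to runs-to-csv.sh.
jq -r '
  .metadata.env.RBAC_POLICY,
  .metadata.env.RBAC_POLICY_SIZE,
  .measurements.nodejs.test.nodejs_heap_size_used_bytes.mean,
  .measurements.nodejs.test.nodejs_heap_size_used_bytes.max
' path/to/benchmark.json
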
74 changes: 39 additions & 35 deletions ci-scripts/scalability/collect-results.sh
@@ -22,6 +22,8 @@ read -ra active_users_spawn_rate <<<"${SCALE_ACTIVE_USERS_SPAWN_RATES:-1:1 200:4

read -ra bs_users_groups <<<"${SCALE_BS_USERS_GROUPS:-1:1 10000:2500}"

read -ra rbac_policy_size <<<"${SCALE_RBAC_POLICY_SIZE:-10000}"

read -ra catalog_apis_components <<<"${SCALE_CATALOG_SIZES:-1:1 10000:10000}"

read -ra replicas <<<"${SCALE_REPLICAS:-5}"
@@ -42,33 +44,34 @@ for w in "${workers[@]}"; do
IFS=":" read -ra tokens <<<"${bu_bg}"
bu="${tokens[0]}" # backstage users
[[ "${#tokens[@]}" == 1 ]] && bg="" || bg="${tokens[1]}" # backstage groups
for s in "${db_storages[@]}"; do
for au_sr in "${active_users_spawn_rate[@]}"; do
IFS=":" read -ra tokens <<<"${au_sr}"
active_users=${tokens[0]}
output="$ARTIFACT_DIR/scalability_c-${r}r-db_${s}-${bu}bu-${bg}bg-${w}w-${active_users}u.csv"
header="CatalogSize${csv_delim}Apis${csv_delim}Components${csv_delim}MaxActiveUsers${csv_delim}AverageRPS${csv_delim}MaxRPS${csv_delim}AverageRT${csv_delim}MaxRT${csv_delim}Failures${csv_delim}FailRate${csv_delim}DBStorageUsed${csv_delim}DBStorageAvailable${csv_delim}DBStorageCapacity"
for cr_cl in "${cpu_requests_limits[@]}"; do
IFS=":" read -ra tokens <<<"${cr_cl}"
cr="${tokens[0]}" # cpu requests
[[ "${#tokens[@]}" == 1 ]] && cl="" || cl="${tokens[1]}" # cpu limits
for mr_ml in "${memory_requests_limits[@]}"; do
IFS=":" read -ra tokens <<<"${mr_ml}"
mr="${tokens[0]}" # memory requests
[[ "${#tokens[@]}" == 1 ]] && ml="" || ml="${tokens[1]}" # memory limits
echo "$header" >"$output"
for a_c in "${catalog_apis_components[@]}"; do
IFS=":" read -ra tokens <<<"${a_c}"
a="${tokens[0]}" # apis
[[ "${#tokens[@]}" == 1 ]] && c="" || c="${tokens[1]}" # components
index="${r}r-db_${s}-${bu}bu-${bg}bg-${w}w-${cr}cr-${cl}cl-${mr}mr-${ml}ml-${a}a-${c}c"
iteration="${index}/test/${active_users}u"
echo "[$iteration] Looking for benchmark.json..."
benchmark_json="$(find "${ARTIFACT_DIR}" -name benchmark.json | grep "$iteration" || true)"
if [ -n "$benchmark_json" ]; then
benchmark_json="$(readlink -m "$benchmark_json")"
echo "[$iteration] Gathering data from $benchmark_json"
jq_cmd="\"$((a + c))\" \
for rbs in "${rbac_policy_size[@]}"; do
for s in "${db_storages[@]}"; do
for au_sr in "${active_users_spawn_rate[@]}"; do
IFS=":" read -ra tokens <<<"${au_sr}"
active_users=${tokens[0]}
output="$ARTIFACT_DIR/scalability_c-${r}r-db_${s}-${bu}bu-${bg}bg-${rbs}rbs-${w}w-${active_users}u.csv"
header="CatalogSize${csv_delim}Apis${csv_delim}Components${csv_delim}MaxActiveUsers${csv_delim}AverageRPS${csv_delim}MaxRPS${csv_delim}AverageRT${csv_delim}MaxRT${csv_delim}Failures${csv_delim}FailRate${csv_delim}DBStorageUsed${csv_delim}DBStorageAvailable${csv_delim}DBStorageCapacity"
for cr_cl in "${cpu_requests_limits[@]}"; do
IFS=":" read -ra tokens <<<"${cr_cl}"
cr="${tokens[0]}" # cpu requests
[[ "${#tokens[@]}" == 1 ]] && cl="" || cl="${tokens[1]}" # cpu limits
for mr_ml in "${memory_requests_limits[@]}"; do
IFS=":" read -ra tokens <<<"${mr_ml}"
mr="${tokens[0]}" # memory requests
[[ "${#tokens[@]}" == 1 ]] && ml="" || ml="${tokens[1]}" # memory limits
echo "$header" >"$output"
for a_c in "${catalog_apis_components[@]}"; do
IFS=":" read -ra tokens <<<"${a_c}"
a="${tokens[0]}" # apis
[[ "${#tokens[@]}" == 1 ]] && c="" || c="${tokens[1]}" # components
index="${r}r-db_${s}-${bu}bu-${bg}bg-${rbs}rbs-${w}w-${cr}cr-${cl}cl-${mr}mr-${ml}ml-${a}a-${c}c"
iteration="${index}/test/${active_users}u"
echo "[$iteration] Looking for benchmark.json..."
benchmark_json="$(find "${ARTIFACT_DIR}" -name benchmark.json | grep "$iteration" || true)"
if [ -n "$benchmark_json" ]; then
benchmark_json="$(readlink -m "$benchmark_json")"
echo "[$iteration] Gathering data from $benchmark_json"
jq_cmd="\"$((a + c))\" \
+ $csv_delim_quoted + \"${a}\" \
+ $csv_delim_quoted + \"${c}\" \
+ $csv_delim_quoted + (.results.locust_users.max | tostring) \
@@ -81,14 +84,15 @@ for w in "${workers[@]}"; do
+ $csv_delim_quoted + (.measurements.cluster.pv_stats.test.\"rhdh-postgresql\".used_bytes.max | tostring) \
+ $csv_delim_quoted + (.measurements.cluster.pv_stats.test.\"rhdh-postgresql\".available_bytes.min | tostring) \
+ $csv_delim_quoted + (.measurements.cluster.pv_stats.test.\"rhdh-postgresql\".capacity_bytes.max | tostring)"
sed -Ee 's/: ([0-9]+\.[0-9]*[X]+[0-9e\+-]*|[0-9]*X+[0-9]*\.[0-9e\+-]*|[0-9]*X+[0-9]*\.[0-9]*X+[0-9e\+-]+)/: "\1"/g' "$benchmark_json" | jq -rc "$jq_cmd" >>"$output"
else
echo "[$iteration] Unable to find benchmark.json"
for _ in $(seq 1 "$(echo "$header" | tr -cd "$csv_delim" | wc -c)"); do
echo -n ";" >>"$output"
done
echo >>"$output"
fi
sed -Ee 's/: ([0-9]+\.[0-9]*[X]+[0-9e\+-]*|[0-9]*X+[0-9]*\.[0-9e\+-]*|[0-9]*X+[0-9]*\.[0-9]*X+[0-9e\+-]+)/: "\1"/g' "$benchmark_json" | jq -rc "$jq_cmd" >>"$output"
else
echo "[$iteration] Unable to find benchmark.json"
for _ in $(seq 1 "$(echo "$header" | tr -cd "$csv_delim" | wc -c)"); do
echo -n ";" >>"$output"
done
echo >>"$output"
fi
done
done
done
done
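
The new loop dimension is fed by SCALE_RBAC_POLICY_SIZE and surfaces as "<size>rbs" in both the iteration index and the per-combination CSV file name. A hedged example of exercising it follows; the ARTIFACT_DIR value and the way the script is launched are assumptions.

# Hypothetical run: collect results for two RBAC policy sizes.
# Each size appears as "<size>rbs" in the iteration index and in the
# scalability_c-...-<size>rbs-...csv file names under $ARTIFACT_DIR.
export ARTIFACT_DIR="./artifacts"            # assumed; benchmark.json files are searched here
export SCALE_RBAC_POLICY_SIZE="2500 10000"   # default is "10000"
export SCALE_BS_USERS_GROUPS="10000:2500"    # backstage users:groups, matching the defaults above
./ci-scripts/scalability/collect-results.sh  # assumed entrypoint
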