Commit

Merge pull request #6 from homeport/fix/strict-permission-mode
Support environments with strict permissions
HeavyWombat authored Oct 14, 2020
2 parents eb04350 + 01071da commit 199830b
Showing 2 changed files with 132 additions and 116 deletions.
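
In essence, the change wraps the optional cluster lookups (buildrun listing, node listing, cluster build strategy retrieval) so that a lookup that fails — for example because RBAC forbids it — no longer aborts the whole check. A minimal sketch of that pattern, using a hypothetical listBuildRuns helper rather than the repository's dynamic client:

    package main

    import (
        "errors"
        "fmt"
    )

    // listBuildRuns is a hypothetical stand-in for the dynamic-client List call
    // used in internal/load/buildrun.go; here it always fails, as it would under
    // a restrictive RBAC setup.
    func listBuildRuns() ([]string, error) {
        return nil, errors.New("forbidden: insufficient permissions")
    }

    func main() {
        // Optional lookups no longer abort the whole check; a failed call
        // only produces a warning and the remaining checks still run.
        if items, err := listBuildRuns(); err == nil {
            fmt.Printf("found %d buildruns in the cluster\n", len(items))
        } else {
            fmt.Println("Warning: current permissions do not allow listing buildruns, skipping this check")
        }
    }
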
208 changes: 105 additions & 103 deletions internal/load/buildrun.go
@@ -35,63 +35,6 @@ import (
 // settings to verify whether a buildrun can work and how much pressure it
 // would put onto the system
 func CheckSystemAndConfig(kubeAccess KubeAccess, config BuildRunSettings, parallel int) error {
-	// Check how many buildruns are currently in the system already
-	buildRunsResults, err := kubeAccess.DynClient.Resource(BuildRunResource).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-
-	var (
-		totalBuildRuns     int
-		completedBuildRuns int
-	)
-
-	for _, item := range buildRunsResults.Items {
-		buildRun, err := asBuildRun(item)
-		if err != nil {
-			return err
-		}
-
-		if buildRun.Status.CompletionTime != nil {
-			completedBuildRuns++
-		}
-
-		totalBuildRuns++
-	}
-
-	if totalBuildRuns > 0 {
-		bunt.Printf("There are currently LightSkyBlue{%s} in the system. It might be an idea to go through the list of completed buildruns to remove old and obsolete buildruns.\n",
-			text.Plural(totalBuildRuns, "completed buildrun"),
-		)
-
-		fmt.Println()
-	}
-
-	if totalBuildRuns-completedBuildRuns > 0 {
-		bunt.Printf("PaleGoldenrod{_Please note:_} With currently %s, there might be some interference with the test buildruns. Please take the current system utilisation into consideration when analysing any performance measurements.\n",
-			text.Plural(totalBuildRuns-completedBuildRuns, "active buildrun"),
-		)
-
-		fmt.Println()
-	}
-
-	nodesResults, err := kubeAccess.Client.CoreV1().Nodes().List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-
-	var totalCPU int64
-	var totalMemory int64
-	for _, node := range nodesResults.Items {
-		totalCPU += node.Status.Capacity.Cpu().MilliValue()
-		totalMemory += node.Status.Capacity.Memory().Value()
-	}
-
-	totalNodeResources := corev1.ResourceList{
-		corev1.ResourceCPU:    *resource.NewMilliQuantity(totalCPU, resource.DecimalSI),
-		corev1.ResourceMemory: *resource.NewQuantity(totalMemory, resource.BinarySI),
-	}
-
 	// Check whether the configured cluster build strategy is available
 	cbsRaw, err := kubeAccess.DynClient.Resource(ClusterBuildStrategy).Get(config.ClusterBuildStrategy, metav1.GetOptions{})
 	if err != nil {
@@ -107,35 +50,91 @@ func CheckSystemAndConfig(kubeAccess KubeAccess, config BuildRunSettings, parall
 			)
 		}
 
-		return err
+		bunt.Printf("DarkOrange{*Warning:*} The current permissions do not allow to check whether build strategy CadetBlue{*%s*} is available.\n\n", config.ClusterBuildStrategy)
 	}
 
-	clusterBuildStrategy, err := asClusterBuildStrategy(*cbsRaw)
-	if err != nil {
-		return err
-	}
-
-	resourcesForClusterBuildStrategy := estimateResourceRequests(*clusterBuildStrategy, int64(parallel))
-
-	scaleToString := func(q *resource.Quantity) string {
-		var mods = []string{"Byte", "KiB", "MiB", "GiB", "TiB"}
-
-		tmp := float64(q.Value())
-
-		var i = 0
-		for i = 0; tmp > 1023.9 && i < len(mods); i++ {
-			tmp /= 1024.0
+	// Given that the permissions allow it, check how many buildruns are
+	// currently in the system already
+	if buildRunsResults, err := kubeAccess.DynClient.Resource(BuildRunResource).List(metav1.ListOptions{}); err == nil {
+		var (
+			totalBuildRuns     int
+			completedBuildRuns int
+		)
+
+		for _, item := range buildRunsResults.Items {
+			buildRun, err := asBuildRun(item)
+			if err != nil {
+				return err
+			}
+
+			if buildRun.Status.CompletionTime != nil {
+				completedBuildRuns++
+			}
+
+			totalBuildRuns++
 		}
 
-		return fmt.Sprintf("%.1f %s", tmp, mods[i])
+		if totalBuildRuns > 0 {
+			bunt.Printf("There are currently LightSkyBlue{%s} in the system. It might be an idea to go through the list of completed buildruns to remove old and obsolete buildruns.\n",
+				text.Plural(totalBuildRuns, "completed buildrun"),
+			)
+
+			fmt.Println()
+		}
+
+		if totalBuildRuns-completedBuildRuns > 0 {
+			bunt.Printf("PaleGoldenrod{_Please note:_} With currently %s, there might be some interference with the test buildruns. Please take the current system utilisation into consideration when analysing any performance measurements.\n",
+				text.Plural(totalBuildRuns-completedBuildRuns, "active buildrun"),
+			)
+
+			fmt.Println()
+		}
 	}
 
-	bunt.Printf("Keep in mind, with Moccasin{_%s_}, the estimated resource request will be roughly SlateGray{%v CPU cores} and LightSlateGray{%v system memory}. Available in the cluster are SlateGray{%v CPU cores} and LightSlateGray{%v system memory}.\n\n",
-		text.Plural(parallel, "concurrent buildrun"),
-		resourcesForClusterBuildStrategy.Cpu(),
-		scaleToString(resourcesForClusterBuildStrategy.Memory()),
-		totalNodeResources.Cpu(),
-		scaleToString(totalNodeResources.Memory()),
-	)
+	if nodesResults, err := kubeAccess.Client.CoreV1().Nodes().List(metav1.ListOptions{}); err != nil {
+		var totalCPU int64
+		var totalMemory int64
+		for _, node := range nodesResults.Items {
+			totalCPU += node.Status.Capacity.Cpu().MilliValue()
+			totalMemory += node.Status.Capacity.Memory().Value()
+		}
+
+		totalNodeResources := corev1.ResourceList{
+			corev1.ResourceCPU:    *resource.NewMilliQuantity(totalCPU, resource.DecimalSI),
+			corev1.ResourceMemory: *resource.NewQuantity(totalMemory, resource.BinarySI),
+		}
+
+		//
+		if cbsRaw != nil {
+			clusterBuildStrategy, err := asClusterBuildStrategy(*cbsRaw)
+			if err != nil {
+				return err
+			}
+
+			resourcesForClusterBuildStrategy := estimateResourceRequests(*clusterBuildStrategy, int64(parallel))
+
+			scaleToString := func(q *resource.Quantity) string {
+				var mods = []string{"Byte", "KiB", "MiB", "GiB", "TiB"}
+
+				tmp := float64(q.Value())
+
+				var i = 0
+				for i = 0; tmp > 1023.9 && i < len(mods); i++ {
+					tmp /= 1024.0
+				}
+
+				return fmt.Sprintf("%.1f %s", tmp, mods[i])
+			}
+
+			bunt.Printf("Keep in mind, with Moccasin{_%s_}, the estimated resource request will be roughly SlateGray{%v CPU cores} and LightSlateGray{%v system memory}. Available in the cluster are SlateGray{%v CPU cores} and LightSlateGray{%v system memory}.\n\n",
+				text.Plural(parallel, "concurrent buildrun"),
+				resourcesForClusterBuildStrategy.Cpu(),
+				scaleToString(resourcesForClusterBuildStrategy.Memory()),
+				totalNodeResources.Cpu(),
+				scaleToString(totalNodeResources.Memory()),
+			)
+		}
+	}
 
 	return nil
 }
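
The scaleToString closure introduced above renders a raw memory quantity in binary units. A self-contained sketch of the same idea (slightly restructured so the unit index cannot run past the list; only k8s.io/apimachinery is assumed as a dependency):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    // scaleToString divides the raw value by 1024 until it fits the next
    // binary unit, mirroring the helper in CheckSystemAndConfig.
    func scaleToString(q *resource.Quantity) string {
        units := []string{"Byte", "KiB", "MiB", "GiB", "TiB"}

        value := float64(q.Value())
        idx := 0
        for value > 1023.9 && idx < len(units)-1 {
            value /= 1024.0
            idx++
        }

        return fmt.Sprintf("%.1f %s", value, units[idx])
    }

    func main() {
        // 16 GiB of total node memory renders as "16.0 GiB".
        totalMemory := resource.NewQuantity(16*1024*1024*1024, resource.BinarySI)
        fmt.Println(scaleToString(totalMemory))
    }
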
@@ -163,37 +162,40 @@ func ExecuteSingleBuildRun(kubeAccess KubeAccess, name string, config BuildRunSe
 
 	defer deleteContainerImage(kubeAccess, buildRun.Namespace, config.Output.SecretRef, buildRun.Status.BuildSpec.Output.ImageURL)
 
-	taskRun, err := lookUpTaskRun(kubeAccess.DynClient, config.Namespace, *buildRun)
-	if err != nil {
-		return nil, err
+	var buildRunResult = &BuildRunResult{
+		TotalBuildRunTime:      buildRun.Status.CompletionTime.Time.Sub(buildRun.ObjectMeta.CreationTimestamp.Time),
+		BuildRunRampUpDuration: time.Duration(-1),
+		TaskRunRampUpDuration:  time.Duration(-1),
+		PodRampUpDuration:      time.Duration(-1),
+		InternalProcessingTime: time.Duration(-1),
 	}
 
-	pod, err := lookUpPod(kubeAccess.Client, config.Namespace, *taskRun)
-	if err != nil {
-		return nil, err
-	}
+	if taskRun, err := lookUpTaskRun(kubeAccess.DynClient, config.Namespace, *buildRun); err == nil {
+		pod, err := lookUpPod(kubeAccess.Client, config.Namespace, *taskRun)
+		if err != nil {
+			return nil, err
+		}
 
-	var totalTektonStepsTime time.Duration
-	for _, step := range taskRun.Status.Steps {
-		totalTektonStepsTime += step.Terminated.FinishedAt.Time.Sub(step.Terminated.StartedAt.Time)
-	}
+		var totalTektonStepsTime time.Duration
+		for _, step := range taskRun.Status.Steps {
+			totalTektonStepsTime += step.Terminated.FinishedAt.Time.Sub(step.Terminated.StartedAt.Time)
+		}
 
-	lastInitPodIdx := len(pod.Status.InitContainerStatuses) - 1
-	lastInitPod := pod.Status.InitContainerStatuses[lastInitPodIdx]
+		lastInitPodIdx := len(pod.Status.InitContainerStatuses) - 1
+		lastInitPod := pod.Status.InitContainerStatuses[lastInitPodIdx]
 
-	totalBuildRunTime := buildRun.Status.CompletionTime.Time.Sub(buildRun.ObjectMeta.CreationTimestamp.Time)
-	buildRunRampUpDuration := taskRun.CreationTimestamp.Time.Sub(buildRun.CreationTimestamp.Time)
-	taskRunRampUpDuration := pod.CreationTimestamp.Time.Sub(taskRun.CreationTimestamp.Time)
-	podRampUpDuration := lastInitPod.State.Terminated.FinishedAt.Sub(pod.CreationTimestamp.Time)
-	internalProcessingTime := totalBuildRunTime - buildRunRampUpDuration - taskRunRampUpDuration - podRampUpDuration - totalTektonStepsTime
+		buildRunResult.BuildRunRampUpDuration = taskRun.CreationTimestamp.Time.Sub(buildRun.CreationTimestamp.Time)
+		buildRunResult.TaskRunRampUpDuration = pod.CreationTimestamp.Time.Sub(taskRun.CreationTimestamp.Time)
+		buildRunResult.PodRampUpDuration = lastInitPod.State.Terminated.FinishedAt.Sub(pod.CreationTimestamp.Time)
+
+		buildRunResult.InternalProcessingTime = buildRunResult.TotalBuildRunTime -
+			buildRunResult.BuildRunRampUpDuration -
+			buildRunResult.TaskRunRampUpDuration -
+			buildRunResult.PodRampUpDuration -
+			totalTektonStepsTime
+	}
 
-	return &BuildRunResult{
-		TotalBuildRunTime:      totalBuildRunTime,
-		BuildRunRampUpDuration: buildRunRampUpDuration,
-		TaskRunRampUpDuration:  taskRunRampUpDuration,
-		PodRampUpDuration:      podRampUpDuration,
-		InternalProcessingTime: internalProcessingTime,
-	}, nil
+	return buildRunResult, nil
 }
 
 // ExecuteParallelBuildRuns executes the same buildrun multiple times in
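
The internal processing time reported per buildrun is whatever remains of the total buildrun time after subtracting the three ramp-up phases and the time spent in the Tekton steps themselves. A worked example with made-up durations:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Illustrative numbers only, not taken from a real buildrun.
        totalBuildRunTime := 90 * time.Second
        buildRunRampUp := 2 * time.Second   // buildrun created -> taskrun created
        taskRunRampUp := 3 * time.Second    // taskrun created -> pod created
        podRampUp := 5 * time.Second        // pod created -> last init container finished
        tektonStepsTime := 70 * time.Second // sum of the terminated step durations

        internalProcessingTime := totalBuildRunTime -
            buildRunRampUp -
            taskRunRampUp -
            podRampUp -
            tektonStepsTime

        fmt.Println(internalProcessingTime) // prints 10s
    }
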
40 changes: 27 additions & 13 deletions internal/load/reports.go
@@ -46,40 +46,54 @@ func (brs BuildRunResultSet) String() string {
 		return tmp
 	}
 
-	table, err := neat.Table([][]string{
-		bold("Description", "Minimum", "Mean", "Median", "Maximum"),
-		cnvfnc("_total time between buildrun creation until finish_",
+	tableData := [][]string{bold("Description", "Minimum", "Mean", "Median", "Maximum")}
+
+	if brs.Minimum.TotalBuildRunTime >= 0 {
+		tableData = append(tableData, cnvfnc("_total time between buildrun creation until finish_",
 			brs.Minimum.TotalBuildRunTime,
 			brs.Mean.TotalBuildRunTime,
 			brs.Median.TotalBuildRunTime,
 			brs.Maximum.TotalBuildRunTime,
-		),
-		cnvfnc("_time between buildrun creation and taskrun creation_",
+		))
+	}
+
+	if brs.Minimum.BuildRunRampUpDuration >= 0 {
+		tableData = append(tableData, cnvfnc("_time between buildrun creation and taskrun creation_",
 			brs.Minimum.BuildRunRampUpDuration,
 			brs.Mean.BuildRunRampUpDuration,
 			brs.Median.BuildRunRampUpDuration,
 			brs.Maximum.BuildRunRampUpDuration,
-		),
-		cnvfnc("_time between taskrun creation and tekton pod creation_",
+		))
+	}
+
+	if brs.Minimum.TaskRunRampUpDuration >= 0 {
+		tableData = append(tableData, cnvfnc("_time between taskrun creation and tekton pod creation_",
 			brs.Minimum.TaskRunRampUpDuration,
 			brs.Mean.TaskRunRampUpDuration,
 			brs.Median.TaskRunRampUpDuration,
 			brs.Maximum.TaskRunRampUpDuration,
-		),
-		cnvfnc("_time between tekton pod creation and first container start_",
+		))
+	}
+
+	if brs.Minimum.PodRampUpDuration >= 0 {
+		tableData = append(tableData, cnvfnc("_time between tekton pod creation and first container start_",
 			brs.Minimum.PodRampUpDuration,
 			brs.Mean.PodRampUpDuration,
 			brs.Median.PodRampUpDuration,
 			brs.Maximum.PodRampUpDuration,
-		),
-		cnvfnc("_remaining internal processing time_",
+		))
+	}
+
+	if brs.Minimum.InternalProcessingTime >= 0 {
+		tableData = append(tableData, cnvfnc("_remaining internal processing time_",
 			brs.Minimum.InternalProcessingTime,
 			brs.Mean.InternalProcessingTime,
 			brs.Median.InternalProcessingTime,
 			brs.Maximum.InternalProcessingTime,
-		),
-	}, neat.VertialBarSeparator())
+		))
+	}
+
+	table, err := neat.Table(tableData, neat.VertialBarSeparator())
 	if err != nil {
 		panic(err)
 	}
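
A row is only appended when the metric was measured for the whole result set; the time.Duration(-1) sentinel set in buildrun.go keeps the minimum negative otherwise, so the row is skipped. A reduced sketch of that filter, using plain fmt instead of the neat table renderer and illustrative values:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        rows := [][]string{{"Description", "Minimum", "Maximum"}}

        // Illustrative values: the second metric carries the -1 sentinel,
        // e.g. because the taskrun could not be looked up.
        metrics := []struct {
            description string
            min, max    time.Duration
        }{
            {"total time between buildrun creation until finish", 80 * time.Second, 95 * time.Second},
            {"time between buildrun creation and taskrun creation", -1, -1},
        }

        for _, m := range metrics {
            if m.min >= 0 {
                rows = append(rows, []string{m.description, m.min.String(), m.max.String()})
            }
        }

        for _, row := range rows {
            fmt.Println(row)
        }
    }
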
