Skip to content

Commit

Permalink
Improving logging
Browse files Browse the repository at this point in the history
  • Loading branch information
cmendesce committed Aug 2, 2024
1 parent 7b167c8 commit d27cd6a
Show file tree
Hide file tree
Showing 6 changed files with 106 additions and 136 deletions.

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,8 @@

import com.fasterxml.jackson.databind.JsonNode;
import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.PodList;
import io.fabric8.kubernetes.api.model.apps.Deployment;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.dsl.FilterWatchListDeletable;
import io.fabric8.kubernetes.client.dsl.PodResource;
import io.resiliencebench.resources.queue.ExecutionQueue;
import io.resiliencebench.resources.scenario.Scenario;
import io.resiliencebench.resources.service.ResilientService;
Expand Down Expand Up @@ -87,33 +84,42 @@ public void applyNewVariables(Deployment targetDeployment, String containerName,
for (var variable : deploymentVars) {
var newValue = env.get(variable.getName());
if (newValue != null) {
logger.info("Container {}. variable {} new value {}", containerName, variable.getName(), newValue);
variable.setValue(newValue.toString());
}
}

updateDeployment(targetDeployment);
restartPods(targetDeployment);
}

/**
 * Pushes the updated Deployment spec to the cluster.
 *
 * Waiting for the pods to come back up is handled by restartPods, which the
 * caller invokes immediately after this method — keeping the wait here would
 * block twice for the same rollout. The previously commented-out pod-deletion
 * code was dead and has been removed.
 *
 * @param targetDeployment the deployment carrying the updated container variables
 */
private void updateDeployment(Deployment targetDeployment) {
  kubernetesClient().apps().deployments()
          .inNamespace(targetDeployment.getMetadata().getNamespace())
          .resource(targetDeployment)
          .update();
}

public FilterWatchListDeletable<Pod, PodList, PodResource> getPods(Deployment targetDeployment) {
return kubernetesClient().pods()
.inNamespace(targetDeployment.getMetadata().getNamespace())
.withLabel("app", targetDeployment.getMetadata().getName());
/**
 * Blocks until every pod belonging to the given deployment reports Ready
 * (up to 60 seconds), selecting pods by the "app" label.
 *
 * @param targetDeployment the deployment whose pods are being restarted
 */
private void restartPods(Deployment targetDeployment) {
  logger.info("Waiting for the pods to restart");
  var namespace = targetDeployment.getMetadata().getNamespace();
  var appLabelValue = targetDeployment.getMetadata().getName();
  kubernetesClient()
          .pods()
          .inNamespace(namespace)
          .withLabel("app", appLabelValue)
          .waitUntilCondition(this::waitUntilCondition, 60, TimeUnit.SECONDS);
  logger.info("Pods restarted successfully");
}

/**
 * Readiness predicate used while waiting for a deployment's pods to restart.
 *
 * A pod that is marked for deletion (non-null deletionTimestamp) can never
 * become ready, so it immediately fails the condition and the wait continues
 * until its replacement reports Ready.
 *
 * The stale pre-refactor body (computing {@code match} and returning early),
 * which left the new implementation unreachable, has been removed; the
 * misleading {@code isMarkedForDeletion} name — it was true when the pod was
 * NOT marked — is fixed as well.
 *
 * @param pod the pod to inspect
 * @return true when the pod is not terminating and has a Ready=True condition
 */
public boolean waitUntilCondition(Pod pod) {
  var markedForDeletion = pod.getMetadata().getDeletionTimestamp() != null;
  if (markedForDeletion) {
    return false;
  }
  var isReady = pod.getStatus()
          .getConditions()
          .stream()
          .anyMatch(condition -> "Ready".equals(condition.getType()) && "True".equals(condition.getStatus()));
  if (isReady) {
    // Log only the positive transition to avoid flooding the log while polling.
    logger.info("Pod {} is ready", pod.getMetadata().getName());
  }
  return isReady;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,9 @@ protected ExecutionQueue internalExecute(Scenario scenario, ExecutionQueue queue

var currentResults = getFileString(item.getResultFile());
if (currentResults.isPresent()) {
var currentResultsJson = new JsonObject(currentResults.get());
currentResultsJson.put("metadata", scenario.getSpec().toJson());

var results = getFileString(queue.getSpec().getResultFile());
JsonObject resultsJson;
if (results.isPresent()) {
Expand All @@ -49,8 +52,6 @@ protected ExecutionQueue internalExecute(Scenario scenario, ExecutionQueue queue
resultsJson = new JsonObject();
resultsJson.put("results", new JsonArray());
}
var currentResultsJson = new JsonObject(currentResults.get());
currentResultsJson.put("metadata", scenario.getSpec().toJson());
resultsJson.getJsonArray("results").add(currentResultsJson);
writeToFile(queue.getSpec().getResultFile(), resultsJson.encode());
}
Expand Down
Original file line number Diff line number Diff line change
@@ -1,13 +1,9 @@
package io.resiliencebench.execution.steps.aws;

import com.amazonaws.SdkClientException;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.HeadBucketRequest;
import io.resiliencebench.execution.FileManager;
import io.resiliencebench.execution.LocalFileManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
Expand All @@ -31,16 +27,4 @@ AmazonS3 createS3Client(AWSCredentialsProvider credentialsProvider) {
// Resolves AWS credentials via the SDK's default chain (environment variables,
// system properties, profile file, then container/instance metadata).
AWSCredentialsProvider credentialsProvider() {
  return new DefaultAWSCredentialsProviderChain();
}

/**
 * Chooses the FileManager implementation: S3-backed when the configured bucket
 * is reachable, otherwise a local-filesystem fallback so execution can proceed.
 *
 * @param amazonS3 the configured S3 client
 * @return an S3FileManager on success, a LocalFileManager when the bucket check fails
 */
FileManager fileManager(AmazonS3 amazonS3) {
  try {
    logger.info("Attempting to connect to S3 bucket {} in region {}", bucketName, region);
    amazonS3.headBucket(new HeadBucketRequest(bucketName));
    logger.info("Successfully connected to S3 bucket {}", bucketName);
    return new S3FileManager(amazonS3, bucketName);
  } catch (SdkClientException ex) {
    // The original "{}{}" pattern fused the bucket name and the exception message
    // into one token; passing the Throwable last also preserves the stack trace.
    logger.error("Failed to connect to S3 bucket {}. Using local file manager instead", bucketName, ex);
    return new LocalFileManager();
  }
}
}
Original file line number Diff line number Diff line change
@@ -1,14 +1,36 @@
package io.resiliencebench.execution.steps.aws;

import com.amazonaws.AmazonServiceException;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.resiliencebench.execution.steps.ExecutorStep;
import io.resiliencebench.resources.queue.ExecutionQueue;
import io.resiliencebench.resources.scenario.Scenario;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.nio.file.Paths;
import java.util.ArrayList;

public class AwsResultFileStep extends ExecutorStep<HasMetadata> {
public AwsResultFileStep(KubernetesClient kubernetesClient) {

private final Logger log = LoggerFactory.getLogger(AwsResultFileStep.class);
private final AmazonS3 s3Client;

private final String bucketName;
private final static long PART_SIZE = 5 * 1024 * 1024; // part size to 5 MB.

/**
 * Creates the step with the S3 client and target bucket used for result uploads.
 *
 * @param kubernetesClient client passed to the base ExecutorStep
 * @param s3Client         configured S3 client used for multipart uploads
 * @param bucketName       destination bucket for result files
 */
public AwsResultFileStep(KubernetesClient kubernetesClient, AmazonS3 s3Client, String bucketName) {
super(kubernetesClient);
this.s3Client = s3Client;
this.bucketName = bucketName;
}

@Override
Expand All @@ -20,4 +42,60 @@ protected boolean isApplicable(Scenario scenario) {
// NOTE(review): this step mutates no in-cluster resource, so there is nothing to
// return; null looks intentional here — confirm the step pipeline tolerates it.
protected HasMetadata internalExecute(Scenario scenario, ExecutionQueue queue) {
return null;
}

/**
 * Uploads every file found directly inside the given directory to S3 under
 * destinationPath (non-recursive; each entry is handed to internalSave).
 *
 * @param fileName        path of the local directory containing result files
 * @param destinationPath key prefix inside the bucket
 */
public void save(String fileName, String destinationPath) {
  // File.listFiles() returns null both when the path does not exist and when it
  // exists but is not a directory — the previous message claimed only the former.
  var files = new File(fileName).listFiles();
  if (files == null) {
    log.error("{} does not exist or is not a directory", fileName);
    return;
  }
  for (var file : files) {
    internalSave(file.getAbsolutePath(), destinationPath);
  }
}

/**
 * Uploads a single local file to S3 using the multipart upload API in 5 MB parts.
 *
 * The object key is destinationPath joined with the file's base name. Failures
 * are logged (with full stack trace) rather than rethrown, so one bad file does
 * not abort the upload of its siblings in save().
 *
 * @param fileName        absolute path of the file to upload
 * @param destinationPath key prefix inside the bucket
 */
private void internalSave(String fileName, String destinationPath) {
  var file = new File(fileName);
  if (!file.exists()) {
    log.error("File {} does not exist", fileName);
    return;
  }
  var keyName = Paths.get(destinationPath, file.getName()).toString();
  var contentLength = file.length();

  try {
    var partETags = new ArrayList<PartETag>();
    var initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
    var initResponse = s3Client.initiateMultipartUpload(initRequest);

    long filePosition = 0;
    for (var i = 1; filePosition < contentLength; i++) {
      // Every part is PART_SIZE bytes except possibly the last one.
      var partSize = Math.min(PART_SIZE, (contentLength - filePosition));

      var uploadRequest = new UploadPartRequest()
              .withBucketName(bucketName)
              .withKey(keyName)
              .withUploadId(initResponse.getUploadId())
              .withPartNumber(i)
              .withFileOffset(filePosition)
              .withFile(file)
              .withPartSize(partSize);

      var uploadResult = s3Client.uploadPart(uploadRequest);
      partETags.add(uploadResult.getPartETag());

      filePosition += partSize;
    }

    var compRequest = new CompleteMultipartUploadRequest(bucketName, keyName, initResponse.getUploadId(), partETags);
    s3Client.completeMultipartUpload(compRequest);
    log.info("File {} successfully uploaded", keyName);
  }
  catch (AmazonServiceException e) {
    // Pass the Throwable as the final argument so SLF4J logs the full stack
    // trace; previously only getErrorMessage() was recorded.
    log.error("The call was transmitted successfully with requestId {}, but Amazon S3 couldn't process it", e.getRequestId(), e);
  }
  catch (SdkClientException e) {
    log.error("Amazon S3 couldn't be contacted for a response, or the client couldn't parse the response from Amazon S3", e);
  }
}
}

This file was deleted.

0 comments on commit d27cd6a

Please sign in to comment.