From ccf1a4962e88166e80633a0e9115db6ba2159e8a Mon Sep 17 00:00:00 2001
From: msvinaykumar
Date: Mon, 29 May 2023 18:25:56 +0530
Subject: [PATCH 01/12] Implemented the updateRecommendation API and enabled
 simultaneous posting of results and recommendations. Added flags to
 selectively post results or update recommendations only, and an option to
 specify a start date.

Signed-off-by: msvinaykumar
---
 tests/scripts/quickTestScalability.py | 761 +++++++++++++++-----------
 1 file changed, 440 insertions(+), 321 deletions(-)

diff --git a/tests/scripts/quickTestScalability.py b/tests/scripts/quickTestScalability.py
index f2384aef7..e0582c445 100644
--- a/tests/scripts/quickTestScalability.py
+++ b/tests/scripts/quickTestScalability.py
@@ -18,359 +18,478 @@
 import datetime
 import requests
 import argparse
+import multiprocessing
+import time
+from multiprocessing import Manager
 
-# create an ArgumentParser object
-parser = argparse.ArgumentParser()
-
-# add the named arguments
-parser.add_argument('--ip', type=str, help='enter ip')
-parser.add_argument('--port', type=int, help='enter port')
-parser.add_argument('--name', type=str, help='enter experiment name')
-parser.add_argument('--count', type=str, help='enter number of experiment and results to create separated by , ')
-parser.add_argument('--minutesjump', type=int, help='enter time diff b/w interval_start_time and interval_end_time')
-
-# parse the arguments from the command line
-args = parser.parse_args()
-
-createExpURL = 'http://%s:%s/createExperiment'%(args.ip,args.port)
-updateExpURL = 'http://%s:%s/updateResults'%(args.ip,args.port)
-createProfileURL = 'http://%s:%s/createPerformanceProfile'%(args.ip,args.port)
-expnameprfix = args.name
-expcount = int(args.count.split(',')[0])
-rescount = int(args.count.split(',')[1])
-minutesjump = args.minutesjump
-headers = {
-    'Content-Type': 'application/json'
-}
-timeout = (60, 60)
-
-print(createExpURL)
-print(updateExpURL)
-print(createProfileURL)
-print("experiment_name : %s " %(expnameprfix))
-print("Number of experiments to create : %s" %(expcount))
-print("Number of results to create : %s" %(rescount))
-print("minutes jump : %s" %(minutesjump))
-
-profile_data = {
-        "name": "resource-optimization-openshift",
-        "profile_version": 1,
-        "k8s_type": "openshift",
-        "slo": {
-            "slo_class": "resource_usage",
-            "direction": "minimize",
-            "objective_function": {
-                "function_type": "expression",
-                "expression": "cpuRequest"
-            },
-            "function_variables": [
-                {
-                    "name": "cpuRequest",
-                    "datasource": "prometheus",
-                    "value_type": "double",
-                    "kubernetes_object": "container",
-                    "query": "kube_pod_container_resource_requests{pod=~'$DEPLOYMENT_NAME$-[^-]*-[^-]*$', container='$CONTAINER_NAME$', namespace='$NAMESPACE', resource='cpu', unit='core'}",
-                    "aggregation_functions": [
-                        {
-                            "function": "avg",
-                            "query": "avg(kube_pod_container_resource_requests{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE\", resource=\"cpu\", unit=\"core\"})"
-                        }
-                    ]
-                }
-            ]
-        }
-    }
-profile_json_payload = json.dumps(profile_data)
-# Send the request with the payload
-response = requests.post(createProfileURL, data=profile_json_payload, headers=headers)
-# Check the response
-if response.status_code == 201:
-    print('Request successful!')
-else:
-    print(f'Request failed with status code {response.status_code}: {response.text}')
-
-
-createdata = {
-            "version": "1.0",
-            "experiment_name": 
"quarkus-resteasy-kruize-min-http-response-time-db_10", - "cluster_name": "cluster-one-division-bell", - "performance_profile": "resource-optimization-openshift", - "mode": "monitor", - "target_cluster": "remote", - "kubernetes_objects": [ - { - "type": "deployment", - "name": "tfb-qrh-deployment_5", - "namespace": "default_5", - "containers": [ - { - "container_image_name": "kruize/tfb-db:1.15", - "container_name": "tfb-server-0" - }, - { - "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", - "container_name": "tfb-server-1" - } - ] +def postResult(expName,startDate,endDate): + if args.debug: print("Posting results for %s - %s "%(startDate,endDate)) + # update the JSON data with the new interval times + data['experiment_name'] = expName + data['interval_start_time'] = startDate + data['interval_end_time'] = endDate + # Convert payload to JSON string + json_payload = json.dumps([data]) + try: + # Send the request with the payload + response = requests.post(updateExpURL, data=json_payload, headers=headers, timeout=timeout) + # Check the response + if response.status_code == 201: + pass + else: + print(f'Request failed with status code {response.status_code}: {response.text}') + requests.post(createProfileURL, data=profile_json_payload, headers=headers) + except requests.exceptions.Timeout: + print('Timeout occurred while connecting to') + except requests.exceptions.RequestException as e: + print('An error occurred while connecting to', e) + finally: + completedResultDatesList.append( datetime.datetime.strptime( endDate , '%Y-%m-%dT%H:%M:%S.%fZ')) + +def updateRecommendation(experiment_name,endDate): + try: + # Send the request with the payload + payloadRecommendationURL = "%s?experiment_name=%s&interval_end_time=%s"%(updateRecommendationURL,experiment_name,endDate.strftime('%Y-%m-%dT%H:%M:%S.%fZ')[:-4] + 'Z') + if args.debug: print(payloadRecommendationURL) + response = requests.post(payloadRecommendationURL, data={}, headers=headers, timeout=timeout) + # Check the response + if response.status_code == 201: + pass + else: + if args.debug: print(f'{payloadRecommendationURL} Request failed with status code {response.status_code}: {response.text}') + requests.post(createProfileURL, data=profile_json_payload, headers=headers) + except requests.exceptions.Timeout: + print('updateRecommendation Timeout occurred while connecting to') + except requests.exceptions.RequestException as e: + print('updateRecommendation Timeout occurred while connecting to', e) + +def validateRecommendation(): + totalResultDates.sort() + completedResultDatesList.sort() + while len(completedRecommendation) < len(totalResultDates): + for completedDate in completedResultDatesList: + if completedDate not in completedRecommendation: + subTotalResulutDates = totalResultDates[:totalResultDates.index(completedDate)] + if(all(x in completedResultDatesList for x in subTotalResulutDates)): + if args.debug: print("You can generate recommendation for completedDate %s \n due to subTotalResulutDates %s \n are subset of completedResultSet %s" %(completedDate,subTotalResulutDates,completedResultDatesList)) + completedRecommendation.append(completedDate) + updateRecommendation(createdata['experiment_name'],completedDate) + else: + if args.debug: print("You CANNOT generate recommendation for completedDate %s \n due to subTotalResulutDates %s \n are not subset of completedResultSet %s" %(completedDate,subTotalResulutDates,completedResultDatesList)) + pass + if args.debug: print('*************************') + time.sleep(2) + +def 
loadData(): + createdata = { + "version": "1.0", + "experiment_name": "quarkus-resteasy-kruize-min-http-response-time-db_10", + "cluster_name": "cluster-one-division-bell", + "performance_profile": "resource-optimization-openshift", + "mode": "monitor", + "target_cluster": "remote", + "kubernetes_objects": [ + { + "type": "deployment", + "name": "tfb-qrh-deployment_5", + "namespace": "default_5", + "containers": [ + { + "container_image_name": "kruize/tfb-db:1.15", + "container_name": "tfb-server-0" + }, + { + "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", + "container_name": "tfb-server-1" + } + ] + } + ], + "trial_settings": { + "measurement_duration": "15min" + }, + "recommendation_settings": { + "threshold": "0.1" } - ], - "trial_settings": { - "measurement_duration": "15min" - }, - "recommendation_settings": { - "threshold": "0.1" } - } - -data = { - "version": "1.0", - "experiment_name": "quarkus-resteasy-kruize-min-http-response-time-db_4", - "interval_start_time": "2022-01-26T14:35:43.511Z", - "interval_end_time": "2022-01-26T14:50:50.511Z", - "kubernetes_objects": [ - { - "type": "deployment", - "name": "tfb-qrh-deployment_5", - "namespace": "default_5", - "containers": [ - { - "container_image_name": "kruize/tfb-db:1.15", - "container_name": "tfb-server-0", - "metrics": [ - { - "name": "cpuRequest", - "results": { - "aggregation_info": { - "sum": 3.4, - "avg": 2.1, - "format": "cores" + + data = { + "version": "1.0", + "experiment_name": "quarkus-resteasy-kruize-min-http-response-time-db_4", + "interval_start_time": "2023-01-01T00:00:00.000Z", + "interval_end_time": "2023-01-01T00:00:00.000Z", + "kubernetes_objects": [ + { + "type": "deployment", + "name": "tfb-qrh-deployment_5", + "namespace": "default_5", + "containers": [ + { + "container_image_name": "kruize/tfb-db:1.15", + "container_name": "tfb-server-0", + "metrics": [ + { + "name": "cpuRequest", + "results": { + "aggregation_info": { + "sum": None, + "avg": 0, + "format": "cores" + } } - } - }, - { - "name": "cpuLimit", - "results": { - "aggregation_info": { - "sum": 3.0, - "avg": 1.5, - "format": "cores" + }, + { + "name": "cpuLimit", + "results": { + "aggregation_info": { + "sum": None, + "avg": 0, + "format": "cores" + } } - } - }, - { - "name": "cpuUsage", - "results": { - "aggregation_info": { - "min": 0.54, - "max": 0.94, - "sum": 0.52, - "avg": 0.12, - "format": "cores" + }, + { + "name": "cpuUsage", + "results": { + "aggregation_info": { + "min": 0, + "max": 0, + "sum": 0, + "avg": 0, + "format": "cores" + } } - } - }, - { - "name": "cpuThrottle", - "results": { - "aggregation_info": { - "sum": 0.9, - "max": 0.09, - "avg": 0.04, - "format": "cores" + }, + { + "name": "cpuThrottle", + "results": { + "aggregation_info": { + "sum": 0, + "max": 0, + "avg": 0, + "format": "cores" + } } - } - }, - { - "name": "memoryRequest", - "results": { - "aggregation_info": { - "sum": 260.85, - "avg": 50.21, - "format": "MiB" + }, + { + "name": "memoryRequest", + "results": { + "aggregation_info": { + "sum": 260.85, + "avg": 50.21, + "format": "MiB" + } } - } - }, - { - "name": "memoryLimit", - "results": { - "aggregation_info": { - "sum": 700, - "avg": 100, - "format": "MiB" + }, + { + "name": "memoryLimit", + "results": { + "aggregation_info": { + "sum": 700, + "avg": 100, + "format": "MiB" + } } - } - }, - { - "name": "memoryUsage", - "results": { - "aggregation_info": { - "min": 50.6, - "max": 198.5, - "sum": 298.5, - "avg": 40.1, - "format": "MiB" + }, + { + "name": "memoryUsage", + "results": { + "aggregation_info": 
{ + "min": 50.6, + "max": 198.5, + "sum": 298.5, + "avg": 40.1, + "format": "MiB" + } } - } - }, - { - "name": "memoryRSS", - "results": { - "aggregation_info": { - "min": 50.6, - "max": 523.6, - "sum": 123.6, - "avg": 31.91, - "format": "MiB" + }, + { + "name": "memoryRSS", + "results": { + "aggregation_info": { + "min": 50.6, + "max": 523.6, + "sum": 123.6, + "avg": 31.91, + "format": "MiB" + } } } - } - ] - }, - { - "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", - "container_name": "tfb-server-1", - "metrics": [ - { - "name": "cpuRequest", - "results": { - "aggregation_info": { - "sum": 4.4, - "avg": 1.1, - "format": "cores" + ] + }, + { + "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", + "container_name": "tfb-server-1", + "metrics": [ + { + "name": "cpuRequest", + "results": { + "aggregation_info": { + "sum": 4.4, + "avg": 1.1, + "format": "cores" + } } - } - }, - { - "name": "cpuLimit", - "results": { - "aggregation_info": { - "sum": 2.0, - "avg": 0.5, - "format": "cores" + }, + { + "name": "cpuLimit", + "results": { + "aggregation_info": { + "sum": 2.0, + "avg": 0.5, + "format": "cores" + } } - } - }, - { - "name": "cpuUsage", - "results": { - "aggregation_info": { - "min": 0.14, - "max": 0.84, - "sum": 0.84, - "avg": 0.12, - "format": "cores" + }, + { + "name": "cpuUsage", + "results": { + "aggregation_info": { + "min": 0.14, + "max": 0.84, + "sum": 0.84, + "avg": 0.12, + "format": "cores" + } } - } - }, - { - "name": "cpuThrottle", - "results": { - "aggregation_info": { - "sum": 0.19, - "max": 0.09, - "avg": 0.045, - "format": "cores" + }, + { + "name": "cpuThrottle", + "results": { + "aggregation_info": { + "sum": 0.19, + "max": 0.09, + "avg": 0.045, + "format": "cores" + } } - } - }, - { - "name": "memoryRequest", - "results": { - "aggregation_info": { - "sum": 250.85, - "avg": 50.21, - "format": "MiB" + }, + { + "name": "memoryRequest", + "results": { + "aggregation_info": { + "sum": 250.85, + "avg": 50.21, + "format": "MiB" + } } - } - }, - { - "name": "memoryLimit", - "results": { - "aggregation_info": { - "sum": 500, - "avg": 100, - "format": "MiB" + }, + { + "name": "memoryLimit", + "results": { + "aggregation_info": { + "sum": 500, + "avg": 100, + "format": "MiB" + } } - } - }, - { - "name": "memoryUsage", - "results": { - "aggregation_info": { - "min": 50.6, - "max": 198.5, - "sum": 198.5, - "avg": 40.1, - "format": "MiB" + }, + { + "name": "memoryUsage", + "results": { + "aggregation_info": { + "min": 50.6, + "max": 198.5, + "sum": 198.5, + "avg": 40.1, + "format": "MiB" + } } - } - }, - { - "name": "memoryRSS", - "results": { - "aggregation_info": { - "min": 50.6, - "max": 123.6, - "sum": 123.6, - "avg": 31.91, - "format": "MiB" + }, + { + "name": "memoryRSS", + "results": { + "aggregation_info": { + "min": 50.6, + "max": 123.6, + "sum": 123.6, + "avg": 31.91, + "format": "MiB" + } } } - } - ] - } - ] - } - ] - } + ] + } + ] + } + ] + } + profile_data = { + "name": "resource-optimization-openshift", + "profile_version": 1, + "k8s_type": "openshift", + "slo": { + "slo_class": "resource_usage", + "direction": "minimize", + "objective_function": { + "function_type": "expression", + "expression": "cpuRequest" + }, + "function_variables": [ + { + "name": "cpuRequest", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "query": "kube_pod_container_resource_requests{pod=~'$DEPLOYMENT_NAME$-[^-]*-[^-]*$', container='$CONTAINER_NAME$', namespace='$NAMESPACE', resource='cpu', unit='core'}", + "aggregation_functions": [ + 
{
+                            "function": "avg",
+                            "query": "avg(kube_pod_container_resource_requests{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE\", resource=\"cpu\", unit=\"core\"})"
+                        }
+                    ]
+                }
+            ]
+        }
+    }
+    return (data,createdata,profile_data)
+
+
+
+if __name__ == "__main__":
+    # create an ArgumentParser object
+    parser = argparse.ArgumentParser()
+
+    # add the named arguments
+    parser.add_argument('--ip', type=str, help='specify kruize ip')
+    parser.add_argument('--port', type=int, help='specify port')
+    parser.add_argument('--name', type=str, help='specify experiment name')
+    parser.add_argument('--count', type=str, help='specify the number of experiments and the number of results per experiment, separated by a comma.')
+    parser.add_argument('--startdate', type=str, help='specify start date and time in "%%Y-%%m-%%dT%%H:%%M:%%S.%%fZ" format.')
+    parser.add_argument('--minutesjump', type=int, help='specify the time difference between the start time and end time of the interval.')
+    parser.add_argument('--postresults', action='store_true', help='when enabled, generate results and post them to the updateResults API.')
+    parser.add_argument('--parallelresultcount', type=int, help='specify the number of processes to run in parallel when posting results.')
+    parser.add_argument('--generaterecommendation', action='store_true', help='enable recommendation generation.')
+    parser.add_argument('--debug', action='store_true', help='print debug log.')
+
+
+
+    # parse the arguments from the command line
+    args = parser.parse_args()
+
+    createExpURL = 'http://%s:%s/createExperiment'%(args.ip,args.port)
+    updateExpURL = 'http://%s:%s/updateResults'%(args.ip,args.port)
+    createProfileURL = 'http://%s:%s/createPerformanceProfile'%(args.ip,args.port)
+    updateExpURL = 'http://%s:%s/updateResults'%(args.ip,args.port)
+    updateRecommendationURL = 'http://%s:%s/updateRecommendations'%(args.ip,args.port)
+    expnameprfix = args.name
+    expcount = int(args.count.split(',')[0])
+    rescount = int(args.count.split(',')[1])
+    minutesjump = args.minutesjump
+    generaterecommendation = args.generaterecommendation
+    headers = {
+        'Content-Type': 'application/json'
+    }
+    timeout = (60, 60)
+    data,createdata,profile_data = loadData()
+
+    if args.startdate:
+        data['interval_end_time'] = args.startdate
+
+    print(createExpURL)
+    print(updateExpURL)
+    print(createProfileURL)
+    print("experiment_name : %s " %(expnameprfix))
+    print("Number of experiments to create : %s" %(expcount))
+    print("Number of results to create : %s" %(rescount))
+    print("startdate : %s" %(data['interval_end_time']))
+    print("minutes jump : %s" %(minutesjump))
+    print("postresults : %s" %(args.postresults))
+    print("generaterecommendation : %s" %(generaterecommendation))
+
+
+    profile_json_payload = json.dumps(profile_data)
+    # Send the request with the payload
+    response = requests.post(createProfileURL, data=profile_json_payload, headers=headers)
+    # Check the response
+    if response.status_code == 201:
+        
print('Request successful!') + else: + print(f'Request failed with status code {response.status_code}: {response.text}') + + + # Create a shared list using multiprocessing.Manager() + manager = Manager() + completedResultDatesList = manager.list() + totalResultDates = manager.list() + completedResultDatesList = manager.list() + completedRecommendation = manager.list() + + start_time = time.time() + for i in range(1,expcount+1): + try: + successfulCnt = 0 + experiment_name = "%s_%s" %(expnameprfix,i) + createdata['experiment_name'] = experiment_name + create_json_payload = json.dumps([createdata]) + # Send the request with the payload + response = requests.post(createExpURL, data=create_json_payload, headers=headers, timeout=timeout) + # Check the response + j = 0 + if response.status_code == 201 or response.status_code == 409: + print('Create experiment_name %s Request successful!'%(experiment_name)) + timeDeltaList = [] + for j in range(rescount): interval_start_time = datetime.datetime.strptime(data['interval_end_time'] , '%Y-%m-%dT%H:%M:%S.%fZ') interval_end_time = datetime.datetime.strptime(data['interval_end_time'] , '%Y-%m-%dT%H:%M:%S.%fZ' ) + datetime.timedelta(minutes=minutesjump) - - # update the JSON data with the new interval times - data['interval_start_time'] = interval_start_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ') data['interval_end_time'] = interval_end_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ') + timeDeltaList.append((experiment_name,interval_start_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ'),interval_end_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))) + totalResultDates.append(interval_end_time) + if args.postresults and args.generaterecommendation: + # Create a pool of processes + recommendationProcess = multiprocessing.Process(target=validateRecommendation) + recommendationProcess.start() + num_processes = args.parallelresultcount + pool = multiprocessing.Pool(processes=num_processes) + # Start the parallel execution + pool.starmap(postResult, timeDeltaList) + # Close the pool and wait for the processes to finish + recommendationProcess.join() + pool.close() + pool.join() + elif args.postresults: + num_processes = args.parallelresultcount + pool = multiprocessing.Pool(processes=num_processes) + # Start the parallel execution + pool.starmap(postResult, timeDeltaList) + # Close the pool and wait for the processes to finish + pool.close() + pool.join() + elif args.generaterecommendation: + recommendationDataList = [] + for i_end_date in totalResultDates: + recommendationDataList.append((createdata['experiment_name'],i_end_date)) + num_processes = args.parallelresultcount + pool = multiprocessing.Pool(processes=num_processes) + # Start the parallel execution + pool.starmap(updateRecommendation, recommendationDataList) + # Close the pool and wait for the processes to finish + pool.close() + pool.join() + else: + print("Invalid choice") + else: + print(f'Request failed with status code {response.status_code}: {response.text}') + except requests.exceptions.Timeout: + print('Timeout occurred while connecting to') + except requests.exceptions.RequestException as e: + print('An error occurred while connecting to', e) + except Exception as e: + print('An error occurred ', e) - # Convert payload to JSON string - json_payload = json.dumps([data]) - # Send the request with the payload - response = requests.post(updateExpURL, data=json_payload, headers=headers, timeout=timeout) + print('Request successful! 
completed : ExperimentCount : %s/%s Results Count : %s/%s Recommendation count : %s' %(i,expcount,len(completedResultDatesList),len(totalResultDates),len(completedRecommendation) )) + elapsed_time = time.time() - start_time + hours, rem = divmod(elapsed_time, 3600) + minutes, seconds = divmod(rem, 60) + print("Time elapsed: {:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds)) + - # Check the response - if response.status_code == 201: - pass - else: - print(f'Request failed with status code {response.status_code}: {response.text}') - except requests.exceptions.Timeout: - print('Timeout occurred while connecting to') - except requests.exceptions.RequestException as e: - print('An error occurred while connecting to', e) - else: - print(f'Request failed with status code {response.status_code}: {response.text}') - except requests.exceptions.Timeout: - print('Timeout occurred while connecting to') - except requests.exceptions.RequestException as e: - print('An error occurred while connecting to', e) - print('Request successful! completed : %s/%s %s/%s ' %(i,expcount,j,rescount )) + #for i in {1..50}; do nohup time python3 -u quickTestScalability.py --ip master-1.kruizevin.lab.psi.pnq2.redhat.com --port 31521 --name 5kexp$i --count 100,1500 --minutesjump=15 > /tmp/5kexp$i.log 2>&1 & done From 0d8824a656757fd0c57d6b811baa0432cee46d86 Mon Sep 17 00:00:00 2001 From: msvinaykumar Date: Tue, 6 Jun 2023 13:10:10 +0530 Subject: [PATCH 02/12] Added progress prints Signed-off-by: msvinaykumar --- tests/scripts/quickTestScalability.py | 72 ++++++++++++++++++++------- 1 file changed, 54 insertions(+), 18 deletions(-) diff --git a/tests/scripts/quickTestScalability.py b/tests/scripts/quickTestScalability.py index e0582c445..a6eddfd88 100644 --- a/tests/scripts/quickTestScalability.py +++ b/tests/scripts/quickTestScalability.py @@ -22,6 +22,7 @@ import time from multiprocessing import Manager + def postResult(expName,startDate,endDate): if args.debug: print("Posting results for %s - %s "%(startDate,endDate)) # update the JSON data with the new interval times @@ -35,6 +36,7 @@ def postResult(expName,startDate,endDate): response = requests.post(updateExpURL, data=json_payload, headers=headers, timeout=timeout) # Check the response if response.status_code == 201: + print(f"progress {expName} : ExperimentCount : %s/%s Results Count : %s/%s Recommendation count : %s" %(completedExperimentCount.value,expcount,len(completedResultDatesList),len(totalResultDates),len(completedRecommendation) ), end="\r") pass else: print(f'Request failed with status code {response.status_code}: {response.text}') @@ -54,6 +56,7 @@ def updateRecommendation(experiment_name,endDate): response = requests.post(payloadRecommendationURL, data={}, headers=headers, timeout=timeout) # Check the response if response.status_code == 201: + print(f"progress {experiment_name} : ExperimentCount : %s/%s Results Count : %s/%s Recommendation count : %s" %(completedExperimentCount.value,expcount,len(completedResultDatesList),len(totalResultDates),len(completedRecommendation) ), end="\r") pass else: if args.debug: print(f'{payloadRecommendationURL} Request failed with status code {response.status_code}: {response.text}') @@ -62,23 +65,46 @@ def updateRecommendation(experiment_name,endDate): print('updateRecommendation Timeout occurred while connecting to') except requests.exceptions.RequestException as e: print('updateRecommendation Timeout occurred while connecting to', e) + finally: + completedRecommendation.append(endDate) + +def 
updateRecommendationInBulk(): + pendingRecommendation = list(set(totalResultDates).difference(completedRecommendation)) + print(len(pendingRecommendation)) + print(len(pendingRecommendation)) + recommendationDataList = [] + for i_end_date in pendingRecommendation: + recommendationDataList.append((createdata['experiment_name'],i_end_date)) + num_processes = args.parallelresultcount + pool = multiprocessing.Pool(processes=num_processes) + # Start the parallel execution + pool.starmap(updateRecommendation, recommendationDataList) + # Close the pool and wait for the processes to finish + pool.close() + pool.join() def validateRecommendation(): totalResultDates.sort() completedResultDatesList.sort() while len(completedRecommendation) < len(totalResultDates): - for completedDate in completedResultDatesList: - if completedDate not in completedRecommendation: - subTotalResulutDates = totalResultDates[:totalResultDates.index(completedDate)] - if(all(x in completedResultDatesList for x in subTotalResulutDates)): - if args.debug: print("You can generate recommendation for completedDate %s \n due to subTotalResulutDates %s \n are subset of completedResultSet %s" %(completedDate,subTotalResulutDates,completedResultDatesList)) - completedRecommendation.append(completedDate) - updateRecommendation(createdata['experiment_name'],completedDate) - else: - if args.debug: print("You CANNOT generate recommendation for completedDate %s \n due to subTotalResulutDates %s \n are not subset of completedResultSet %s" %(completedDate,subTotalResulutDates,completedResultDatesList)) - pass - if args.debug: print('*************************') - time.sleep(2) + if (len(totalResultDates) == len(completedResultDatesList)): + updateRecommendationInBulk() + break + else: + for completedDate in completedResultDatesList: + if completedDate not in completedRecommendation: + subTotalResulutDates = totalResultDates[:totalResultDates.index(completedDate)] + if(all(x in completedResultDatesList for x in subTotalResulutDates)): + if args.debug: print("You can generate recommendation for completedDate %s \n due to subTotalResulutDates %s \n are subset of completedResultSet %s" %(completedDate,subTotalResulutDates,completedResultDatesList)) + updateRecommendation(createdata['experiment_name'],completedDate) + if (len(totalResultDates) == len(completedResultDatesList)): + updateRecommendationInBulk() + break + else: + if args.debug: print("You CANNOT generate recommendation for completedDate %s \n due to subTotalResulutDates %s \n are not subset of completedResultSet %s" %(completedDate,subTotalResulutDates,completedResultDatesList)) + pass + if args.debug: print('*************************') + time.sleep(1) def loadData(): createdata = { @@ -367,12 +393,19 @@ def loadData(): # parse the arguments from the command line args = parser.parse_args() + if args.port != 0: + createExpURL = 'http://%s:%s/createExperiment'%(args.ip,args.port) + updateExpURL = 'http://%s:%s/updateResults'%(args.ip,args.port) + createProfileURL = 'http://%s:%s/createPerformanceProfile'%(args.ip,args.port) + updateExpURL = 'http://%s:%s/updateResults'%(args.ip,args.port) + updateRecommendationURL = 'http://%s:%s/updateRecommendations'%(args.ip,args.port) + else: + createExpURL = 'http://%s/createExperiment'%(args.ip) + updateExpURL = 'http://%s/updateResults'%(args.ip) + createProfileURL = 'http://%s/createPerformanceProfile'%(args.ip) + updateExpURL = 'http://%s/updateResults'%(args.ip) + updateRecommendationURL = 'http://%s/updateRecommendations'%(args.ip) - 
createExpURL = 'http://%s:%s/createExperiment'%(args.ip,args.port)
-    updateExpURL = 'http://%s:%s/updateResults'%(args.ip,args.port)
-    createProfileURL = 'http://%s:%s/createPerformanceProfile'%(args.ip,args.port)
-    updateExpURL = 'http://%s:%s/updateResults'%(args.ip,args.port)
-    updateRecommendationURL = 'http://%s:%s/updateRecommendations'%(args.ip,args.port)
 
     expnameprfix = args.name
     expcount = int(args.count.split(',')[0])
     rescount = int(args.count.split(',')[1])
@@ -415,6 +448,8 @@ def loadData():
     totalResultDates = manager.list()
     completedResultDatesList = manager.list()
     completedRecommendation = manager.list()
+    completedExperimentCount = manager.Value(int,0)
+    completedResultsCount = manager.Value(int,0)
 
     start_time = time.time()
     for i in range(1,expcount+1):
@@ -429,6 +464,7 @@ def loadData():
             j = 0
             if response.status_code == 201 or response.status_code == 409:
                 print('Create experiment_name %s Request successful!'%(experiment_name))
+                completedExperimentCount.value = completedExperimentCount.value + 1
                 timeDeltaList = []
                 for j in range(rescount):
                     interval_start_time = datetime.datetime.strptime(data['interval_end_time'] , '%Y-%m-%dT%H:%M:%S.%fZ')
@@ -480,7 +516,7 @@ def loadData():
             print('An error occurred ', e)
 
 
-    print('Request successful! completed : ExperimentCount : %s/%s Results Count : %s/%s Recommendation count : %s' %(i,expcount,len(completedResultDatesList),len(totalResultDates),len(completedRecommendation) ))
+    print('Request successful! completed : ExperimentCount : %s/%s Results Count : %s/%s Recommendation count : %s' %(completedExperimentCount.value,expcount,len(completedResultDatesList),len(totalResultDates),len(completedRecommendation) ))
     elapsed_time = time.time() - start_time
     hours, rem = divmod(elapsed_time, 3600)
     minutes, seconds = divmod(rem, 60)

From c1bed78ccd53689111488fb04ffd1e38bfde6887 Mon Sep 17 00:00:00 2001
From: msvinaykumar
Date: Tue, 6 Jun 2023 17:21:53 +0530
Subject: [PATCH 03/12] minor fix

Signed-off-by: msvinaykumar
---
 tests/scripts/quickTestScalability.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/tests/scripts/quickTestScalability.py b/tests/scripts/quickTestScalability.py
index a6eddfd88..5c54942b3 100644
--- a/tests/scripts/quickTestScalability.py
+++ b/tests/scripts/quickTestScalability.py
@@ -57,7 +57,6 @@ def updateRecommendation(experiment_name,endDate):
         # Check the response
         if response.status_code == 201:
             print(f"progress {experiment_name} : ExperimentCount : %s/%s Results Count : %s/%s Recommendation count : %s" %(completedExperimentCount.value,expcount,len(completedResultDatesList),len(totalResultDates),len(completedRecommendation) ), end="\r")
-            pass
         else:
             if args.debug: print(f'{payloadRecommendationURL} Request failed with status code {response.status_code}: {response.text}')
             requests.post(createProfileURL, data=profile_json_payload, headers=headers)
@@ -70,8 +69,6 @@ def updateRecommendation(experiment_name,endDate):
 
 def updateRecommendationInBulk():
     pendingRecommendation = list(set(totalResultDates).difference(completedRecommendation))
-    print(len(pendingRecommendation))
-    print(len(pendingRecommendation))
     recommendationDataList = []
     for i_end_date in pendingRecommendation:
         recommendationDataList.append((createdata['experiment_name'],i_end_date))

From c2e4ab680a8dbf61318a250059dd1d9c9325cf78 Mon Sep 17 00:00:00 2001
From: msvinaykumar
Date: Tue, 27 Jun 2023 20:54:00 +0530
Subject: [PATCH 04/12] Included updateRecommendation API call into e2e module

Signed-off-by: msvinaykumar
---
.../remote_monitoring_tests/helpers/kruize.py | 13 +++++ .../rest_apis/test_e2e_workflow.py | 50 ++++++++----------- 2 files changed, 34 insertions(+), 29 deletions(-) diff --git a/tests/scripts/remote_monitoring_tests/helpers/kruize.py b/tests/scripts/remote_monitoring_tests/helpers/kruize.py index 9f7ee21e9..8f26ec61e 100644 --- a/tests/scripts/remote_monitoring_tests/helpers/kruize.py +++ b/tests/scripts/remote_monitoring_tests/helpers/kruize.py @@ -94,6 +94,19 @@ def update_results(result_json_file): print(response.text) return response +# Description: This function generates recommendation for the given experiment_name , start time and end time . +def update_recommendations(experiment_name, startTime, endTime): + print("\n************************************************************") + print("\nUpdating the recommendation \n for %s for dates Start-time: %s and End-time: %s..."%(experiment_name,startTime,endTime)) + url = URL + "/updateRecommendations?experiment_name=%s&interval_start_time=%s&interval_end_time=%s"%(experiment_name,startTime,endTime) + print("URL = ", url) + response = requests.post(url,) + print("Response status code = ", response.status_code) + print(response.text) + print("\n************************************************************") + return response + + # Description: This function obtains the recommendations from Kruize Autotune using listRecommendations API # Input Parameters: experiment name, flag indicating latest result and monitoring end time def list_recommendations(experiment_name = None, latest = None, monitoring_end_time = None): diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py index 8adb403aa..f964bddd7 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py @@ -1,14 +1,13 @@ -import requests +import json + import pytest -from jinja2 import Environment, FileSystemLoader -from helpers.list_reco_json_validate import * -from helpers.list_reco_json_schema import * -from helpers.utils import * +from helpers.fixtures import * from helpers.generate_rm_jsons import * from helpers.kruize import * -from helpers.fixtures import * -import time -import json +from helpers.list_reco_json_schema import * +from helpers.list_reco_json_validate import * +from helpers.utils import * + @pytest.mark.test_e2e def test_list_recommendations_multiple_exps_from_diff_json_files(cluster_type): @@ -16,8 +15,8 @@ def test_list_recommendations_multiple_exps_from_diff_json_files(cluster_type): Test Description: This test validates list recommendations for multiple experiments posted using different json files """ - input_json_file="../json_files/create_exp.json" - result_json_file="../json_files/update_results.json" + input_json_file = "../json_files/create_exp.json" + result_json_file = "../json_files/update_results.json" find = [] json_data = json.load(open(input_json_file)) @@ -55,13 +54,13 @@ def test_list_recommendations_multiple_exps_from_diff_json_files(cluster_type): # Get the experiment name json_data = json.load(open(create_exp_json_file)) experiment_name = json_data[0]['experiment_name'] - + interval_start_time = get_datetime() for j in range(num_res): update_timestamps = True generate_json(find, result_json_file, update_results_json_file, i, update_timestamps) result_json = read_json_data_from_file(update_results_json_file) if j == 0: - start_time = get_datetime() + 
start_time = interval_start_time
             else:
                 start_time = end_time
 
@@ -81,32 +80,26 @@ def test_list_recommendations_multiple_exps_from_diff_json_files(cluster_type):
 
             # Expecting that we have recommendations
             if j > 96:
-                # Sleep for 1 sec to get recommendations
-                time.sleep(1)
-
+                response = update_recommendations(experiment_name, interval_start_time, end_time)
+                data = response.json()
+                assert response.status_code == SUCCESS_STATUS_CODE
                 response = list_recommendations(experiment_name)
                 if response.status_code == SUCCESS_200_STATUS_CODE:
                     recommendation_json = response.json()
-                    recommendation_section = recommendation_json[0]["kubernetes_objects"][0]["containers"][0]["recommendations"]
+                    recommendation_section = recommendation_json[0]["kubernetes_objects"][0]["containers"][0][
+                        "recommendations"]
                     high_level_notifications = recommendation_section["notifications"]
 
                     # Check if duration
                     assert INFO_DURATION_BASED_RECOMMENDATIONS_AVAILABLE_CODE in high_level_notifications
-
                     data_section = recommendation_section["data"]
-
                     short_term_recommendation = data_section[str(end_time)]["duration_based"]["short_term"]
-
                     short_term_notifications = short_term_recommendation["notifications"]
-
                     for notification in short_term_notifications.values():
                         assert notification["type"] != "error"
 
-        # sleep for a while before fetching recommendations
-        time.sleep(20)
-
-        # Get the experiment name
-        json_data = json.load(open(create_exp_json_file))
-        experiment_name = json_data[0]['experiment_name']
+        response = update_recommendations(experiment_name, interval_start_time, end_time)
+        data = response.json()
+        assert response.status_code == SUCCESS_STATUS_CODE
 
         # Invoke list recommendations for the specified experiment
         response = list_recommendations(experiment_name)
@@ -120,11 +113,11 @@ def test_list_recommendations_multiple_exps_from_diff_json_files(cluster_type):
         # Validate the json values
         create_exp_json = read_json_data_from_file(create_exp_json_file)
         update_results_json = []
-        update_results_json.append(result_json_arr[len(result_json_arr)-1])
+        update_results_json.append(result_json_arr[len(result_json_arr) - 1])
 
         expected_duration_in_hours = SHORT_TERM_DURATION_IN_HRS_MAX
         validate_reco_json(create_exp_json[0], update_results_json, list_reco_json[0], expected_duration_in_hours)
-
+
     # Invoke list recommendations for a non-existing experiment
     experiment_name = "Non-existing-exp"
     response = list_recommendations(experiment_name)
@@ -141,4 +134,3 @@ def test_list_recommendations_multiple_exps_from_diff_json_files(cluster_type):
         response = delete_experiment(json_file)
         print("delete exp = ", response.status_code)
         assert response.status_code == SUCCESS_STATUS_CODE
-

From dbaa5acf9e22c4e81ba1ecc7f8202280d0e6455b Mon Sep 17 00:00:00 2001
From: msvinaykumar
Date: Fri, 30 Jun 2023 17:53:22 +0530
Subject: [PATCH 05/12] Included sanity and negative test cases

Signed-off-by: msvinaykumar
---
 .../remote_monitoring_tests/helpers/kruize.py |  78 +++--
 .../remote_monitoring_tests/helpers/utils.py  | 271 +++++++++--------
 .../rest_apis/test_update_recommendations.py  | 273 ++++++++++++++++++
 3 files changed, 472 insertions(+), 150 deletions(-)
 create mode 100644 tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py

diff --git a/tests/scripts/remote_monitoring_tests/helpers/kruize.py b/tests/scripts/remote_monitoring_tests/helpers/kruize.py
index 8f26ec61e..9f85d1003 100644
--- a/tests/scripts/remote_monitoring_tests/helpers/kruize.py
+++ b/tests/scripts/remote_monitoring_tests/helpers/kruize.py
@@ -14,43 +14,46 @@ 
limitations under the License. """ +import json import subprocess + import requests -import json -import os -import time -def form_kruize_url(cluster_type, SERVER_IP = None): + +def form_kruize_url(cluster_type, SERVER_IP=None): global URL if SERVER_IP != None: URL = "http://" + str(SERVER_IP) - print ("\nKRUIZE AUTOTUNE URL = ", URL) + print("\nKRUIZE AUTOTUNE URL = ", URL) return if (cluster_type == "minikube"): - port = subprocess.run(['kubectl -n monitoring get svc kruize --no-headers -o=custom-columns=PORT:.spec.ports[*].nodePort'], shell=True, stdout=subprocess.PIPE) + port = subprocess.run( + ['kubectl -n monitoring get svc kruize --no-headers -o=custom-columns=PORT:.spec.ports[*].nodePort'], + shell=True, stdout=subprocess.PIPE) - AUTOTUNE_PORT=port.stdout.decode('utf-8').strip('\n') + AUTOTUNE_PORT = port.stdout.decode('utf-8').strip('\n') ip = subprocess.run(['minikube ip'], shell=True, stdout=subprocess.PIPE) - SERVER_IP=ip.stdout.decode('utf-8').strip('\n') + SERVER_IP = ip.stdout.decode('utf-8').strip('\n') URL = "http://" + str(SERVER_IP) + ":" + str(AUTOTUNE_PORT) elif (cluster_type == "openshift"): subprocess.run(['oc expose svc/kruize -n openshift-tuning'], shell=True, stdout=subprocess.PIPE) - ip = subprocess.run(['oc status -n openshift-tuning | grep "kruize" | grep port | cut -d " " -f1 | cut -d "/" -f3'], shell=True, stdout=subprocess.PIPE) - SERVER_IP=ip.stdout.decode('utf-8').strip('\n') + ip = subprocess.run( + ['oc status -n openshift-tuning | grep "kruize" | grep port | cut -d " " -f1 | cut -d "/" -f3'], shell=True, + stdout=subprocess.PIPE) + SERVER_IP = ip.stdout.decode('utf-8').strip('\n') print("IP = ", SERVER_IP) URL = "http://" + str(SERVER_IP) - print ("\nKRUIZE AUTOTUNE URL = ", URL) + print("\nKRUIZE AUTOTUNE URL = ", URL) # Description: This function validates the input json and posts the experiment using createExperiment API to Kruize Autotune # Input Parameters: experiment input json -def create_experiment(input_json_file, invalid_header = False): - +def create_experiment(input_json_file, invalid_header=False): json_file = open(input_json_file, "r") input_json = json.loads(json_file.read()) print("\n************************************************************") @@ -59,25 +62,25 @@ def create_experiment(input_json_file, invalid_header = False): # read the json print("\nCreating the experiment...") - + url = URL + "/createExperiment" print("URL = ", url) - + headers = {'content-type': 'application/xml'} if invalid_header: print("Invalid header") response = requests.post(url, json=input_json, headers=headers) else: response = requests.post(url, json=input_json) - + print(response) print("Response status code = ", response.status_code) return response + # Description: This function validates the result json and posts the experiment results using updateResults API to Kruize Autotune # Input Parameters: experiment input json def update_results(result_json_file): - # read the json json_file = open(result_json_file, "r") result_json = json.loads(json_file.read()) @@ -94,13 +97,23 @@ def update_results(result_json_file): print(response.text) return response + # Description: This function generates recommendation for the given experiment_name , start time and end time . 
def update_recommendations(experiment_name, startTime, endTime): print("\n************************************************************") - print("\nUpdating the recommendation \n for %s for dates Start-time: %s and End-time: %s..."%(experiment_name,startTime,endTime)) - url = URL + "/updateRecommendations?experiment_name=%s&interval_start_time=%s&interval_end_time=%s"%(experiment_name,startTime,endTime) + print("\nUpdating the recommendation \n for %s for dates Start-time: %s and End-time: %s..." % ( + experiment_name, startTime, endTime)) + queryString = "?" + if experiment_name: + queryString = queryString + "&experiment_name=%s" % (experiment_name) + if endTime: + queryString = queryString + "&interval_end_time=%s" % (endTime) + if startTime: + queryString = queryString + "&interval_start_time=%s" % (startTime) + + url = URL + "/updateRecommendations?%s" % (queryString) print("URL = ", url) - response = requests.post(url,) + response = requests.post(url, ) print("Response status code = ", response.status_code) print(response.text) print("\n************************************************************") @@ -109,7 +122,7 @@ def update_recommendations(experiment_name, startTime, endTime): # Description: This function obtains the recommendations from Kruize Autotune using listRecommendations API # Input Parameters: experiment name, flag indicating latest result and monitoring end time -def list_recommendations(experiment_name = None, latest = None, monitoring_end_time = None): +def list_recommendations(experiment_name=None, latest=None, monitoring_end_time=None): PARAMS = "" print("\nListing the recommendations...") url = URL + "/listRecommendations" @@ -119,19 +132,19 @@ def list_recommendations(experiment_name = None, latest = None, monitoring_end_t if latest == None and monitoring_end_time == None: response = requests.get(url) elif latest != None: - PARAMS = {'latest' : latest} + PARAMS = {'latest': latest} elif monitoring_end_time != None: - PARAMS = {'monitoring_end_time' : monitoring_end_time} + PARAMS = {'monitoring_end_time': monitoring_end_time} else: if latest == None and monitoring_end_time == None: PARAMS = {'experiment_name': experiment_name} elif latest != None: - PARAMS = {'experiment_name': experiment_name, 'latest' : latest} + PARAMS = {'experiment_name': experiment_name, 'latest': latest} elif monitoring_end_time != None: - PARAMS = {'experiment_name': experiment_name, 'monitoring_end_time' : monitoring_end_time} - + PARAMS = {'experiment_name': experiment_name, 'monitoring_end_time': monitoring_end_time} + print("PARAMS = ", PARAMS) - response = requests.get(url = url, params = PARAMS) + response = requests.get(url=url, params=PARAMS) print("Response status code = ", response.status_code) print("\n************************************************************") @@ -139,10 +152,10 @@ def list_recommendations(experiment_name = None, latest = None, monitoring_end_t print("\n************************************************************") return response + # Description: This function deletes the experiment and posts the experiment using createExperiment API to Kruize Autotune # Input Parameters: experiment input json -def delete_experiment(input_json_file, invalid_header = False): - +def delete_experiment(input_json_file, invalid_header=False): json_file = open(input_json_file, "r") input_json = json.loads(json_file.read()) @@ -162,15 +175,15 @@ def delete_experiment(input_json_file, invalid_header = False): response = requests.delete(url, json=delete_json, headers=headers) else: 
response = requests.delete(url, json=delete_json) - + print(response) print("Response status code = ", response.status_code) return response + # Description: This function creates a performance profile using the Kruize createPerformanceProfile API # Input Parameters: performance profile json def create_performance_profile(perf_profile_json_file): - json_file = open(perf_profile_json_file, "r") perf_profile_json = json.loads(json_file.read()) @@ -183,6 +196,7 @@ def create_performance_profile(perf_profile_json_file): print(response.text) return response + # Description: This function obtains the experiments from Kruize Autotune using listExperiments API # Input Parameters: None def list_experiments(): @@ -191,7 +205,7 @@ def list_experiments(): url = URL + "/listExperiments" print("URL = ", url) - response = requests.get(url = url, params = PARAMS) + response = requests.get(url=url, params=PARAMS) print("Response status code = ", response.status_code) return response diff --git a/tests/scripts/remote_monitoring_tests/helpers/utils.py b/tests/scripts/remote_monitoring_tests/helpers/utils.py index 8d03130fa..fe3b0dddc 100644 --- a/tests/scripts/remote_monitoring_tests/helpers/utils.py +++ b/tests/scripts/remote_monitoring_tests/helpers/utils.py @@ -1,4 +1,3 @@ - """ Copyright (c) 2022, 2022 Red Hat, IBM Corporation and others. @@ -18,8 +17,8 @@ import json import os import re -import time import subprocess +import time from datetime import datetime, timedelta SUCCESS_STATUS_CODE = 201 @@ -31,19 +30,23 @@ ERROR_STATUS = "ERROR" UPDATE_RESULTS_SUCCESS_MSG = "Results added successfully! View saved results at /listExperiments." CREATE_EXP_SUCCESS_MSG = "Experiment registered successfully with Kruize. View registered experiments at /listExperiments" +UPDATE_RECOMMENDATIONS_MANDATORY_DEFAULT_MESSAGE = 'experiment_name is mandatory' +UPDATE_RECOMMENDATIONS_MANDATORY_INTERVAL_END_DATE = 'interval_end_time is mandatory' +UPDATE_RECOMMENDATIONS_DATA_NOT_FOUND = 'Data not found!' +UPDATE_RECOMMENDATIONS_START_TIME_PRECEDE_END_TIME = 'The Start time should precede the End time!' 
# Kruize Recommendations Notification codes -NOTIFICATION_CODE_FOR_DURATION_BASED_RECOMMENDATIONS_AVAILABLE = "112101" -NOTIFICATION_CODE_FOR_NOT_ENOUGH_DATA = "120001" -NOTIFICATION_CODE_FOR_CPU_RECORDS_ARE_IDLE = "323001" -NOTIFICATION_CODE_FOR_CPU_RECORDS_ARE_ZERO = "323002" -NOTIFICATION_CODE_FOR_CPU_RECORDS_NOT_AVAILABLE = "323003" -NOTIFICATION_CODE_FOR_MEMORY_RECORDS_ARE_ZERO = "324001" -NOTIFICATION_CODE_FOR_MEMORY_RECORDS_NOT_AVAILABLE = "324002" -NOTIFICATION_CODE_FOR_CPU_REQUEST_NOT_SET = "523001" -NOTIFICATION_CODE_FOR_CPU_LIMIT_NOT_SET = "423001" -NOTIFICATION_CODE_FOR_MEMORY_REQUEST_NOT_SET = "524001" -NOTIFICATION_CODE_FOR_MEMORY_LIMIT_NOT_SET = "524002" +NOTIFICATION_CODE_FOR_DURATION_BASED_RECOMMENDATIONS_AVAILABLE = "112101" +NOTIFICATION_CODE_FOR_NOT_ENOUGH_DATA = "120001" +NOTIFICATION_CODE_FOR_CPU_RECORDS_ARE_IDLE = "323001" +NOTIFICATION_CODE_FOR_CPU_RECORDS_ARE_ZERO = "323002" +NOTIFICATION_CODE_FOR_CPU_RECORDS_NOT_AVAILABLE = "323003" +NOTIFICATION_CODE_FOR_MEMORY_RECORDS_ARE_ZERO = "324001" +NOTIFICATION_CODE_FOR_MEMORY_RECORDS_NOT_AVAILABLE = "324002" +NOTIFICATION_CODE_FOR_CPU_REQUEST_NOT_SET = "523001" +NOTIFICATION_CODE_FOR_CPU_LIMIT_NOT_SET = "423001" +NOTIFICATION_CODE_FOR_MEMORY_REQUEST_NOT_SET = "524001" +NOTIFICATION_CODE_FOR_MEMORY_LIMIT_NOT_SET = "524002" AMOUNT_MISSING_IN_CPU_SECTION_CODE = "223001" INVALID_AMOUNT_IN_CPU_SECTION_CODE = "223002" @@ -72,7 +75,6 @@ MEMORY_USAGE = "memoryUsage" MEMORY_RSS = "memoryRSS" - NOT_ENOUGH_DATA_MSG = "There is not enough data available to generate a recommendation." EXP_EXISTS_MSG = "Experiment name already exists: " INVALID_DEPLOYMENT_TYPE_MSG = "Invalid deployment type: xyz" @@ -87,75 +89,76 @@ # version,experiment_name,cluster_name,performance_profile,mode,target_cluster,type,name,namespace,container_image_name,container_name,measurement_duration,threshold create_exp_test_data = { - "version": "1.0", - "experiment_name": "quarkus-resteasy-kruize-min-http-response-time-db", - "cluster_name": "cluster-one-division-bell", - "performance_profile": "resource-optimization-openshift", - "mode": "monitor", - "target_cluster": "remote", - "type": "deployment", - "name": "tfb-qrh-sample", - "namespace": "default", - "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", - "container_name": "tfb-server", - "measurement_duration": "15min", - "threshold": "0.1" + "version": "1.0", + "experiment_name": "quarkus-resteasy-kruize-min-http-response-time-db", + "cluster_name": "cluster-one-division-bell", + "performance_profile": "resource-optimization-openshift", + "mode": "monitor", + "target_cluster": "remote", + "type": "deployment", + "name": "tfb-qrh-sample", + "namespace": "default", + "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", + "container_name": "tfb-server", + "measurement_duration": "15min", + "threshold": "0.1" } # version, 
experiment_name,interval_start_time,interval_end_time,type,name,namespace,container_image_name,container_name,cpuRequest_name,cpuRequest_sum,cpuRequest_avg,cpuRequest_format,cpuLimit_name,cpuLimit_sum,cpuLimit_avg,cpuLimit_format,cpuUsage_name,cpuUsage_sum,cpuUsage_max,cpuUsage_avg,cpuUsage_min,cpuUsage_format,cpuThrottle_name,cpuThrottle_sum,cpuThrottle_max,cpuThrottle_avg,cpuThrottle_format,memoryRequest_name,memoryRequest_sum,memoryRequest_avg,memoryRequest_format,memoryLimit_name,memoryLimit_sum,memoryLimit_avg,memoryLimit_format,memoryUsage_name,memoryUsage_sum,memoryUsage_max,memoryUsage_avg,memUsage_min,memoryUsage_format,memoryRSS_name,memoryRSS_sum,memoryRSS_max,memoryRSS_avg,memoryRSS_min,memoryRSS_format update_results_test_data = { - "version": "1.0", - "experiment_name": "quarkus-resteasy-kruize-min-http-response-time-db", - "interval_start_time": "2022-01-23T18:25:43.511Z", - "interval_end_time": "2022-01-23T18:40:43.511Z", - "type": "deployment", - "name": "tfb-qrh-deployment", - "namespace": "default", - "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", - "container_name": "tfb-server", - "cpuRequest_name": "cpuRequest", - "cpuRequest_sum": 4.4, - "cpuRequest_avg": 1.1, - "cpuRequest_format": "cores", - "cpuLimit_name": "cpuLimit", - "cpuLimit_sum": 5.4, - "cpuLimit_avg": 22.1, - "cpuLimit_format": "cores", - "cpuUsage_name": "cpuUsage", - "cpuUsage_sum": 3.4, - "cpuUsage_max": 2.4, - "cpuUsage_avg": 1.5, - "cpuUsage_min": 0.5, - "cpuUsage_format": "cores", - "cpuThrottle_name": "cpuThrottle", - "cpuThrottle_sum": 1.09, - "cpuThrottle_max": 0.09, - "cpuThrottle_avg": 0.045, - "cpuThrottle_format": "cores", - "memoryRequest_name": "memoryRequest", - "memoryRequest_sum": 250.85, - "memoryRequest_avg": 51.1, - "memoryRequest_format": "MiB", - "memoryLimit_name": "memoryLimit", - "memoryLimit_sum": 500, - "memoryLimit_avg": 100, - "memoryLimit_format": "MiB", - "memoryUsage_name": "memoryUsage", - "memoryUsage_sum": 298.5, - "memoryUsage_max": 198.4, - "memoryUsage_avg": 41.5, - "memoryUsage_min": 21.5, - "memoryUsage_format": "MiB", - "memoryRSS_name": "memoryRSS", - "memoryRSS_sum": 225.64, - "memoryRSS_max": 125.54, - "memoryRSS_avg": 46.5, - "memoryRSS_min": 26.5, - "memoryRSS_format": "MiB" - } + "version": "1.0", + "experiment_name": "quarkus-resteasy-kruize-min-http-response-time-db", + "interval_start_time": "2022-01-23T18:25:43.511Z", + "interval_end_time": "2022-01-23T18:40:43.511Z", + "type": "deployment", + "name": "tfb-qrh-deployment", + "namespace": "default", + "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", + "container_name": "tfb-server", + "cpuRequest_name": "cpuRequest", + "cpuRequest_sum": 4.4, + "cpuRequest_avg": 1.1, + "cpuRequest_format": "cores", + "cpuLimit_name": "cpuLimit", + "cpuLimit_sum": 5.4, + "cpuLimit_avg": 22.1, + "cpuLimit_format": "cores", + "cpuUsage_name": "cpuUsage", + "cpuUsage_sum": 3.4, + "cpuUsage_max": 2.4, + "cpuUsage_avg": 1.5, + "cpuUsage_min": 0.5, + "cpuUsage_format": "cores", + "cpuThrottle_name": "cpuThrottle", + "cpuThrottle_sum": 1.09, + "cpuThrottle_max": 0.09, + "cpuThrottle_avg": 0.045, + "cpuThrottle_format": "cores", + "memoryRequest_name": "memoryRequest", + "memoryRequest_sum": 250.85, + "memoryRequest_avg": 51.1, + "memoryRequest_format": "MiB", + "memoryLimit_name": "memoryLimit", + "memoryLimit_sum": 500, + "memoryLimit_avg": 100, + "memoryLimit_format": "MiB", + "memoryUsage_name": "memoryUsage", + "memoryUsage_sum": 298.5, + "memoryUsage_max": 198.4, + "memoryUsage_avg": 41.5, + "memoryUsage_min": 
21.5, + "memoryUsage_format": "MiB", + "memoryRSS_name": "memoryRSS", + "memoryRSS_sum": 225.64, + "memoryRSS_max": 125.54, + "memoryRSS_avg": 46.5, + "memoryRSS_min": 26.5, + "memoryRSS_format": "MiB" +} test_type = {"blank": "", "null": "null", "invalid": "xyz"} + def generate_test_data(csvfile, test_data): if os.path.isfile(csvfile): os.remove(csvfile) @@ -174,15 +177,16 @@ def generate_test_data(csvfile, test_data): data.append(test_name) data.append(status_code) for k in test_data: - if k != key : - data.append(test_data[k]) + if k != key: + data.append(test_data[k]) else: - if any(re.findall(r'invalid.*sum|invalid.*max|invalid.*min|invalid.*avg', test_name, re.IGNORECASE)): - data.append(-1) + if any(re.findall(r'invalid.*sum|invalid.*max|invalid.*min|invalid.*avg', test_name, + re.IGNORECASE)): + data.append(-1) elif any(re.findall(r'blank.*sum|blank.*max|blank.*min|blank.*avg', test_name, re.IGNORECASE)): - data.append("\"\"") + data.append("\"\"") else: - data.append(test_type[t]) + data.append(test_type[t]) writer.writerow(data) @@ -190,12 +194,14 @@ def generate_test_data(csvfile, test_data): test_data = read_test_data_from_csv(csvfile) return test_data + def get_num_lines_in_csv(csv_filename): reader = csv.reader(open(csv_filename)) - num_lines= len(list(reader)) + num_lines = len(list(reader)) print(num_lines) return num_lines + def write_json_data_to_file(filename, data): """ Helper to read Json file @@ -207,6 +213,7 @@ def write_json_data_to_file(filename, data): except: return None + def read_json_data_from_file(filename): """ Helper to read Json file @@ -218,18 +225,20 @@ def read_json_data_from_file(filename): except: return None + def read_test_data_from_csv(csv_file): test_data = [] with open(csv_file, newline='') as csvfile: data = csv.reader(csvfile, delimiter=',') - #next(data) # skip header row + # next(data) # skip header row for row in data: test_data.append(row) return test_data -def generate_json(find_arr, json_file, filename, i, update_timestamps = False): + +def generate_json(find_arr, json_file, filename, i, update_timestamps=False): with open(json_file, 'r') as file: data = file.read() @@ -243,44 +252,49 @@ def generate_json(find_arr, json_file, filename, i, update_timestamps = False): data = data.replace(find, replace) find = "2022-01-23T18:40:43.570Z" - replace = increment_timestamp(find, i) + replace = increment_timestamp(find, i) data = data.replace(find, replace) with open(filename, 'w') as file: file.write(data) + def increment_timestamp(input_timestamp, step): input_date = datetime.strptime(input_timestamp, "%Y-%m-%dT%H:%M:%S.%fZ") minutes = 50 * step + 3600 output_date = input_date + timedelta(minutes=minutes) - timestamp = output_date.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]+'Z' + timestamp = output_date.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z' return timestamp + def increment_timestamp_by_given_mins(input_timestamp, minutes): input_date = datetime.strptime(input_timestamp, "%Y-%m-%dT%H:%M:%S.%fZ") output_date = input_date + timedelta(minutes=minutes) - timestamp = output_date.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]+'Z' + timestamp = output_date.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z' return timestamp + def get_datetime(): my_datetime = datetime.today() - time_str = my_datetime.isoformat(timespec = 'milliseconds') + time_str = my_datetime.isoformat(timespec='milliseconds') time_str = time_str + "Z" return time_str + def term_based_start_time(input_date_str, term): duration = {"short_term": 1, "medium_term": 7, "long_term": 15} input_date = 
datetime.strptime(input_date_str, "%Y-%m-%dT%H:%M:%S.%fZ") output_date = input_date - timedelta(days=duration[term]) - output_date_str = output_date.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]+'Z' + output_date_str = output_date.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z' return output_date_str -def validate_reco_json(create_exp_json, update_results_json, list_reco_json, expected_duration_in_hours = None, test_name = None): +def validate_reco_json(create_exp_json, update_results_json, list_reco_json, expected_duration_in_hours=None, + test_name=None): # Validate experiment assert create_exp_json["version"] == list_reco_json["version"] assert create_exp_json["experiment_name"] == list_reco_json["experiment_name"] @@ -296,14 +310,15 @@ def validate_reco_json(create_exp_json, update_results_json, list_reco_json, exp validate_kubernetes_obj(create_exp_kubernetes_obj, update_results_kubernetes_obj, update_results_json, \ list_reco_kubernetes_obj, expected_duration_in_hours, test_name) else: - update_results_kubernetes_obj = None - create_exp_kubernetes_obj = create_exp_json["kubernetes_objects"][0] - list_reco_kubernetes_obj = list_reco_json["kubernetes_objects"][0] - validate_kubernetes_obj(create_exp_kubernetes_obj, update_results_kubernetes_obj, update_results_json, \ - list_reco_kubernetes_obj, expected_duration_in_hours, test_name) + update_results_kubernetes_obj = None + create_exp_kubernetes_obj = create_exp_json["kubernetes_objects"][0] + list_reco_kubernetes_obj = list_reco_json["kubernetes_objects"][0] + validate_kubernetes_obj(create_exp_kubernetes_obj, update_results_kubernetes_obj, update_results_json, \ + list_reco_kubernetes_obj, expected_duration_in_hours, test_name) -def validate_kubernetes_obj(create_exp_kubernetes_obj, update_results_kubernetes_obj, update_results_json, list_reco_kubernetes_obj, expected_duration_in_hours, test_name): +def validate_kubernetes_obj(create_exp_kubernetes_obj, update_results_kubernetes_obj, update_results_json, + list_reco_kubernetes_obj, expected_duration_in_hours, test_name): # Validate type, name, namespace if update_results_kubernetes_obj == None: assert list_reco_kubernetes_obj["type"] == create_exp_kubernetes_obj["type"] @@ -331,10 +346,13 @@ def validate_kubernetes_obj(create_exp_kubernetes_obj, update_results_kubernetes list_reco_container = None for j in range(list_reco_containers_length): - if list_reco_kubernetes_obj["containers"][j]["container_name"] == create_exp_kubernetes_obj["containers"][i]["container_name"]: + if list_reco_kubernetes_obj["containers"][j]["container_name"] == \ + create_exp_kubernetes_obj["containers"][i]["container_name"]: update_results_container = create_exp_kubernetes_obj["containers"][i] list_reco_container = list_reco_kubernetes_obj["containers"][j] - validate_container(update_results_container, update_results_json, list_reco_container, expected_duration_in_hours) + validate_container(update_results_container, update_results_json, list_reco_container, + expected_duration_in_hours) + def validate_container(update_results_container, update_results_json, list_reco_container, expected_duration_in_hours): # Validate container image name and container name @@ -342,7 +360,7 @@ def validate_container(update_results_container, update_results_json, list_reco_ assert list_reco_container["container_image_name"] == update_results_container["container_image_name"], \ f"Container image names did not match! 
Actual - {list_reco_container['container_image_name']} Expected - {update_results_container['container_image_name']}" - assert list_reco_container["container_name"] == update_results_container["container_name"],\ + assert list_reco_container["container_name"] == update_results_container["container_name"], \ f"Container names did not match! Acutal = {list_reco_container['container_name']} Expected - {update_results_container['container_name']}" # Validate timestamps @@ -363,11 +381,11 @@ def validate_container(update_results_container, update_results_json, list_reco_ for term in duration_terms: if check_if_recommendations_are_present(duration_based_obj[term]): # Validate timestamps - assert duration_based_obj[term]["monitoring_end_time"] == interval_end_time,\ + assert duration_based_obj[term]["monitoring_end_time"] == interval_end_time, \ f"monitoring end time {duration_based_obj[term]['monitoring_end_time']} did not match end timestamp {interval_end_time}" monitoring_start_time = term_based_start_time(interval_end_time, term) - assert duration_based_obj[term]["monitoring_start_time"] == monitoring_start_time,\ + assert duration_based_obj[term]["monitoring_start_time"] == monitoring_start_time, \ f"actual = {duration_based_obj[term]['monitoring_start_time']} expected = {monitoring_start_time}" # Validate duration in hrs @@ -384,10 +402,11 @@ def validate_container(update_results_container, update_results_json, list_reco_ elif term == "long_term" and duration_in_hours > LONG_TERM_DURATION_IN_HRS_MAX: duration_in_hours = LONG_TERM_DURATION_IN_HRS_MAX - print(f"Actual = {duration_based_obj[term]['duration_in_hours']} expected = {duration_in_hours}") - assert duration_based_obj[term]["duration_in_hours"] == duration_in_hours,\ + print( + f"Actual = {duration_based_obj[term]['duration_in_hours']} expected = {duration_in_hours}") + assert duration_based_obj[term]["duration_in_hours"] == duration_in_hours, \ f"Duration in hours did not match! 
Actual = {duration_based_obj[term]['duration_in_hours']} expected = {duration_in_hours}" - + # Validate recommendation config validate_config(duration_based_obj[term]["config"]) else: @@ -399,13 +418,19 @@ def validate_container(update_results_container, update_results_json, list_reco_ result = check_if_recommendations_are_present(list_reco_container["recommendations"]) assert result == False, f"Recommendations notifications does not contain the expected message - {NOT_ENOUGH_DATA_MSG}" + def validate_config(reco_config): usage_list = ["requests", "limits"] for usage in usage_list: - assert reco_config[usage]["cpu"]["amount"] > 0, f"cpu amount in recommendation config is {reco_config[usage]['cpu']['amount']}" - assert reco_config[usage]["cpu"]["format"] == "cores", f"cpu format in recommendation config is {reco_config[usage]['cpu']['format']}" - assert reco_config[usage]["memory"]["amount"] > 0, f"cpu amount in recommendation config is {reco_config[usage]['memory']['amount']}" - assert reco_config[usage]["memory"]["format"] == "MiB", f"memory format in recommendation config is {reco_config[usage]['memory']['format']}" + assert reco_config[usage]["cpu"][ + "amount"] > 0, f"cpu amount in recommendation config is {reco_config[usage]['cpu']['amount']}" + assert reco_config[usage]["cpu"][ + "format"] == "cores", f"cpu format in recommendation config is {reco_config[usage]['cpu']['format']}" + assert reco_config[usage]["memory"][ + "amount"] > 0, f"cpu amount in recommendation config is {reco_config[usage]['memory']['amount']}" + assert reco_config[usage]["memory"][ + "format"] == "MiB", f"memory format in recommendation config is {reco_config[usage]['memory']['format']}" + def check_if_recommendations_are_present(duration_based_obj): notifications = duration_based_obj["notifications"] @@ -413,14 +438,15 @@ def check_if_recommendations_are_present(duration_based_obj): return False return True + def time_diff_in_hours(interval_start_time, interval_end_time): start_date = datetime.strptime(interval_start_time, "%Y-%m-%dT%H:%M:%S.%fZ") end_date = datetime.strptime(interval_end_time, "%Y-%m-%dT%H:%M:%S.%fZ") diff = end_date - start_date return round(diff.total_seconds() / 3600, 2) -def strip_double_quotes_for_field(json_file, field, filename): +def strip_double_quotes_for_field(json_file, field, filename): find = "\"{{" + field + "}}\"" replace = "{{" + field + "}}" with open(json_file, 'r') as file: @@ -431,6 +457,7 @@ def strip_double_quotes_for_field(json_file, field, filename): with open(filename, 'w') as file: file.write(data) + def compare_json_files(json_file1, json_file2): with open(json_file1, "r") as f1: try: @@ -438,14 +465,14 @@ def compare_json_files(json_file1, json_file2): except json.JSONDecodeError: print("Received JSONDecodeError") json_data1 = {} - + with open(json_file2, "r") as f2: try: json_data2 = json.load(f2) except json.JSONDecodeError: print("Received JSONDecodeError") json_data2 = {} - + if json_data1 and json_data2: if json_data1 == json_data2: print("The two JSON files are identical!") @@ -457,18 +484,20 @@ def compare_json_files(json_file1, json_file2): print(f"JSON files are empty! 
Check the files {json_file1} and {json_file2}") return False + def get_kruize_pod(namespace): command = f"kubectl get pod -n {namespace} | grep kruize | grep -v kruize-ui | cut -d ' ' -f1" # Execute the command and capture the output output = subprocess.check_output(command, shell=True) - + pod_name = output.decode('utf-8') print(f"pod name = {pod_name}") return pod_name.rstrip() + def delete_kruize_pod(namespace): - pod_name = get_kruize_pod(namespace) - + pod_name = get_kruize_pod(namespace) + command = f"kubectl delete pod {pod_name} -n {namespace}" print(command) @@ -502,6 +531,7 @@ def check_pod_running(namespace, pod_name): print(f"Kruize Pod {pod_name} is now running") return True + def get_index_of_metric(metrics: list, metric_name: str): for i, metric in enumerate(metrics): if metric["name"] == metric_name: @@ -509,6 +539,7 @@ def get_index_of_metric(metrics: list, metric_name: str): return None + def check_if_dict_has_same_keys(base_dict, test_dict): # Return false if the key set is not equal if set(base_dict.keys()) != set(test_dict.keys()): @@ -521,6 +552,7 @@ def check_if_dict_has_same_keys(base_dict, test_dict): check_if_dict_has_same_keys(base_dict[key], test_dict[key]) return True + def validate_variation(current_config: dict, recommended_config: dict, variation_config: dict): # Check structure assert check_if_dict_has_same_keys(recommended_config, variation_config) == True @@ -568,11 +600,13 @@ def validate_variation(current_config: dict, recommended_config: dict, variation if CPU_KEY in recommended_requests: if CPU_KEY in current_requests and AMOUNT_KEY in current_requests[CPU_KEY]: current_cpu_value = current_requests[CPU_KEY][AMOUNT_KEY] - assert variation_requests[CPU_KEY][AMOUNT_KEY] == recommended_requests[CPU_KEY][AMOUNT_KEY] - current_cpu_value + assert variation_requests[CPU_KEY][AMOUNT_KEY] == recommended_requests[CPU_KEY][ + AMOUNT_KEY] - current_cpu_value if MEMORY_KEY in recommended_requests: if MEMORY_KEY in current_requests and AMOUNT_KEY in current_requests[MEMORY_KEY]: current_memory_value = current_requests[MEMORY_KEY][AMOUNT_KEY] - assert variation_requests[MEMORY_KEY][AMOUNT_KEY] == recommended_requests[MEMORY_KEY][AMOUNT_KEY] - current_memory_value + assert variation_requests[MEMORY_KEY][AMOUNT_KEY] == recommended_requests[MEMORY_KEY][ + AMOUNT_KEY] - current_memory_value if recommended_limits is not None: current_cpu_value = 0 current_memory_value = 0 @@ -583,4 +617,5 @@ def validate_variation(current_config: dict, recommended_config: dict, variation if MEMORY_KEY in recommended_limits: if MEMORY_KEY in current_limits and AMOUNT_KEY in current_limits[MEMORY_KEY]: current_memory_value = current_limits[MEMORY_KEY][AMOUNT_KEY] - assert variation_limits[MEMORY_KEY][AMOUNT_KEY] == recommended_limits[MEMORY_KEY][AMOUNT_KEY] - current_memory_value + assert variation_limits[MEMORY_KEY][AMOUNT_KEY] == recommended_limits[MEMORY_KEY][ + AMOUNT_KEY] - current_memory_value diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py new file mode 100644 index 000000000..bf4360843 --- /dev/null +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py @@ -0,0 +1,273 @@ +import pytest +from helpers.fixtures import * +from helpers.kruize import * +from helpers.list_reco_json_validate import * +from helpers.utils import * + + +@pytest.mark.sanity +def test_update_valid_recommendations_after_results_after_create_exp(cluster_type): + 
input_json_file = "../json_files/create_exp.json" + result_json_file = "../json_files/update_results.json" + + find = [] + json_data = json.load(open(input_json_file)) + + find.append(json_data[0]['experiment_name']) + find.append(json_data[0]['kubernetes_objects'][0]['name']) + find.append(json_data[0]['kubernetes_objects'][0]['namespace']) + + form_kruize_url(cluster_type) + + # Create experiment using the specified json + num_exps = 1 + num_res = 100 + for i in range(num_exps): + create_exp_json_file = "/tmp/create_exp_" + str(i) + ".json" + generate_json(find, input_json_file, create_exp_json_file, i) + + # Delete the experiment + response = delete_experiment(create_exp_json_file) + print("delete exp = ", response.status_code) + + # Create the experiment + response = create_experiment(create_exp_json_file) + + data = response.json() + print("message = ", data['message']) + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_EXP_SUCCESS_MSG + + # Update results for the experiment + update_results_json_file = "/tmp/update_results_" + str(i) + ".json" + + result_json_arr = [] + # Get the experiment name + json_data = json.load(open(create_exp_json_file)) + experiment_name = json_data[0]['experiment_name'] + interval_start_time = get_datetime() + for j in range(num_res): + update_timestamps = True + generate_json(find, result_json_file, update_results_json_file, i, update_timestamps) + result_json = read_json_data_from_file(update_results_json_file) + if j == 0: + start_time = interval_start_time + else: + start_time = end_time + + result_json[0]['interval_start_time'] = start_time + end_time = increment_timestamp_by_given_mins(start_time, 15) + result_json[0]['interval_end_time'] = end_time + + write_json_data_to_file(update_results_json_file, result_json) + result_json_arr.append(result_json[0]) + response = update_results(update_results_json_file) + + data = response.json() + print("message = ", data['message']) + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG + + # Expecting that we have recommendations + if j > 96: + response = update_recommendations(experiment_name, interval_start_time, end_time) + data = response.json() + assert response.status_code == SUCCESS_STATUS_CODE + response = list_recommendations(experiment_name) + if response.status_code == SUCCESS_200_STATUS_CODE: + recommendation_json = response.json() + recommendation_section = recommendation_json[0]["kubernetes_objects"][0]["containers"][0][ + "recommendations"] + high_level_notifications = recommendation_section["notifications"] + # Check if duration + assert INFO_DURATION_BASED_RECOMMENDATIONS_AVAILABLE_CODE in high_level_notifications + data_section = recommendation_section["data"] + short_term_recommendation = data_section[str(end_time)]["duration_based"]["short_term"] + short_term_notifications = short_term_recommendation["notifications"] + for notification in short_term_notifications.values(): + assert notification["type"] != "error" + + response = update_recommendations(experiment_name, interval_start_time, end_time) + data = response.json() + assert response.status_code == SUCCESS_STATUS_CODE + + # Invoke list recommendations for the specified experiment + response = list_recommendations(experiment_name) + assert response.status_code == SUCCESS_200_STATUS_CODE + list_reco_json = response.json() + + # Validate the json against the json 
schema + errorMsg = validate_list_reco_json(list_reco_json, list_reco_json_schema) + assert errorMsg == "" + + # Validate the json values + create_exp_json = read_json_data_from_file(create_exp_json_file) + update_results_json = [] + update_results_json.append(result_json_arr[len(result_json_arr) - 1]) + + expected_duration_in_hours = SHORT_TERM_DURATION_IN_HRS_MAX + validate_reco_json(create_exp_json[0], update_results_json, list_reco_json[0], expected_duration_in_hours) + + # Delete all the experiments + for i in range(num_exps): + json_file = "/tmp/create_exp_" + str(i) + ".json" + response = delete_experiment(json_file) + print("delete exp = ", response.status_code) + assert response.status_code == SUCCESS_STATUS_CODE + + +@pytest.mark.sanity +def test_update_valid_recommendations_just_endtime_input_after_results_after_create_exp(cluster_type): + input_json_file = "../json_files/create_exp.json" + result_json_file = "../json_files/update_results.json" + + find = [] + json_data = json.load(open(input_json_file)) + + find.append(json_data[0]['experiment_name']) + find.append(json_data[0]['kubernetes_objects'][0]['name']) + find.append(json_data[0]['kubernetes_objects'][0]['namespace']) + + form_kruize_url(cluster_type) + + # Create experiment using the specified json + num_exps = 1 + num_res = 100 + for i in range(num_exps): + create_exp_json_file = "/tmp/create_exp_" + str(i) + ".json" + generate_json(find, input_json_file, create_exp_json_file, i) + + # Delete the experiment + response = delete_experiment(create_exp_json_file) + print("delete exp = ", response.status_code) + + # Create the experiment + response = create_experiment(create_exp_json_file) + + data = response.json() + print("message = ", data['message']) + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_EXP_SUCCESS_MSG + + # Update results for the experiment + update_results_json_file = "/tmp/update_results_" + str(i) + ".json" + + result_json_arr = [] + # Get the experiment name + json_data = json.load(open(create_exp_json_file)) + experiment_name = json_data[0]['experiment_name'] + interval_start_time = get_datetime() + for j in range(num_res): + update_timestamps = True + generate_json(find, result_json_file, update_results_json_file, i, update_timestamps) + result_json = read_json_data_from_file(update_results_json_file) + if j == 0: + start_time = interval_start_time + else: + start_time = end_time + + result_json[0]['interval_start_time'] = start_time + end_time = increment_timestamp_by_given_mins(start_time, 15) + result_json[0]['interval_end_time'] = end_time + + write_json_data_to_file(update_results_json_file, result_json) + result_json_arr.append(result_json[0]) + response = update_results(update_results_json_file) + + data = response.json() + print("message = ", data['message']) + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG + + # Expecting that we have recommendations + if j > 96: + response = update_recommendations(experiment_name, None, end_time) + data = response.json() + assert response.status_code == SUCCESS_STATUS_CODE + response = list_recommendations(experiment_name) + if response.status_code == SUCCESS_200_STATUS_CODE: + recommendation_json = response.json() + recommendation_section = recommendation_json[0]["kubernetes_objects"][0]["containers"][0][ + "recommendations"] + high_level_notifications = 
recommendation_section["notifications"] + # Check if duration + assert INFO_DURATION_BASED_RECOMMENDATIONS_AVAILABLE_CODE in high_level_notifications + data_section = recommendation_section["data"] + short_term_recommendation = data_section[str(end_time)]["duration_based"]["short_term"] + short_term_notifications = short_term_recommendation["notifications"] + for notification in short_term_notifications.values(): + assert notification["type"] != "error" + + response = update_recommendations(experiment_name, None, end_time) + data = response.json() + assert response.status_code == SUCCESS_STATUS_CODE + + # Invoke list recommendations for the specified experiment + response = list_recommendations(experiment_name) + assert response.status_code == SUCCESS_200_STATUS_CODE + list_reco_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_list_reco_json(list_reco_json, list_reco_json_schema) + assert errorMsg == "" + + # Validate the json values + create_exp_json = read_json_data_from_file(create_exp_json_file) + update_results_json = [] + update_results_json.append(result_json_arr[len(result_json_arr) - 1]) + + expected_duration_in_hours = SHORT_TERM_DURATION_IN_HRS_MAX + validate_reco_json(create_exp_json[0], update_results_json, list_reco_json[0], expected_duration_in_hours) + + # Delete all the experiments + for i in range(num_exps): + json_file = "/tmp/create_exp_" + str(i) + ".json" + response = delete_experiment(json_file) + print("delete exp = ", response.status_code) + assert response.status_code == SUCCESS_STATUS_CODE + + +@pytest.mark.negative +def test_update_recommendations_without_experiment_name_end_time(cluster_type): + form_kruize_url(cluster_type) + response = update_recommendations(None, None, None) + data = response.json() + assert response.status_code == ERROR_STATUS_CODE + assert data['message'] == UPDATE_RECOMMENDATIONS_MANDATORY_DEFAULT_MESSAGE + + +@pytest.mark.negative +def test_update_recommendations_without_end_time(cluster_type): + form_kruize_url(cluster_type) + experiment_name = "test123" + response = update_recommendations(experiment_name, None, None) + data = response.json() + assert response.status_code == ERROR_STATUS_CODE + assert data['message'] == UPDATE_RECOMMENDATIONS_MANDATORY_INTERVAL_END_DATE + + +@pytest.mark.negative +def test_update_recommendations_with_unknown_experiment_name_and_end_time(cluster_type): + form_kruize_url(cluster_type) + experiment_name = "test123" + end_time = "2023-01-02T00:15:00.000Z" + response = update_recommendations(experiment_name, None, end_time) + data = response.json() + assert response.status_code == ERROR_STATUS_CODE + assert data['message'] == UPDATE_RECOMMENDATIONS_DATA_NOT_FOUND + + +@pytest.mark.negative +def test_update_recommendations_with_end_time_precede_start_time(cluster_type): + form_kruize_url(cluster_type) + experiment_name = "test123" + start_time = "2023-01-03T00:15:00.000Z" + end_time = "2023-01-02T00:15:00.000Z" + response = update_recommendations(experiment_name, start_time, end_time) + data = response.json() + assert response.status_code == ERROR_STATUS_CODE + assert data['message'] == UPDATE_RECOMMENDATIONS_START_TIME_PRECEDE_END_TIME From 8f2c905549fc015791c099b320155605d7586402 Mon Sep 17 00:00:00 2001 From: msvinaykumar Date: Thu, 27 Jul 2023 14:49:09 +0530 Subject: [PATCH 06/12] incoporated review comments Signed-off-by: msvinaykumar --- .../remote_monitoring_tests/helpers/utils.py | 1 + .../rest_apis/test_update_recommendations.py | 64 +++++++++++++++++++ 2 
files changed, 65 insertions(+) diff --git a/tests/scripts/remote_monitoring_tests/helpers/utils.py b/tests/scripts/remote_monitoring_tests/helpers/utils.py index fe3b0dddc..28947f304 100644 --- a/tests/scripts/remote_monitoring_tests/helpers/utils.py +++ b/tests/scripts/remote_monitoring_tests/helpers/utils.py @@ -34,6 +34,7 @@ UPDATE_RECOMMENDATIONS_MANDATORY_INTERVAL_END_DATE = 'interval_end_time is mandatory' UPDATE_RECOMMENDATIONS_DATA_NOT_FOUND = 'Data not found!' UPDATE_RECOMMENDATIONS_START_TIME_PRECEDE_END_TIME = 'The Start time should precede the End time!' +UPDATE_RECOMMENDATIONS_START_TIME_END_TIME_GAP_ERROR = 'The gap between the interval_start_time and interval_end_time must be within a maximum of 15 days!' # Kruize Recommendations Notification codes NOTIFICATION_CODE_FOR_DURATION_BASED_RECOMMENDATIONS_AVAILABLE = "112101" diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py index bf4360843..6db4775cd 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py @@ -4,6 +4,13 @@ from helpers.list_reco_json_validate import * from helpers.utils import * +''' + Creates Experiment + + update results for 24 hrs + + update recommendation using start and end time as a parameter + Expected : recommendation should be available for the timestamp provided +''' + @pytest.mark.sanity def test_update_valid_recommendations_after_results_after_create_exp(cluster_type): @@ -75,6 +82,9 @@ def test_update_valid_recommendations_after_results_after_create_exp(cluster_typ response = update_recommendations(experiment_name, interval_start_time, end_time) data = response.json() assert response.status_code == SUCCESS_STATUS_CODE + assert data[0]['experiment_name'] == experiment_name + assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['112101'][ + 'message'] == 'Duration Based Recommendations Available' response = list_recommendations(experiment_name) if response.status_code == SUCCESS_200_STATUS_CODE: recommendation_json = response.json() @@ -92,6 +102,9 @@ def test_update_valid_recommendations_after_results_after_create_exp(cluster_typ response = update_recommendations(experiment_name, interval_start_time, end_time) data = response.json() assert response.status_code == SUCCESS_STATUS_CODE + assert data[0]['experiment_name'] == experiment_name + assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['112101'][ + 'message'] == 'Duration Based Recommendations Available' # Invoke list recommendations for the specified experiment response = list_recommendations(experiment_name) @@ -118,6 +131,14 @@ def test_update_valid_recommendations_after_results_after_create_exp(cluster_typ assert response.status_code == SUCCESS_STATUS_CODE +''' + Creates Experiment + + update results for 24 hrs + + update recommendation using only end time as a parameter + Expected : recommendation should be available for the timestamp provided +''' + + @pytest.mark.sanity def test_update_valid_recommendations_just_endtime_input_after_results_after_create_exp(cluster_type): input_json_file = "../json_files/create_exp.json" @@ -188,6 +209,9 @@ def test_update_valid_recommendations_just_endtime_input_after_results_after_cre response = update_recommendations(experiment_name, None, end_time) data = response.json() assert 
response.status_code == SUCCESS_STATUS_CODE + assert data[0]['experiment_name'] == experiment_name + assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['112101'][ + 'message'] == 'Duration Based Recommendations Available' response = list_recommendations(experiment_name) if response.status_code == SUCCESS_200_STATUS_CODE: recommendation_json = response.json() @@ -205,6 +229,9 @@ def test_update_valid_recommendations_just_endtime_input_after_results_after_cre response = update_recommendations(experiment_name, None, end_time) data = response.json() assert response.status_code == SUCCESS_STATUS_CODE + assert data[0]['experiment_name'] == experiment_name + assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['112101'][ + 'message'] == 'Duration Based Recommendations Available' # Invoke list recommendations for the specified experiment response = list_recommendations(experiment_name) @@ -231,6 +258,11 @@ def test_update_valid_recommendations_just_endtime_input_after_results_after_cre assert response.status_code == SUCCESS_STATUS_CODE +''' +try to update recommendation without experiment name and end time and get 400 status with UPDATE_RECOMMENDATIONS_MANDATORY_DEFAULT_MESSAGE +''' + + @pytest.mark.negative def test_update_recommendations_without_experiment_name_end_time(cluster_type): form_kruize_url(cluster_type) @@ -240,6 +272,11 @@ def test_update_recommendations_without_experiment_name_end_time(cluster_type): assert data['message'] == UPDATE_RECOMMENDATIONS_MANDATORY_DEFAULT_MESSAGE +''' +try to update recommendation without end time and get 400 status with UPDATE_RECOMMENDATIONS_MANDATORY_INTERVAL_END_DATE +''' + + @pytest.mark.negative def test_update_recommendations_without_end_time(cluster_type): form_kruize_url(cluster_type) @@ -250,6 +287,11 @@ def test_update_recommendations_without_end_time(cluster_type): assert data['message'] == UPDATE_RECOMMENDATIONS_MANDATORY_INTERVAL_END_DATE +''' +Update recommendation with unknown experiment name and end date. +''' + + @pytest.mark.negative def test_update_recommendations_with_unknown_experiment_name_and_end_time(cluster_type): form_kruize_url(cluster_type) @@ -261,6 +303,11 @@ def test_update_recommendations_with_unknown_experiment_name_and_end_time(cluste assert data['message'] == UPDATE_RECOMMENDATIONS_DATA_NOT_FOUND +''' +Update recommendation with start time precede end time. +''' + + @pytest.mark.negative def test_update_recommendations_with_end_time_precede_start_time(cluster_type): form_kruize_url(cluster_type) @@ -271,3 +318,20 @@ def test_update_recommendations_with_end_time_precede_start_time(cluster_type): data = response.json() assert response.status_code == ERROR_STATUS_CODE assert data['message'] == UPDATE_RECOMMENDATIONS_START_TIME_PRECEDE_END_TIME + + +''' +Update recommendation with start time and end time having difference more than 15 days. 
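+Expected : 400 status with UPDATE_RECOMMENDATIONS_START_TIME_END_TIME_GAP_ERROR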
+'''
+
+
+@pytest.mark.negative
+def test_update_recommendations_with_start_end_time_gap_more_than_15_days(cluster_type):
+    form_kruize_url(cluster_type)
+    experiment_name = "test123"
+    start_time = "2023-01-03T00:15:00.000Z"
+    end_time = "2023-01-30T00:15:00.000Z"
+    response = update_recommendations(experiment_name, start_time, end_time)
+    data = response.json()
+    assert response.status_code == ERROR_STATUS_CODE
+    assert data['message'] == UPDATE_RECOMMENDATIONS_START_TIME_END_TIME_GAP_ERROR

From 97cdbde40e4dbdecdbb8649c8973e4deb57d962f Mon Sep 17 00:00:00 2001
From: msvinaykumar
Date: Sat, 29 Jul 2023 00:10:22 +0530
Subject: [PATCH 07/12] incorporated review comments

Signed-off-by: msvinaykumar
---
 .../rest_apis/test_list_recommendations.py    | 265 ++++++++++++------
 .../rest_apis/test_update_recommendations.py  |  67 ++---
 2 files changed, 203 insertions(+), 129 deletions(-)

diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py
index 5be1b06e8..8af6b08e8 100644
--- a/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py
+++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py
@@ -1,12 +1,15 @@
+import datetime
+import json
+import time
+
 import pytest
-from helpers.list_reco_json_validate import *
-from helpers.list_reco_json_schema import *
-from helpers.utils import *
+from helpers.fixtures import *
 from helpers.generate_rm_jsons import *
 from helpers.kruize import *
-from helpers.fixtures import *
-import time
-import json
+from helpers.list_reco_json_schema import *
+from helpers.list_reco_json_validate import *
+from helpers.utils import *
+

 @pytest.mark.sanity
 def test_list_recommendations_single_result(cluster_type):
@@ -14,7 +17,7 @@ def test_list_recommendations_single_result(cluster_type):
     Test Description: This test validates listRecommendations by passing a valid experiment name
                       and updating a single result
     """
-    input_json_file="../json_files/create_exp.json"
+    input_json_file = "../json_files/create_exp.json"
     form_kruize_url(cluster_type)

     response = delete_experiment(input_json_file)
@@ -22,14 +25,17 @@ def test_list_recommendations_single_result(cluster_type):

     # Create experiment using the specified json
     response = create_experiment(input_json_file)
-
     data = response.json()

     assert response.status_code == SUCCESS_STATUS_CODE
     assert data['status'] == SUCCESS_STATUS
     assert data['message'] == CREATE_EXP_SUCCESS_MSG

     # Update results for the experiment
-    result_json_file="../json_files/update_results.json"
+    result_json_file = "../json_files/update_results.json"
+    json_data = json.load(open(result_json_file))
+    experiment_name = json_data[0]['experiment_name']
+    result_json = read_json_data_from_file(result_json_file)
+    end_time = result_json[0]['interval_end_time']
     response = update_results(result_json_file)

     data = response.json()
@@ -37,6 +43,14 @@ def test_list_recommendations_single_result(cluster_type):
     assert data['status'] == SUCCESS_STATUS
     assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG

+    # Update recommendations for the experiment
+    response = update_recommendations(experiment_name, None, end_time)
+    data = response.json()
+    assert response.status_code == SUCCESS_STATUS_CODE
+    assert data[0]['experiment_name'] == experiment_name
+    assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['120001'][
+        'message'] == 'There is not enough data available to generate a recommendation.'
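+    # Note: a single 15-minute result is far short of the ~24h of data
+    # (96 x 15-minute intervals) the short term needs, hence the
+    # 'not enough data' notification (120001) asserted above.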
+ time.sleep(1) # Get the experiment name @@ -58,12 +72,13 @@ def test_list_recommendations_single_result(cluster_type): response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_list_recommendations_without_parameters(cluster_type): """ Test Description: This test validates listRecommendations API without parameters """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) response = delete_experiment(input_json_file) @@ -78,26 +93,30 @@ def test_list_recommendations_without_parameters(cluster_type): assert data['message'] == CREATE_EXP_SUCCESS_MSG # Update results for the same experiment - result_json_file="../json_files/multiple_results_single_exp.json" + result_json_file = "../json_files/multiple_results_single_exp.json" result_json_arr = read_json_data_from_file(result_json_file) - for result_json in result_json_arr: - single_json_arr = [] - json_file = "/tmp/update_results.json" - single_json_arr.append(result_json) - write_json_data_to_file(json_file, single_json_arr) - - response = update_results(json_file) - - data = response.json() - print(data['message']) + response = update_results(result_json_file) - assert response.status_code == SUCCESS_STATUS_CODE - assert data['status'] == SUCCESS_STATUS - assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG + data = response.json() + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG time.sleep(1) + start_time = '2023-04-13T22:59:20.982Z' + end_time = '2023-04-14T23:59:20.982Z' + # Get the experiment name + json_data = json.load(open(input_json_file)) + experiment_name = json_data[0]['experiment_name'] + response = update_recommendations(experiment_name, start_time, end_time) + data = response.json() + assert response.status_code == SUCCESS_STATUS_CODE + assert data[0]['experiment_name'] == experiment_name + assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['112101'][ + 'message'] == 'Duration Based Recommendations Available' + # Get the experiment name experiment_name = None response = list_recommendations(experiment_name) @@ -112,7 +131,7 @@ def test_list_recommendations_without_parameters(cluster_type): # Validate the json values create_exp_json = read_json_data_from_file(input_json_file) update_results_json = [] - update_results_json.append(result_json_arr[len(result_json_arr)-1]) + update_results_json.append(result_json_arr[len(result_json_arr) - 1]) # Expected duration in hours is 24h as for short term only 24h plus or minus 30s of data is considered to generate recommendations expected_duration_in_hours = SHORT_TERM_DURATION_IN_HRS_MAX @@ -122,12 +141,13 @@ def test_list_recommendations_without_parameters(cluster_type): response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.negative def test_list_recommendations_invalid_exp(cluster_type): """ Test Description: This test validates listRecommendations by passing an invalid experiment name """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) response = delete_experiment(input_json_file) @@ -142,7 +162,7 @@ def test_list_recommendations_invalid_exp(cluster_type): assert data['message'] == CREATE_EXP_SUCCESS_MSG # Update results for the experiment - 
result_json_file="../json_files/update_results.json" + result_json_file = "../json_files/update_results.json" response = update_results(result_json_file) data = response.json() @@ -165,12 +185,13 @@ def test_list_recommendations_invalid_exp(cluster_type): response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_list_recommendations_without_results(cluster_type): """ Test Description: This test validates listRecommendations when there was no updation of results """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) response = delete_experiment(input_json_file) @@ -206,12 +227,13 @@ def test_list_recommendations_without_results(cluster_type): response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_list_recommendations_single_exp_multiple_results(cluster_type): """ Test Description: This test validates listRecommendations by updating multiple results for a single experiment """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) response = delete_experiment(input_json_file) @@ -226,13 +248,13 @@ def test_list_recommendations_single_exp_multiple_results(cluster_type): assert data['message'] == CREATE_EXP_SUCCESS_MSG # Update results for the experiment - result_json_file="../json_files/multiple_results_single_exp.json" + result_json_file = "../json_files/multiple_results_single_exp.json" response = update_results(result_json_file) data = response.json() - assert response.status_code == ERROR_STATUS_CODE - assert data['status'] == ERROR_STATUS - assert data['message'] == "Bulk entries are currently unsupported!" 
+ assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG time.sleep(1) @@ -263,6 +285,7 @@ def test_list_recommendations_single_exp_multiple_results(cluster_type): response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.extended def test_list_recommendations_multiple_exps_from_diff_json_files_2(cluster_type): """ @@ -310,9 +333,15 @@ def test_list_recommendations_multiple_exps_from_diff_json_files_2(cluster_type) assert data['message'] == CREATE_EXP_SUCCESS_MSG result_json_arr = [] + start_time = None + end_time = None for j in range(num_res): # Update results for the experiment result_json_file = result_jsons_dir + "/result_" + str(i) + "_" + str(j) + ".json" + result_json = read_json_data_from_file(result_json_file) + if start_time is None: + start_time = result_json[0]['interval_start_time'] + end_time = result_json[0]['interval_end_time'] response = update_results(result_json_file) data = response.json() @@ -320,7 +349,8 @@ def test_list_recommendations_multiple_exps_from_diff_json_files_2(cluster_type) print("message = ", data['message']) assert response.status_code == SUCCESS_STATUS_CODE assert data['status'] == SUCCESS_STATUS - assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG, f"expected message = {UPDATE_RESULTS_SUCCESS_MSG} actual message = {data['message']}" + assert data[ + 'message'] == UPDATE_RESULTS_SUCCESS_MSG, f"expected message = {UPDATE_RESULTS_SUCCESS_MSG} actual message = {data['message']}" result_json_data = read_json_data_from_file(result_json_file) result_json_arr.append(result_json_data[0]) @@ -329,6 +359,14 @@ def test_list_recommendations_multiple_exps_from_diff_json_files_2(cluster_type) json_data = json.load(open(create_exp_json_file)) experiment_name = json_data[0]['experiment_name'] + # Update Recommendations + response = update_recommendations(experiment_name, start_time, end_time) + data = response.json() + assert response.status_code == SUCCESS_STATUS_CODE + assert data[0]['experiment_name'] == experiment_name + assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['112101'][ + 'message'] == 'Duration Based Recommendations Available' + # Invoke list recommendations for the specified experiment response = list_recommendations(experiment_name) assert response.status_code == SUCCESS_200_STATUS_CODE @@ -341,7 +379,7 @@ def test_list_recommendations_multiple_exps_from_diff_json_files_2(cluster_type) create_exp_json = read_json_data_from_file(create_exp_json_file) update_results_json = [] - update_results_json.append(result_json_arr[len(result_json_arr)-1]) + update_results_json.append(result_json_arr[len(result_json_arr) - 1]) # Expected duration in hours is 24h as for short term only 24h plus or minus 30s of data is considered to generate recommendations expected_duration_in_hours = SHORT_TERM_DURATION_IN_HRS_MAX @@ -361,7 +399,7 @@ def test_list_recommendations_exp_name_and_latest(latest, cluster_type): """ Test Description: This test validates listRecommendations by passing a valid experiment name and latest as true or false """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) @@ -379,29 +417,41 @@ def test_list_recommendations_exp_name_and_latest(latest, cluster_type): assert data['message'] == CREATE_EXP_SUCCESS_MSG # Update results for the same experiment - 
result_json_file="../json_files/multiple_results_single_exp.json" - + result_json_file = "../json_files/multiple_results_single_exp.json" result_json_arr = read_json_data_from_file(result_json_file) - for result_json in result_json_arr: - single_json_arr = [] - json_file = "/tmp/update_results.json" - single_json_arr.append(result_json) - write_json_data_to_file(json_file, single_json_arr) + response = update_results(result_json_file) + data = response.json() + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG - response = update_results(json_file) + time.sleep(1) - data = response.json() - print(data['message']) + # update Recommendations + with open(result_json_file, 'r') as file: + data = json.load(file) - assert response.status_code == SUCCESS_STATUS_CODE - assert data['status'] == SUCCESS_STATUS - assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG + # Step 2: Convert UTC strings to datetime objects + for item in data: + item['interval_start_time'] = datetime.strptime(item['interval_start_time'], "%Y-%m-%dT%H:%M:%S.%fZ") + item['interval_end_time'] = datetime.strptime(item['interval_end_time'], "%Y-%m-%dT%H:%M:%S.%fZ") + + # Step 3: Find minimum start_time and maximum end_time + start_time = min(data, key=lambda x: x['interval_start_time'])['interval_start_time'] + end_time = max(data, key=lambda x: x['interval_end_time'])['interval_end_time'] - time.sleep(1) # Get the experiment name json_data = json.load(open(input_json_file)) experiment_name = json_data[0]['experiment_name'] + response = update_recommendations(experiment_name, start_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")[:-4] + "Z", + end_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")[:-4] + "Z") + data = response.json() + assert response.status_code == SUCCESS_STATUS_CODE + assert data[0]['experiment_name'] == experiment_name + assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['112101'][ + 'message'] == 'Duration Based Recommendations Available' + response = list_recommendations(experiment_name, latest) list_reco_json = response.json() @@ -409,7 +459,7 @@ def test_list_recommendations_exp_name_and_latest(latest, cluster_type): update_results_json = [] if latest == "true": - update_results_json.append(result_json_arr[len(result_json_arr)-1]) + update_results_json.append(result_json_arr[len(result_json_arr) - 1]) # Expected duration in hours is 24h as for short term only 24h plus or minus 30s of data is considered to generate recommendations expected_duration_in_hours = SHORT_TERM_DURATION_IN_HRS_MAX @@ -421,6 +471,8 @@ def test_list_recommendations_exp_name_and_latest(latest, cluster_type): # Recommendations are generated only when 24h results are present total_num_results = len(result_json_arr) num_results_without_recos = int(SHORT_TERM_DURATION_IN_HRS_MAX * 4 - 1) + print(f"total_num_results {total_num_results}") + print(f"num_results_without_recos {num_results_without_recos}") expected_num_recos = total_num_results - num_results_without_recos # Fetch only the results corresponding to the recommendations generated @@ -443,13 +495,14 @@ def test_list_recommendations_exp_name_and_latest(latest, cluster_type): response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.negative @pytest.mark.parametrize("monitoring_end_time", ["2022-12-20T21:10:11", "20220211"]) def test_list_recommendations_exp_name_and_monitoring_end_time_invalid(monitoring_end_time, 
cluster_type): """ Test Description: This test validates listRecommendations by passing a valid experiment name and an invalid monitoring end time value """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) response = delete_experiment(input_json_file) @@ -464,7 +517,7 @@ def test_list_recommendations_exp_name_and_monitoring_end_time_invalid(monitorin assert data['message'] == CREATE_EXP_SUCCESS_MSG # Update results for the experiment - result_json_file="../json_files/update_results.json" + result_json_file = "../json_files/update_results.json" response = update_results(result_json_file) data = response.json() @@ -488,15 +541,17 @@ def test_list_recommendations_exp_name_and_monitoring_end_time_invalid(monitorin response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity @pytest.mark.parametrize("test_name, monitoring_end_time", \ - [("valid_monitoring_end_time", "2023-04-14T22:59:20.982Z"), ("invalid_monitoring_end_time","2018-12-20T23:40:15.000Z")]) + [("valid_monitoring_end_time", "2023-04-14T22:59:20.982Z"), + ("invalid_monitoring_end_time", "2018-12-20T23:40:15.000Z")]) def test_list_recommendations_exp_name_and_monitoring_end_time(test_name, monitoring_end_time, cluster_type): """ Test Description: This test validates listRecommendations by passing a valid experiment name and a valid monitoring end time and an invalid monitoring end time """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) @@ -513,29 +568,38 @@ def test_list_recommendations_exp_name_and_monitoring_end_time(test_name, monito assert data['message'] == CREATE_EXP_SUCCESS_MSG # Update results for the same experiment - result_json_file="../json_files/multiple_results_single_exp.json" - + result_json_file = "../json_files/multiple_results_single_exp.json" result_json_arr = read_json_data_from_file(result_json_file) - for result_json in result_json_arr: - single_json_arr = [] - json_file = "/tmp/update_results.json" - single_json_arr.append(result_json) - write_json_data_to_file(json_file, single_json_arr) - - response = update_results(json_file) - - data = response.json() - print(data['message']) - - assert response.status_code == SUCCESS_STATUS_CODE - assert data['status'] == SUCCESS_STATUS - assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG - + response = update_results(result_json_file) + data = response.json() + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG time.sleep(1) + # update Recommendations + with open(result_json_file, 'r') as file: + data = json.load(file) + + # Step 2: Convert UTC strings to datetime objects + for item in data: + item['interval_start_time'] = datetime.strptime(item['interval_start_time'], "%Y-%m-%dT%H:%M:%S.%fZ") + item['interval_end_time'] = datetime.strptime(item['interval_end_time'], "%Y-%m-%dT%H:%M:%S.%fZ") + + # Step 3: Find minimum start_time and maximum end_time + start_time = min(data, key=lambda x: x['interval_start_time'])['interval_start_time'] + end_time = max(data, key=lambda x: x['interval_end_time'])['interval_end_time'] # Get the experiment name json_data = json.load(open(input_json_file)) experiment_name = json_data[0]['experiment_name'] + response = update_recommendations(experiment_name, start_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")[:-4] + "Z", + 
end_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")[:-4] + "Z") + data = response.json() + assert response.status_code == SUCCESS_STATUS_CODE + assert data[0]['experiment_name'] == experiment_name + assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['112101'][ + 'message'] == 'Duration Based Recommendations Available' + latest = None response = list_recommendations(experiment_name, latest, monitoring_end_time) @@ -555,7 +619,8 @@ def test_list_recommendations_exp_name_and_monitoring_end_time(test_name, monito # Validate the json values create_exp_json = read_json_data_from_file(input_json_file) - validate_reco_json(create_exp_json[0], update_results_json, list_reco_json[0], expected_duration_in_hours, test_name) + validate_reco_json(create_exp_json[0], update_results_json, list_reco_json[0], expected_duration_in_hours, + test_name) elif test_name == "invalid_monitoring_end_time": print(list_reco_json) assert response.status_code == ERROR_STATUS_CODE @@ -565,6 +630,7 @@ def test_list_recommendations_exp_name_and_monitoring_end_time(test_name, monito response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_list_recommendations_multiple_exps_with_missing_metrics(cluster_type): """ @@ -572,8 +638,8 @@ def test_list_recommendations_multiple_exps_with_missing_metrics(cluster_type): with some of the mandatory metrics missing in the results """ - input_json_file="../json_files/create_exp.json" - result_json_file="../json_files/update_results.json" + input_json_file = "../json_files/create_exp.json" + result_json_file = "../json_files/update_results.json" find = [] json_data = json.load(open(input_json_file)) @@ -584,7 +650,7 @@ def test_list_recommendations_multiple_exps_with_missing_metrics(cluster_type): form_kruize_url(cluster_type) - drop_metrics = {"cpuRequest":0, "cpuLimit":1, "cpuThrottle":3, "memoryRequest":4, "memoryLimit":5} + drop_metrics = {"cpuRequest": 0, "cpuLimit": 1, "cpuThrottle": 3, "memoryRequest": 4, "memoryLimit": 5} keys = list(drop_metrics.keys()) j = 0 num_exps = 10 @@ -651,6 +717,7 @@ def test_list_recommendations_multiple_exps_with_missing_metrics(cluster_type): response = delete_experiment(create_exp_json_file) print("delete exp = ", response.status_code) + @pytest.mark.extended @pytest.mark.parametrize("latest", ["true", "false"]) def test_list_recommendations_with_only_latest(latest, cluster_type): @@ -659,8 +726,8 @@ def test_list_recommendations_with_only_latest(latest, cluster_type): and query with only the parameter latest and with both latest=true and latest=false """ - input_json_file="../json_files/create_exp.json" - result_json_file="../json_files/update_results.json" + input_json_file = "../json_files/create_exp.json" + result_json_file = "../json_files/update_results.json" find = [] json_data = json.load(open(input_json_file)) @@ -694,12 +761,16 @@ def test_list_recommendations_with_only_latest(latest, cluster_type): update_results_json_file = "/tmp/update_results_" + str(i) + ".json" result_json_arr = [] + # Get the experiment name + json_data = json.load(open(create_exp_json_file)) + experiment_name = json_data[0]['experiment_name'] + interval_start_time = get_datetime() for j in range(num_res): update_timestamps = True generate_json(find, result_json_file, update_results_json_file, i, update_timestamps) result_json = read_json_data_from_file(update_results_json_file) if j == 0: - start_time = get_datetime() + start_time = interval_start_time else: start_time 
= end_time @@ -717,8 +788,6 @@ def test_list_recommendations_with_only_latest(latest, cluster_type): assert data['status'] == SUCCESS_STATUS assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG - time.sleep(1) - # Get the experiment name json_data = json.load(open(create_exp_json_file)) experiment_name = json_data[0]['experiment_name'] @@ -728,8 +797,12 @@ def test_list_recommendations_with_only_latest(latest, cluster_type): list_of_result_json_arr.append(result_json_arr) - time.sleep(5) - + response = update_recommendations(experiment_name, interval_start_time, end_time) + data = response.json() + assert response.status_code == SUCCESS_STATUS_CODE + assert data[0]['experiment_name'] == experiment_name + assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['112101'][ + 'message'] == 'Duration Based Recommendations Available' experiment_name = None response = list_recommendations(experiment_name, latest) @@ -749,7 +822,7 @@ def test_list_recommendations_with_only_latest(latest, cluster_type): update_results_json = [] if latest == "true": - update_results_json.append(list_of_result_json_arr[i][len(list_of_result_json_arr[i])-1]) + update_results_json.append(list_of_result_json_arr[i][len(list_of_result_json_arr[i]) - 1]) # Expected duration in hours is 24h as for short term only 24h plus or minus 30s of data is considered to generate recommendations expected_duration_in_hours = SHORT_TERM_DURATION_IN_HRS_MAX elif latest == "false": @@ -821,13 +894,16 @@ def test_list_recommendations_notification_codes(cluster_type: str): update_results_json_file = "/tmp/update_results_" + str(i) + ".json" result_json_arr = [] - + # Get the experiment name + json_data = json.load(open(create_exp_json_file)) + experiment_name = json_data[0]['experiment_name'] + interval_start_time = get_datetime() for j in range(num_res): update_timestamps = True generate_json(find, result_json_file, update_results_json_file, i, update_timestamps) result_json = read_json_data_from_file(update_results_json_file) if j == 0: - start_time = get_datetime() + start_time = interval_start_time else: start_time = end_time @@ -965,6 +1041,10 @@ def test_list_recommendations_notification_codes(cluster_type: str): if j > 95: time.sleep(1) + response = update_recommendations(experiment_name, interval_start_time, end_time) + data = response.json() + assert response.status_code == SUCCESS_STATUS_CODE + assert data[0]['experiment_name'] == experiment_name # Get the experiment name json_data = json.load(open(create_exp_json_file)) @@ -986,7 +1066,8 @@ def test_list_recommendations_notification_codes(cluster_type: str): for containers in recommendation_json[0]["kubernetes_objects"][0]["containers"]: actual_container_name = containers["container_name"] - print(f"actual container name = {actual_container_name} expected container name = {container_name_to_update}") + print( + f"actual container name = {actual_container_name} expected container name = {container_name_to_update}") if containers["container_name"] == container_name_to_update: recommendation_section = containers["recommendations"] break @@ -1017,6 +1098,13 @@ def test_list_recommendations_notification_codes(cluster_type: str): short_term_notifications = short_term_recommendation["notifications"] if j == 96: + response = update_recommendations(experiment_name, interval_start_time, end_time) + data = response.json() + assert response.status_code == SUCCESS_STATUS_CODE + assert data[0]['experiment_name'] == experiment_name + assert \ + 
data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['112101'][ + 'message'] == 'Duration Based Recommendations Available' # Expected notifications in short term recommendation # WARNING_CPU_LIMIT_NOT_SET_CODE = "423001" # CRITICAL_CPU_REQUEST_NOT_SET_CODE = "523001" @@ -1095,10 +1183,9 @@ def test_list_recommendations_notification_codes(cluster_type: str): recommended_config=short_term_recommendation_config, variation_config=short_term_recommendation_variation) - # Delete the experiments for i in range(num_exps): json_file = "/tmp/create_exp_" + str(i) + ".json" response = delete_experiment(json_file) - print("delete exp = ", response.status_code) \ No newline at end of file + print("delete exp = ", response.status_code) diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py index 6db4775cd..a03867c85 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py @@ -4,16 +4,15 @@ from helpers.list_reco_json_validate import * from helpers.utils import * -''' - Creates Experiment + - update results for 24 hrs + - update recommendation using start and end time as a parameter - Expected : recommendation should be available for the timestamp provided -''' - @pytest.mark.sanity def test_update_valid_recommendations_after_results_after_create_exp(cluster_type): + ''' + Creates Experiment + + update results for 24 hrs + + update recommendation using start and end time as a parameter + Expected : recommendation should be available for the timestamp provided + ''' input_json_file = "../json_files/create_exp.json" result_json_file = "../json_files/update_results.json" @@ -131,16 +130,14 @@ def test_update_valid_recommendations_after_results_after_create_exp(cluster_typ assert response.status_code == SUCCESS_STATUS_CODE -''' - Creates Experiment + - update results for 24 hrs + - update recommendation using only end time as a parameter - Expected : recommendation should be available for the timestamp provided -''' - - @pytest.mark.sanity def test_update_valid_recommendations_just_endtime_input_after_results_after_create_exp(cluster_type): + ''' + Creates Experiment + + update results for 24 hrs + + update recommendation using only end time as a parameter + Expected : recommendation should be available for the timestamp provided + ''' input_json_file = "../json_files/create_exp.json" result_json_file = "../json_files/update_results.json" @@ -258,13 +255,11 @@ def test_update_valid_recommendations_just_endtime_input_after_results_after_cre assert response.status_code == SUCCESS_STATUS_CODE -''' -try to update recommendation without experiment name and end time and get 400 status with UPDATE_RECOMMENDATIONS_MANDATORY_DEFAULT_MESSAGE -''' - - @pytest.mark.negative def test_update_recommendations_without_experiment_name_end_time(cluster_type): + ''' + try to update recommendation without experiment name and end time and get 400 status with UPDATE_RECOMMENDATIONS_MANDATORY_DEFAULT_MESSAGE + ''' form_kruize_url(cluster_type) response = update_recommendations(None, None, None) data = response.json() @@ -272,13 +267,11 @@ def test_update_recommendations_without_experiment_name_end_time(cluster_type): assert data['message'] == UPDATE_RECOMMENDATIONS_MANDATORY_DEFAULT_MESSAGE -''' -try to update recommendation without end time and get 400 status with 
UPDATE_RECOMMENDATIONS_MANDATORY_INTERVAL_END_DATE
-'''
-
-
 @pytest.mark.negative
 def test_update_recommendations_without_end_time(cluster_type):
+    '''
+    Try to update recommendation without end time; expect a 400 status with UPDATE_RECOMMENDATIONS_MANDATORY_INTERVAL_END_DATE
+    '''
     form_kruize_url(cluster_type)
     experiment_name = "test123"
     response = update_recommendations(experiment_name, None, None)
@@ -287,13 +280,11 @@ def test_update_recommendations_without_end_time(cluster_type):
     assert data['message'] == UPDATE_RECOMMENDATIONS_MANDATORY_INTERVAL_END_DATE
-'''
-Update recommendation with unknown experiment name and end date.
-'''
-
-
 @pytest.mark.negative
 def test_update_recommendations_with_unknown_experiment_name_and_end_time(cluster_type):
+    '''
+    Update recommendation with an unknown experiment name and end date.
+    '''
     form_kruize_url(cluster_type)
     experiment_name = "test123"
     end_time = "2023-01-02T00:15:00.000Z"
@@ -303,13 +294,11 @@ def test_update_recommendations_with_unknown_experiment_name_and_end_time(cluste
     assert data['message'] == UPDATE_RECOMMENDATIONS_DATA_NOT_FOUND
-'''
-Update recommendation with start time precede end time.
-'''
-
-
 @pytest.mark.negative
 def test_update_recommendations_with_end_time_precede_start_time(cluster_type):
+    '''
+    Update recommendation with the start time preceding the end time.
+    '''
     form_kruize_url(cluster_type)
     experiment_name = "test123"
     start_time = "2023-01-03T00:15:00.000Z"
     end_time = "2023-01-02T00:15:00.000Z"
@@ -320,13 +309,11 @@ def test_update_recommendations_with_end_time_precede_start_time(cluster_type):
     assert data['message'] == UPDATE_RECOMMENDATIONS_START_TIME_PRECEDE_END_TIME
-'''
-Update recommendation with start time and end time having difference more than 15 days.
-'''
-
-
 @pytest.mark.negative
-def test_update_recommendations_with_end_time_precede_start_time(cluster_type):
+def test_update_recommendations_with_time_gap_more_than_15_days(cluster_type):
+    '''
+    Update recommendation with start time and end time having a difference of more than 15 days.
+    '''
     form_kruize_url(cluster_type)
     experiment_name = "test123"
     start_time = "2023-01-03T00:15:00.000Z"

From 3d498ca583a9e8b54f7b7014e9e7d28309c6791f Mon Sep 17 00:00:00 2001
From: msvinaykumar
Date: Mon, 31 Jul 2023 12:45:31 +0530
Subject: [PATCH 08/12] invalid date test case added

Signed-off-by: msvinaykumar
---
 .../remote_monitoring_tests/helpers/utils.py  |  1 +
 .../rest_apis/test_update_recommendations.py  | 14 ++++++++++++++
 2 files changed, 15 insertions(+)

diff --git a/tests/scripts/remote_monitoring_tests/helpers/utils.py b/tests/scripts/remote_monitoring_tests/helpers/utils.py
index 28947f304..ed5325fc4 100644
--- a/tests/scripts/remote_monitoring_tests/helpers/utils.py
+++ b/tests/scripts/remote_monitoring_tests/helpers/utils.py
@@ -35,6 +35,7 @@
 UPDATE_RECOMMENDATIONS_DATA_NOT_FOUND = 'Data not found!'
 UPDATE_RECOMMENDATIONS_START_TIME_PRECEDE_END_TIME = 'The Start time should precede the End time!'
 UPDATE_RECOMMENDATIONS_START_TIME_END_TIME_GAP_ERROR = 'The gap between the interval_start_time and interval_end_time must be within a maximum of 15 days!'
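
Editor's note: the two interval rules encoded by the message constants above can be sketched client-side as follows. This is an illustrative aside, not part of the patch; TIME_FMT and is_valid_recommendation_window are hypothetical names, and the timestamp format is the '%Y-%m-%dT%H:%M:%S.%fZ' pattern these tests use, trimmed to milliseconds with [:-4] + 'Z' as the test code does.

    from datetime import datetime, timedelta

    TIME_FMT = '%Y-%m-%dT%H:%M:%S.%fZ'

    def is_valid_recommendation_window(interval_start_time, interval_end_time):
        # Mirrors UPDATE_RECOMMENDATIONS_START_TIME_PRECEDE_END_TIME and
        # UPDATE_RECOMMENDATIONS_START_TIME_END_TIME_GAP_ERROR: the start must
        # precede the end, and the gap must stay within a maximum of 15 days.
        start = datetime.strptime(interval_start_time, TIME_FMT)
        end = datetime.strptime(interval_end_time, TIME_FMT)
        return start < end and (end - start) <= timedelta(days=15)

    assert is_valid_recommendation_window('2023-01-01T00:15:00.000Z', '2023-01-02T00:15:00.000Z')
    assert not is_valid_recommendation_window('2023-01-03T00:15:00.000Z', '2023-01-02T00:15:00.000Z')
    # The tests build such timestamps by trimming strftime's microseconds to milliseconds:
    assert datetime(2023, 1, 2, 0, 15).strftime(TIME_FMT)[:-4] + 'Z' == '2023-01-02T00:15:00.000Z'
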
+UPDATE_RECOMMENDATIONS_INVALID_DATE_TIME_FORMAT = "Given timestamp - \" %s \" is not a valid timestamp format" # Kruize Recommendations Notification codes NOTIFICATION_CODE_FOR_DURATION_BASED_RECOMMENDATIONS_AVAILABLE = "112101" diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py index a03867c85..3d6e2ece3 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py @@ -280,6 +280,20 @@ def test_update_recommendations_without_end_time(cluster_type): assert data['message'] == UPDATE_RECOMMENDATIONS_MANDATORY_INTERVAL_END_DATE +@pytest.mark.negative +def test_update_recommendations_with_invalid_date_format_end_time(cluster_type): + ''' + Update recommendation with invalid end date format. + ''' + form_kruize_url(cluster_type) + experiment_name = "test123" + end_time = "2023-011-02T00:15:00.000Z" + response = update_recommendations(experiment_name, None, end_time) + data = response.json() + assert response.status_code == ERROR_STATUS_CODE + assert data['message'] == UPDATE_RECOMMENDATIONS_INVALID_DATE_TIME_FORMAT % (end_time) + + @pytest.mark.negative def test_update_recommendations_with_unknown_experiment_name_and_end_time(cluster_type): ''' From 199dbe72a77fc72b30fb6e290c6beb2a711d2b7d Mon Sep 17 00:00:00 2001 From: msvinaykumar Date: Wed, 2 Aug 2023 19:37:59 +0530 Subject: [PATCH 09/12] fixed failed tc Signed-off-by: msvinaykumar --- .../remote_monitoring_tests/helpers/kruize.py | 21 +- .../remote_monitoring_tests/helpers/utils.py | 2 + .../rest_apis/test_create_experiment.py | 104 ++++---- .../rest_apis/test_e2e_workflow.py | 68 +++-- .../rest_apis/test_list_recommendations.py | 24 +- .../rest_apis/test_update_results.py | 245 ++++++++++-------- 6 files changed, 267 insertions(+), 197 deletions(-) diff --git a/tests/scripts/remote_monitoring_tests/helpers/kruize.py b/tests/scripts/remote_monitoring_tests/helpers/kruize.py index 9f85d1003..fb9aa7170 100644 --- a/tests/scripts/remote_monitoring_tests/helpers/kruize.py +++ b/tests/scripts/remote_monitoring_tests/helpers/kruize.py @@ -57,7 +57,8 @@ def create_experiment(input_json_file, invalid_header=False): json_file = open(input_json_file, "r") input_json = json.loads(json_file.read()) print("\n************************************************************") - print(input_json) + pretty_json_str = json.dumps(input_json, indent=4) + print(pretty_json_str) print("\n************************************************************") # read the json @@ -73,8 +74,24 @@ def create_experiment(input_json_file, invalid_header=False): else: response = requests.post(url, json=input_json) - print(response) print("Response status code = ", response.status_code) + try: + # Parse the response content as JSON into a Python dictionary + response_json = response.json() + + # Check if the response_json is a valid JSON object or array + if isinstance(response_json, (dict, list)): + # Convert the response_json back to a JSON-formatted string with double quotes and pretty print it + pretty_response_json_str = json.dumps(response_json, indent=4) + + # Print the JSON string + print(pretty_response_json_str) + else: + print("Invalid JSON format in the response.") + print(response.text) # Print the response text as-is + except json.JSONDecodeError: + print("Response content is not valid JSON.") + print(response.text) # Print the response 
text as-is return response diff --git a/tests/scripts/remote_monitoring_tests/helpers/utils.py b/tests/scripts/remote_monitoring_tests/helpers/utils.py index ed5325fc4..5d2c1322d 100644 --- a/tests/scripts/remote_monitoring_tests/helpers/utils.py +++ b/tests/scripts/remote_monitoring_tests/helpers/utils.py @@ -29,7 +29,9 @@ SUCCESS_STATUS = "SUCCESS" ERROR_STATUS = "ERROR" UPDATE_RESULTS_SUCCESS_MSG = "Results added successfully! View saved results at /listExperiments." +UPDATE_RESULTS_DATE_PRECEDE_ERROR_MSG = "The Start time should precede the End time!" CREATE_EXP_SUCCESS_MSG = "Experiment registered successfully with Kruize. View registered experiments at /listExperiments" +CREATE_EXP_BULK_ERROR_MSG = "At present, the system does not support bulk entries!" UPDATE_RECOMMENDATIONS_MANDATORY_DEFAULT_MESSAGE = 'experiment_name is mandatory' UPDATE_RECOMMENDATIONS_MANDATORY_INTERVAL_END_DATE = 'interval_end_time is mandatory' UPDATE_RECOMMENDATIONS_DATA_NOT_FOUND = 'Data not found!' diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py index 07c3ec41a..5df144c5f 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py @@ -1,38 +1,42 @@ -import requests import pytest -from jinja2 import Environment, FileSystemLoader -from helpers.utils import * -from helpers.kruize import * from helpers.fixtures import * +from helpers.kruize import * +from helpers.utils import * +from jinja2 import Environment, FileSystemLoader mandatory_fields = [ - ("version", ERROR_STATUS_CODE, ERROR_STATUS), - ("cluster_name", ERROR_STATUS_CODE, ERROR_STATUS), - ("experiment_name", ERROR_STATUS_CODE, ERROR_STATUS), - ("mode", ERROR_STATUS_CODE, ERROR_STATUS), - ("target_cluster", ERROR_STATUS_CODE, ERROR_STATUS), - ("kubernetes_objects", ERROR_STATUS_CODE, ERROR_STATUS), - ("type", ERROR_STATUS_CODE, ERROR_STATUS), - ("kubernetes_objects_name", ERROR_STATUS_CODE, ERROR_STATUS), - ("namespace", ERROR_STATUS_CODE, ERROR_STATUS), - ("containers", ERROR_STATUS_CODE, ERROR_STATUS), - ("container_image_name", ERROR_STATUS_CODE, ERROR_STATUS), - ("container_name", ERROR_STATUS_CODE, ERROR_STATUS), - ("selector", SUCCESS_STATUS_CODE, SUCCESS_STATUS), - ("namespace", ERROR_STATUS_CODE, ERROR_STATUS), - ("performance_profile", ERROR_STATUS_CODE, ERROR_STATUS), - ("slo", SUCCESS_STATUS_CODE, SUCCESS_STATUS), - ("recommendation_settings", ERROR_STATUS_CODE, ERROR_STATUS), - ("trial_settings", ERROR_STATUS_CODE, ERROR_STATUS), - ("kubernetes_objects_name_selector", ERROR_STATUS_CODE, ERROR_STATUS), - ("performance_profile_slo", ERROR_STATUS_CODE, ERROR_STATUS) + ("version", ERROR_STATUS_CODE, ERROR_STATUS), + ("cluster_name", ERROR_STATUS_CODE, ERROR_STATUS), + ("experiment_name", ERROR_STATUS_CODE, ERROR_STATUS), + ("mode", ERROR_STATUS_CODE, ERROR_STATUS), + ("target_cluster", ERROR_STATUS_CODE, ERROR_STATUS), + ("kubernetes_objects", ERROR_STATUS_CODE, ERROR_STATUS), + ("type", ERROR_STATUS_CODE, ERROR_STATUS), + ("kubernetes_objects_name", ERROR_STATUS_CODE, ERROR_STATUS), + ("namespace", ERROR_STATUS_CODE, ERROR_STATUS), + ("containers", ERROR_STATUS_CODE, ERROR_STATUS), + ("container_image_name", ERROR_STATUS_CODE, ERROR_STATUS), + ("container_name", ERROR_STATUS_CODE, ERROR_STATUS), + ("selector", SUCCESS_STATUS_CODE, SUCCESS_STATUS), + ("namespace", ERROR_STATUS_CODE, ERROR_STATUS), + ("performance_profile", 
ERROR_STATUS_CODE, ERROR_STATUS), + ("slo", SUCCESS_STATUS_CODE, SUCCESS_STATUS), + ("recommendation_settings", ERROR_STATUS_CODE, ERROR_STATUS), + ("trial_settings", ERROR_STATUS_CODE, ERROR_STATUS), + ("kubernetes_objects_name_selector", ERROR_STATUS_CODE, ERROR_STATUS), + ("performance_profile_slo", ERROR_STATUS_CODE, ERROR_STATUS) ] csvfile = "/tmp/create_exp_test_data.csv" + @pytest.mark.negative -@pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, kubernetes_obj_type, name, namespace, container_image_name, container_name, measurement_duration, threshold", generate_test_data(csvfile, create_exp_test_data)) -def test_create_exp_invalid_tests(test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, kubernetes_obj_type, name, namespace, container_image_name, container_name, measurement_duration, threshold, cluster_type): +@pytest.mark.parametrize( + "test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, kubernetes_obj_type, name, namespace, container_image_name, container_name, measurement_duration, threshold", + generate_test_data(csvfile, create_exp_test_data)) +def test_create_exp_invalid_tests(test_name, expected_status_code, version, experiment_name, cluster_name, + performance_profile, mode, target_cluster, kubernetes_obj_type, name, namespace, + container_image_name, container_name, measurement_duration, threshold, cluster_type): """ Test Description: This test validates the response status code of createExperiment API against invalid input (blank, null, empty) for the json parameters. @@ -40,7 +44,7 @@ def test_create_exp_invalid_tests(test_name, expected_status_code, version, expe print("\n****************************************************") print("Test - ", test_name) print("****************************************************\n") - tmp_json_file="/tmp/create_exp_" + test_name + ".json" + tmp_json_file = "/tmp/create_exp_" + test_name + ".json" print("tmp_json_file = ", tmp_json_file) @@ -87,13 +91,14 @@ def test_create_exp_invalid_tests(test_name, expected_status_code, version, expe response = delete_experiment(tmp_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_create_exp(cluster_type): """ Test Description: This test validates the response status code of createExperiment API by passing a valid input for the json """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) response = delete_experiment(input_json_file) @@ -112,14 +117,16 @@ def test_create_exp(cluster_type): response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity -@pytest.mark.parametrize("k8s_obj_type", ["deployment", "deploymentConfig", "statefulset", "daemonset", "replicaset", "replicationController"]) +@pytest.mark.parametrize("k8s_obj_type", ["deployment", "deploymentConfig", "statefulset", "daemonset", "replicaset", + "replicationController"]) def test_create_exp_for_supported_k8s_obj_type(k8s_obj_type, cluster_type): """ Test Description: This test validates the response status code of createExperiment API by passing a valid json with supported kuberenetes object type """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) json_data = 
read_json_data_from_file(input_json_file) @@ -144,13 +151,14 @@ def test_create_exp_for_supported_k8s_obj_type(k8s_obj_type, cluster_type): response = delete_experiment(json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_create_duplicate_exp(cluster_type): """ Test Description: This test validates the response status code of createExperiment API by specifying the same experiment name """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" json_data = json.load(open(input_json_file)) experiment_name = json_data[0]['experiment_name'] @@ -184,6 +192,7 @@ def test_create_duplicate_exp(cluster_type): response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_create_multiple_exps_from_same_json_file(cluster_type): """ @@ -191,7 +200,7 @@ def test_create_multiple_exps_from_same_json_file(cluster_type): multiple experiments in the same json file. This test also validates the behaviour with multiple containers with different container images & container names """ - input_json_file="../json_files/create_multiple_exps.json" + input_json_file = "../json_files/create_multiple_exps.json" form_kruize_url(cluster_type) @@ -204,20 +213,21 @@ def test_create_multiple_exps_from_same_json_file(cluster_type): data = response.json() print(data['message']) - assert response.status_code == SUCCESS_STATUS_CODE - assert data['status'] == SUCCESS_STATUS - assert data['message'] == CREATE_EXP_SUCCESS_MSG + assert response.status_code == ERROR_STATUS_CODE + assert data['status'] == ERROR_STATUS + assert data['message'] == CREATE_EXP_BULK_ERROR_MSG response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_create_multiple_exps_from_diff_json_files(cluster_type): """ Test Description: This test validates the creation of multiple experiments using different json files """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" find = [] json_data = json.load(open(input_json_file)) @@ -248,13 +258,14 @@ def test_create_multiple_exps_from_diff_json_files(cluster_type): response = delete_experiment(json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_create_multiple_exps_with_same_deployment_namespace(cluster_type): """ Test Description: This test validates the creation of multiple experiments using same deployment & namespace """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" find = [] json_data = json.load(open(input_json_file)) @@ -263,7 +274,7 @@ def test_create_multiple_exps_with_same_deployment_namespace(cluster_type): form_kruize_url(cluster_type) # Create experiment using the specified json - num_exps = 5 + num_exps = 5 for i in range(num_exps): json_file = "/tmp/create_exp.json" generate_json(find, input_json_file, json_file, i) @@ -277,7 +288,7 @@ def test_create_multiple_exps_with_same_deployment_namespace(cluster_type): assert data['status'] == SUCCESS_STATUS assert data['message'] == CREATE_EXP_SUCCESS_MSG - num_exps = 5 + num_exps = 5 for i in range(num_exps): json_file = "/tmp/create_exp.json" generate_json(find, input_json_file, json_file, i) @@ -285,13 +296,14 @@ def test_create_multiple_exps_with_same_deployment_namespace(cluster_type): response = delete_experiment(json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def 
test_create_exp_with_both_performance_profile_slo(cluster_type): """ Test Description: This test validates the creation of an experiment by specifying both performance profile & slo """ - input_json_file="../json_files/perf_profile_slo.json" + input_json_file = "../json_files/perf_profile_slo.json" form_kruize_url(cluster_type) @@ -309,13 +321,14 @@ def test_create_exp_with_both_performance_profile_slo(cluster_type): response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_create_exp_with_both_deployment_name_selector(cluster_type): """ Test Description: This test validates the creation of an experiment by specifying both deployment name & selector """ - input_json_file="../json_files/deployment_name_selector.json" + input_json_file = "../json_files/deployment_name_selector.json" form_kruize_url(cluster_type) @@ -333,13 +346,14 @@ def test_create_exp_with_both_deployment_name_selector(cluster_type): response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_create_exp_with_invalid_header(cluster_type): """ Test Description: This test validates the creation of an experiment by specifying invalid content type in the header """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) @@ -347,7 +361,7 @@ def test_create_exp_with_invalid_header(cluster_type): print("delete exp = ", response.status_code) # Create experiment using the specified json - response = create_experiment(input_json_file, invalid_header = True) + response = create_experiment(input_json_file, invalid_header=True) data = response.json() print(data['message']) @@ -359,15 +373,15 @@ def test_create_exp_with_invalid_header(cluster_type): response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.extended @pytest.mark.parametrize("field, expected_status_code, expected_status", mandatory_fields) def test_create_exp_mandatory_fields(cluster_type, field, expected_status_code, expected_status): - form_kruize_url(cluster_type) # Create experiment using the specified json json_file = "/tmp/create_exp.json" - input_json_file="../json_files/create_exp_mandatory.json" + input_json_file = "../json_files/create_exp_mandatory.json" json_data = json.load(open(input_json_file)) if field == "performance_profile_slo": diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py index f964bddd7..98dcf0089 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py @@ -1,3 +1,4 @@ +import copy import json import pytest @@ -70,36 +71,53 @@ def test_list_recommendations_multiple_exps_from_diff_json_files(cluster_type): write_json_data_to_file(update_results_json_file, result_json) result_json_arr.append(result_json[0]) - response = update_results(update_results_json_file) + # Define the batch size + batch_size = 96 + # Loop to fetch elements in batches + current_index = 0 + while current_index < len(result_json_arr): + print(f"{current_index} -- {len(result_json_arr)}") + # Get the current batch + batch = result_json_arr[current_index:current_index + batch_size] + batch_deep_copy = copy.deepcopy(batch) + file_path = '/tmp/result_%s_to_%s.json' % (current_index, batch_size) + with open(file_path, 'w') as json_file: + 
json.dump(batch, json_file) + response = update_results(file_path) data = response.json() - print("message = ", data['message']) assert response.status_code == SUCCESS_STATUS_CODE assert data['status'] == SUCCESS_STATUS assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG - - # Expecting that we have recommendations - if j > 96: - response = update_recommendations(experiment_name, interval_start_time, end_time) - data = response.json() - assert response.status_code == SUCCESS_STATUS_CODE - response = list_recommendations(experiment_name) - if response.status_code == SUCCESS_200_STATUS_CODE: - recommendation_json = response.json() - recommendation_section = recommendation_json[0]["kubernetes_objects"][0]["containers"][0][ - "recommendations"] - high_level_notifications = recommendation_section["notifications"] - # Check if duration - assert INFO_DURATION_BASED_RECOMMENDATIONS_AVAILABLE_CODE in high_level_notifications - data_section = recommendation_section["data"] - short_term_recommendation = data_section[str(end_time)]["duration_based"]["short_term"] - short_term_notifications = short_term_recommendation["notifications"] - for notification in short_term_notifications.values(): - assert notification["type"] != "error" - - response = update_recommendations(experiment_name, interval_start_time, end_time) - data = response.json() - assert response.status_code == SUCCESS_STATUS_CODE + # Update the current index for the next batch + current_index += batch_size + # Convert UTC strings to datetime objects + for item in batch_deep_copy: + item['interval_start_time'] = datetime.strptime(item['interval_start_time'], "%Y-%m-%dT%H:%M:%S.%fZ") + item['interval_end_time'] = datetime.strptime(item['interval_end_time'], "%Y-%m-%dT%H:%M:%S.%fZ") + end_time = max(batch_deep_copy, key=lambda x: x['interval_end_time'])['interval_end_time'] + start_time = min(batch_deep_copy, key=lambda x: x['interval_start_time'])['interval_start_time'] + response = update_recommendations(experiment_name, start_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")[:-4] + "Z", + end_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")[:-4] + "Z") + data = response.json() + assert response.status_code == SUCCESS_STATUS_CODE + assert data[0]['experiment_name'] == experiment_name + assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['112101'][ + 'message'] == 'Duration Based Recommendations Available' + response = list_recommendations(experiment_name) + if response.status_code == SUCCESS_200_STATUS_CODE: + recommendation_json = response.json() + recommendation_section = recommendation_json[0]["kubernetes_objects"][0]["containers"][0][ + "recommendations"] + high_level_notifications = recommendation_section["notifications"] + # Check if duration + assert INFO_DURATION_BASED_RECOMMENDATIONS_AVAILABLE_CODE in high_level_notifications + data_section = recommendation_section["data"] + short_term_recommendation = \ + data_section[end_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")[:-4] + "Z"]["duration_based"]["short_term"] + short_term_notifications = short_term_recommendation["notifications"] + for notification in short_term_notifications.values(): + assert notification["type"] != "error" # Invoke list recommendations for the specified experiment response = list_recommendations(experiment_name) diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py index 8af6b08e8..d8ffd7f90 100644 --- 
a/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_list_recommendations.py @@ -1,6 +1,5 @@ import datetime import json -import time import pytest from helpers.fixtures import * @@ -51,8 +50,6 @@ def test_list_recommendations_single_result(cluster_type): assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['120001'][ 'message'] == 'There is not enough data available to generate a recommendation.' - time.sleep(1) - # Get the experiment name json_data = json.load(open(input_json_file)) experiment_name = json_data[0]['experiment_name'] @@ -103,8 +100,6 @@ def test_list_recommendations_without_parameters(cluster_type): assert data['status'] == SUCCESS_STATUS assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG - time.sleep(1) - start_time = '2023-04-13T22:59:20.982Z' end_time = '2023-04-14T23:59:20.982Z' # Get the experiment name @@ -205,8 +200,6 @@ def test_list_recommendations_without_results(cluster_type): assert data['status'] == SUCCESS_STATUS assert data['message'] == CREATE_EXP_SUCCESS_MSG - time.sleep(1) - # Get the experiment name json_data = json.load(open(input_json_file)) experiment_name = json_data[0]['experiment_name'] @@ -256,8 +249,6 @@ def test_list_recommendations_single_exp_multiple_results(cluster_type): assert data['status'] == SUCCESS_STATUS assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG - time.sleep(1) - # Get the experiment name json_data = json.load(open(input_json_file)) experiment_name = json_data[0]['experiment_name'] @@ -425,8 +416,6 @@ def test_list_recommendations_exp_name_and_latest(latest, cluster_type): assert data['status'] == SUCCESS_STATUS assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG - time.sleep(1) - # update Recommendations with open(result_json_file, 'r') as file: data = json.load(file) @@ -440,11 +429,17 @@ def test_list_recommendations_exp_name_and_latest(latest, cluster_type): start_time = min(data, key=lambda x: x['interval_start_time'])['interval_start_time'] end_time = max(data, key=lambda x: x['interval_end_time'])['interval_end_time'] + sorted_data = sorted(data, key=lambda x: x['interval_end_time'], reverse=True) + top_5_records = sorted_data[:5] + top_5_dates = [ + item['interval_start_time'] for item in sorted_data[:5] + ] + print(f"{top_5_dates}") # Get the experiment name json_data = json.load(open(input_json_file)) experiment_name = json_data[0]['experiment_name'] - response = update_recommendations(experiment_name, start_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")[:-4] + "Z", + response = update_recommendations(experiment_name, top_5_dates[4].strftime("%Y-%m-%dT%H:%M:%S.%fZ")[:-4] + "Z", end_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")[:-4] + "Z") data = response.json() assert response.status_code == SUCCESS_STATUS_CODE @@ -575,7 +570,6 @@ def test_list_recommendations_exp_name_and_monitoring_end_time(test_name, monito assert response.status_code == SUCCESS_STATUS_CODE assert data['status'] == SUCCESS_STATUS assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG - time.sleep(1) # update Recommendations with open(result_json_file, 'r') as file: data = json.load(file) @@ -689,9 +683,6 @@ def test_list_recommendations_multiple_exps_with_missing_metrics(cluster_type): assert response.status_code == SUCCESS_STATUS_CODE assert data['status'] == SUCCESS_STATUS assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG - - time.sleep(1) - # Get the experiment name json_data = json.load(open(create_exp_json_file)) experiment_name 
= json_data[0]['experiment_name'] @@ -1040,7 +1031,6 @@ def test_list_recommendations_notification_codes(cluster_type: str): assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG if j > 95: - time.sleep(1) response = update_recommendations(experiment_name, interval_start_time, end_time) data = response.json() assert response.status_code == SUCCESS_STATUS_CODE diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py index d5090fe16..d37190109 100644 --- a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py +++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py @@ -1,31 +1,42 @@ -import requests import pytest -from jinja2 import Environment, FileSystemLoader -from helpers.utils import * -from helpers.kruize import * from helpers.fixtures import * -import time +from helpers.kruize import * +from helpers.utils import * +from jinja2 import Environment, FileSystemLoader csvfile = "/tmp/update_results_test_data.csv" # Interval end time that is acceptable is measurement_duration + or - 30s # interval_start_time - "2022-01-23T18:25:43.511Z" interval_end_times = [ - ("invalid_zero_diff", "2022-01-23T18:25:43.511Z"), - ("invalid_minus_more_than_30s", "2022-01-23T18:40:12.511Z"), - ("valid_minus_30s", "2022-01-23T18:40:13.511Z"), - ("invalid_plus_more_than_30s", "2022-01-23T18:41:14.511Z"), - ("valid_plus_30s", "2022-01-23T18:41:13.511Z") + ("invalid_zero_diff", "2022-01-23T18:25:43.511Z"), + ("invalid_minus_more_than_30s", "2022-01-23T18:40:12.511Z"), + ("valid_minus_30s", "2022-01-23T18:40:13.511Z"), + ("invalid_plus_more_than_30s", "2022-01-23T18:41:14.511Z"), + ("valid_plus_30s", "2022-01-23T18:41:13.511Z") ] -@pytest.mark.negative -@pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, interval_start_time, interval_end_time, kubernetes_obj_type, name, namespace, container_image_name, container_name, cpuRequest_name, cpuRequest_sum, cpuRequest_avg, cpuRequest_format, cpuLimit_name, cpuLimit_sum, cpuLimit_avg, cpuLimit_format, cpuUsage_name, cpuUsage_sum, cpuUsage_max, cpuUsage_avg, cpuUsage_min, cpuUsage_format, cpuThrottle_name, cpuThrottle_sum, cpuThrottle_max, cpuThrottle_avg, cpuThrottle_format, memoryRequest_name, memoryRequest_sum, memoryRequest_avg, memoryRequest_format, memoryLimit_name, memoryLimit_sum, memoryLimit_avg, memoryLimit_format, memoryUsage_name, memoryUsage_sum, memoryUsage_max, memoryUsage_avg, memoryUsage_min, memoryUsage_format, memoryRSS_name, memoryRSS_sum, memoryRSS_max, memoryRSS_avg, memoryRSS_min, memoryRSS_format", generate_test_data(csvfile, update_results_test_data)) -def test_update_results_invalid_tests(test_name, expected_status_code, version, experiment_name, interval_start_time, interval_end_time, kubernetes_obj_type, name, namespace, container_image_name, container_name, cpuRequest_name, cpuRequest_sum, cpuRequest_avg, cpuRequest_format, cpuLimit_name, cpuLimit_sum, cpuLimit_avg, cpuLimit_format, cpuUsage_name, cpuUsage_sum, cpuUsage_max, cpuUsage_avg, cpuUsage_min, cpuUsage_format, cpuThrottle_name, cpuThrottle_sum, cpuThrottle_max, cpuThrottle_avg, cpuThrottle_format, memoryRequest_name, memoryRequest_sum, memoryRequest_avg, memoryRequest_format, memoryLimit_name, memoryLimit_sum, memoryLimit_avg, memoryLimit_format, memoryUsage_name, memoryUsage_sum, memoryUsage_max, memoryUsage_avg, memoryUsage_min, memoryUsage_format, memoryRSS_name, memoryRSS_sum, memoryRSS_max, memoryRSS_avg, 
memoryRSS_min, memoryRSS_format, cluster_type): +@pytest.mark.negative +@pytest.mark.parametrize( + "test_name, expected_status_code, version, experiment_name, interval_start_time, interval_end_time, kubernetes_obj_type, name, namespace, container_image_name, container_name, cpuRequest_name, cpuRequest_sum, cpuRequest_avg, cpuRequest_format, cpuLimit_name, cpuLimit_sum, cpuLimit_avg, cpuLimit_format, cpuUsage_name, cpuUsage_sum, cpuUsage_max, cpuUsage_avg, cpuUsage_min, cpuUsage_format, cpuThrottle_name, cpuThrottle_sum, cpuThrottle_max, cpuThrottle_avg, cpuThrottle_format, memoryRequest_name, memoryRequest_sum, memoryRequest_avg, memoryRequest_format, memoryLimit_name, memoryLimit_sum, memoryLimit_avg, memoryLimit_format, memoryUsage_name, memoryUsage_sum, memoryUsage_max, memoryUsage_avg, memoryUsage_min, memoryUsage_format, memoryRSS_name, memoryRSS_sum, memoryRSS_max, memoryRSS_avg, memoryRSS_min, memoryRSS_format", + generate_test_data(csvfile, update_results_test_data)) +def test_update_results_invalid_tests(test_name, expected_status_code, version, experiment_name, interval_start_time, + interval_end_time, kubernetes_obj_type, name, namespace, container_image_name, + container_name, cpuRequest_name, cpuRequest_sum, cpuRequest_avg, + cpuRequest_format, cpuLimit_name, cpuLimit_sum, cpuLimit_avg, cpuLimit_format, + cpuUsage_name, cpuUsage_sum, cpuUsage_max, cpuUsage_avg, cpuUsage_min, + cpuUsage_format, cpuThrottle_name, cpuThrottle_sum, cpuThrottle_max, + cpuThrottle_avg, cpuThrottle_format, memoryRequest_name, memoryRequest_sum, + memoryRequest_avg, memoryRequest_format, memoryLimit_name, memoryLimit_sum, + memoryLimit_avg, memoryLimit_format, memoryUsage_name, memoryUsage_sum, + memoryUsage_max, memoryUsage_avg, memoryUsage_min, memoryUsage_format, + memoryRSS_name, memoryRSS_sum, memoryRSS_max, memoryRSS_avg, memoryRSS_min, + memoryRSS_format, cluster_type): print("\n*******************************************************") print("Test - ", test_name) print("*******************************************************\n") - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) @@ -42,8 +53,8 @@ def test_update_results_invalid_tests(test_name, expected_status_code, version, assert data['message'] == CREATE_EXP_SUCCESS_MSG # Create experiment using the specified json - result_json_file="../json_files/update_results_template.json" - tmp_json_file="/tmp/update_results_" + test_name + ".json" + result_json_file = "../json_files/update_results_template.json" + tmp_json_file = "/tmp/update_results_" + test_name + ".json" environment = Environment(loader=FileSystemLoader("../json_files/")) template = environment.get_template("update_results_template.json") @@ -59,54 +70,54 @@ def test_update_results_invalid_tests(test_name, expected_status_code, version, filename = f"/tmp/update_results_{test_name}.json" content = template.render( - version = version, - experiment_name = experiment_name, - interval_start_time = interval_start_time, - interval_end_time = interval_end_time, - kubernetes_obj_type = kubernetes_obj_type, - name = name, - namespace = namespace, - container_image_name = container_image_name, - container_name = container_name, - cpuRequest_name = cpuRequest_name, - cpuRequest_sum = cpuRequest_sum, - cpuRequest_avg = cpuRequest_avg, - cpuRequest_format = cpuRequest_format, - cpuLimit_name = cpuLimit_name, - cpuLimit_sum = cpuLimit_sum, - cpuLimit_avg = cpuLimit_avg, - cpuLimit_format = cpuLimit_format, - 
cpuUsage_name = cpuUsage_name, - cpuUsage_sum = cpuUsage_sum, - cpuUsage_max = cpuUsage_max, - cpuUsage_avg = cpuUsage_avg, - cpuUsage_min = cpuUsage_min, - cpuUsage_format = cpuUsage_format, - cpuThrottle_name = cpuThrottle_name, - cpuThrottle_sum = cpuThrottle_sum, - cpuThrottle_max = cpuThrottle_max, - cpuThrottle_avg = cpuThrottle_avg, - cpuThrottle_format = cpuThrottle_format, - memoryRequest_name = memoryRequest_name, - memoryRequest_sum = memoryRequest_sum, - memoryRequest_avg = memoryRequest_avg, - memoryRequest_format = memoryRequest_format, - memoryLimit_name = memoryLimit_name, - memoryLimit_sum = memoryLimit_sum, - memoryLimit_avg = memoryLimit_avg, - memoryLimit_format = memoryLimit_format, - memoryUsage_name = memoryUsage_name, - memoryUsage_sum = memoryUsage_sum, - memoryUsage_max = memoryUsage_max, - memoryUsage_avg = memoryUsage_avg, - memoryUsage_min = memoryUsage_min, - memoryUsage_format = memoryUsage_format, - memoryRSS_name = memoryRSS_name, - memoryRSS_sum = memoryRSS_sum, - memoryRSS_max = memoryRSS_max, - memoryRSS_avg = memoryRSS_avg, - memoryRSS_min = memoryRSS_min, - memoryRSS_format = memoryRSS_format + version=version, + experiment_name=experiment_name, + interval_start_time=interval_start_time, + interval_end_time=interval_end_time, + kubernetes_obj_type=kubernetes_obj_type, + name=name, + namespace=namespace, + container_image_name=container_image_name, + container_name=container_name, + cpuRequest_name=cpuRequest_name, + cpuRequest_sum=cpuRequest_sum, + cpuRequest_avg=cpuRequest_avg, + cpuRequest_format=cpuRequest_format, + cpuLimit_name=cpuLimit_name, + cpuLimit_sum=cpuLimit_sum, + cpuLimit_avg=cpuLimit_avg, + cpuLimit_format=cpuLimit_format, + cpuUsage_name=cpuUsage_name, + cpuUsage_sum=cpuUsage_sum, + cpuUsage_max=cpuUsage_max, + cpuUsage_avg=cpuUsage_avg, + cpuUsage_min=cpuUsage_min, + cpuUsage_format=cpuUsage_format, + cpuThrottle_name=cpuThrottle_name, + cpuThrottle_sum=cpuThrottle_sum, + cpuThrottle_max=cpuThrottle_max, + cpuThrottle_avg=cpuThrottle_avg, + cpuThrottle_format=cpuThrottle_format, + memoryRequest_name=memoryRequest_name, + memoryRequest_sum=memoryRequest_sum, + memoryRequest_avg=memoryRequest_avg, + memoryRequest_format=memoryRequest_format, + memoryLimit_name=memoryLimit_name, + memoryLimit_sum=memoryLimit_sum, + memoryLimit_avg=memoryLimit_avg, + memoryLimit_format=memoryLimit_format, + memoryUsage_name=memoryUsage_name, + memoryUsage_sum=memoryUsage_sum, + memoryUsage_max=memoryUsage_max, + memoryUsage_avg=memoryUsage_avg, + memoryUsage_min=memoryUsage_min, + memoryUsage_format=memoryUsage_format, + memoryRSS_name=memoryRSS_name, + memoryRSS_sum=memoryRSS_sum, + memoryRSS_max=memoryRSS_max, + memoryRSS_avg=memoryRSS_avg, + memoryRSS_min=memoryRSS_min, + memoryRSS_format=memoryRSS_format ) with open(filename, mode="w", encoding="utf-8") as message: message.write(content) @@ -120,12 +131,13 @@ def test_update_results_invalid_tests(test_name, expected_status_code, version, response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_update_valid_results_after_create_exp(cluster_type): """ Test Description: This test validates update results for a valid experiment """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) response = delete_experiment(input_json_file) @@ -140,7 +152,7 @@ def test_update_valid_results_after_create_exp(cluster_type): assert data['message'] == CREATE_EXP_SUCCESS_MSG # 
Update results for the experiment - result_json_file="../json_files/update_results.json" + result_json_file = "../json_files/update_results.json" response = update_results(result_json_file) data = response.json() @@ -151,12 +163,13 @@ def test_update_valid_results_after_create_exp(cluster_type): response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_update_multiple_valid_results_single_json_after_create_exp(cluster_type): """ Test Description: This test validates update results for a valid experiment by posting multiple results """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) response = delete_experiment(input_json_file) @@ -171,7 +184,7 @@ def test_update_multiple_valid_results_single_json_after_create_exp(cluster_type assert data['message'] == CREATE_EXP_SUCCESS_MSG # Update results for the experiment - result_json_file="../json_files/multiple_results_single_exp.json" + result_json_file = "../json_files/multiple_results_single_exp.json" response = update_results(result_json_file) data = response.json() @@ -182,12 +195,13 @@ def test_update_multiple_valid_results_single_json_after_create_exp(cluster_type response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_update_multiple_valid_results_after_create_exp(cluster_type): """ Test Description: This test validates update results for a valid experiment """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) response = delete_experiment(input_json_file) @@ -205,8 +219,8 @@ def test_update_multiple_valid_results_after_create_exp(cluster_type): find_start_ts = "2022-01-23T18:25:43.511Z" find_end_ts = "2022-01-23T18:40:43.570Z" - result_json_file="../json_files/update_results.json" - filename="/tmp/result.json" + result_json_file = "../json_files/update_results.json" + filename = "/tmp/result.json" for i in range(num_res): with open(result_json_file, 'r') as file: @@ -246,7 +260,7 @@ def test_update_results_multiple_exps_from_same_json_file(cluster_type): Test Description: This test validates the response status code of updateResults API by posting results of multiple experiments in the same json file. 
""" - input_json_file="../json_files/create_multiple_exps.json" + input_json_file = "../json_files/create_multiple_exps.json" form_kruize_url(cluster_type) @@ -258,31 +272,32 @@ def test_update_results_multiple_exps_from_same_json_file(cluster_type): data = response.json() print("message = ", data['message']) - assert response.status_code == SUCCESS_STATUS_CODE - assert data['status'] == SUCCESS_STATUS - assert data['message'] == CREATE_EXP_SUCCESS_MSG + assert response.status_code == ERROR_STATUS_CODE + assert data['status'] == ERROR_STATUS + assert data['message'] == CREATE_EXP_BULK_ERROR_MSG # Update results for the experiment - result_json_file="../json_files/multiple_exps_results.json" + result_json_file = "../json_files/multiple_exps_results.json" response = update_results(result_json_file) data = response.json() print("message = ", data['message']) - assert response.status_code == SUCCESS_STATUS_CODE - assert data['status'] == SUCCESS_STATUS - assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG + assert response.status_code == ERROR_STATUS_CODE + assert data['status'] == ERROR_STATUS + assert data['message'] == 'Out of a total of 3 records, 3 failed to save' response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_update_results_multiple_exps_multiple_containers_from_same_json_file(cluster_type): """ Test Description: This test validates the response status code of updateResults API by posting results of multiple experiments with multiple containers in the same json file. """ - input_json_file="../json_files/create_multiple_exps_multiple_containers.json" + input_json_file = "../json_files/create_multiple_exps_multiple_containers.json" form_kruize_url(cluster_type) @@ -294,31 +309,32 @@ def test_update_results_multiple_exps_multiple_containers_from_same_json_file(cl data = response.json() print("message = ", data['message']) - assert response.status_code == SUCCESS_STATUS_CODE - assert data['status'] == SUCCESS_STATUS - assert data['message'] == CREATE_EXP_SUCCESS_MSG + assert response.status_code == ERROR_STATUS_CODE + assert data['status'] == ERROR_STATUS + assert data['message'] == CREATE_EXP_BULK_ERROR_MSG # Update results for the experiment - result_json_file="../json_files/multiple_exps_multiple_containers_results.json" + result_json_file = "../json_files/multiple_exps_multiple_containers_results.json" response = update_results(result_json_file) data = response.json() print("message = ", data['message']) - assert response.status_code == SUCCESS_STATUS_CODE - assert data['status'] == SUCCESS_STATUS - assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG + assert response.status_code == ERROR_STATUS_CODE + assert data['status'] == ERROR_STATUS + assert data['message'] == 'Out of a total of 3 records, 3 failed to save' response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + @pytest.mark.sanity def test_update_results_for_containers_not_present(cluster_type): """ Test Description: This test validates the response status code of updateResults API by posting results of multiple experiments with multiple containers in the same json file. 
""" - input_json_file="../json_files/create_multiple_exps.json" + input_json_file = "../json_files/create_multiple_exps.json" form_kruize_url(cluster_type) @@ -330,19 +346,20 @@ def test_update_results_for_containers_not_present(cluster_type): data = response.json() print("message = ", data['message']) - assert response.status_code == SUCCESS_STATUS_CODE - assert data['status'] == SUCCESS_STATUS - assert data['message'] == CREATE_EXP_SUCCESS_MSG + assert response.status_code == ERROR_STATUS_CODE + assert data['status'] == ERROR_STATUS + assert data['message'] == CREATE_EXP_BULK_ERROR_MSG # Update results for the experiment - result_json_file="../json_files/multiple_exps_multiple_containers_results.json" + result_json_file = "../json_files/multiple_exps_multiple_containers_results.json" response = update_results(result_json_file) data = response.json() print("message = ", data['message']) - assert response.status_code == ERROR_STATUS_CODE assert data['status'] == ERROR_STATUS + assert data['message'] == 'Out of a total of 3 records, 3 failed to save' + @pytest.mark.sanity def test_update_results_multiple_exps_from_diff_json_files(cluster_type): @@ -350,8 +367,8 @@ def test_update_results_multiple_exps_from_diff_json_files(cluster_type): Test Description: This test validates the updation of results for multiple experiments using different json files """ - input_json_file="../json_files/create_exp.json" - result_json_file="../json_files/update_results.json" + input_json_file = "../json_files/create_exp.json" + result_json_file = "../json_files/update_results.json" find = [] json_data = json.load(open(input_json_file)) @@ -393,24 +410,24 @@ def test_update_results_multiple_exps_from_diff_json_files(cluster_type): response = delete_experiment(json_file) print("delete exp = ", response.status_code) -#@pytest.mark.negative + +# @pytest.mark.negative def test_update_valid_results_without_create_exp(cluster_type): """ Test Description: This test validates the behavior of updateResults API by posting results for a non-existing experiment """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" json_data = json.load(open(input_json_file)) experiment_name = json_data[0]['experiment_name'] print("experiment_name = ", experiment_name) - form_kruize_url(cluster_type) response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) # Create experiment using the specified json - result_json_file="../json_files/update_results.json" + result_json_file = "../json_files/update_results.json" response = update_results(result_json_file) data = response.json() @@ -421,12 +438,13 @@ def test_update_valid_results_without_create_exp(cluster_type): assert data['status'] == ERROR_STATUS assert data['message'] == EXP_NAME_NOT_FOUND_MSG + @pytest.mark.sanity def test_update_results_with_same_result(cluster_type): """ Test Description: This test validates update results for a valid experiment """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) response = delete_experiment(input_json_file) @@ -441,7 +459,7 @@ def test_update_results_with_same_result(cluster_type): assert data['message'] == CREATE_EXP_SUCCESS_MSG # Update results for the experiment - result_json_file="../json_files/update_results.json" + result_json_file = "../json_files/update_results.json" response = update_results(result_json_file) data = response.json() @@ -453,7 +471,7 @@ def 
test_update_results_with_same_result(cluster_type): response = update_results(result_json_file) data = response.json() - assert response.status_code == ERROR_409_STATUS_CODE + assert response.status_code == ERROR_STATUS_CODE assert data['status'] == ERROR_STATUS exp_json_data = read_json_data_from_file(input_json_file) @@ -461,9 +479,15 @@ def test_update_results_with_same_result(cluster_type): result_json_data = read_json_data_from_file(result_json_file) interval_end_time = result_json_data[0]['interval_end_time'] + interval_start_time = result_json_data[0]['interval_start_time'] + + TIMESTAMP_PRESENT_MSG = 'A record with the name %s already exists within the timestamp range starting ' \ + 'from %s and ending on %s.' % (experiment_name, interval_start_time, interval_end_time) - TIMESTAMP_PRESENT_MSG = "Experiment name : " + experiment_name + " already contains result for timestamp : " + interval_end_time - assert data['message'] == TIMESTAMP_PRESENT_MSG + print(TIMESTAMP_PRESENT_MSG) + print(data['data'][0]['errorReasons'][0]) + assert data['message'] == 'Out of a total of 1 records, 1 failed to save' + assert data['data'][0]['errorReasons'][0] == TIMESTAMP_PRESENT_MSG response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) @@ -476,7 +500,7 @@ def test_update_results_with_valid_and_invalid_interval_duration(test_name, inte Test Description: This test validates update results by posting results with interval time difference that is not valid for the given measurement duration """ - input_json_file="../json_files/create_exp.json" + input_json_file = "../json_files/create_exp.json" form_kruize_url(cluster_type) @@ -492,7 +516,7 @@ def test_update_results_with_valid_and_invalid_interval_duration(test_name, inte assert data['message'] == CREATE_EXP_SUCCESS_MSG # Update results for the experiment - result_json_file="../json_files/update_results.json" + result_json_file = "../json_files/update_results.json" json_data = read_json_data_from_file(result_json_file) json_data[0]['interval_end_time'] = interval_end_time @@ -507,11 +531,16 @@ def test_update_results_with_valid_and_invalid_interval_duration(test_name, inte assert response.status_code == SUCCESS_STATUS_CODE assert data['status'] == SUCCESS_STATUS assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG + elif test_name == "invalid_zero_diff": + assert response.status_code == ERROR_STATUS_CODE + assert data['status'] == ERROR_STATUS + assert data['message'] == 'Out of a total of 1 records, 1 failed to save' + assert data['data'][0]['errorReasons'][0] == UPDATE_RESULTS_DATE_PRECEDE_ERROR_MSG else: assert response.status_code == ERROR_STATUS_CODE assert data['status'] == ERROR_STATUS - assert data['message'] == INVALID_INTERVAL_DURATION_MSG + assert data['message'] == 'Out of a total of 1 records, 1 failed to save' + assert data['data'][0]['errorReasons'][0] == INVALID_INTERVAL_DURATION_MSG response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) - From d85e09d2149ef5ffe6fe7954d8240c2e52153470 Mon Sep 17 00:00:00 2001 From: msvinaykumar Date: Wed, 16 Aug 2023 20:51:51 +0530 Subject: [PATCH 10/12] Bulk update results test script Signed-off-by: msvinaykumar --- tests/scripts/bulkScalabilityTest.py | 154 ++++++++ tests/scripts/quickTestScalability.py | 515 ++++++++------------------ 2 files changed, 311 insertions(+), 358 deletions(-) create mode 100644 tests/scripts/bulkScalabilityTest.py diff --git a/tests/scripts/bulkScalabilityTest.py b/tests/scripts/bulkScalabilityTest.py 
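
Editor's note: before the new file itself, here is an illustrative condensation (not part of the patch) of the flow bulkScalabilityTest.py implements below: build one payload holding every result for an experiment, POST it to /updateResults once, then request recommendations for the latest interval_end_time only. base_url and post_results_in_bulk_and_recommend are placeholder names; 'data' stands for the result template returned by the script's loadData().

    import copy
    import datetime
    import json

    import requests

    base_url = "http://127.0.0.1:8080"  # placeholder Kruize endpoint
    headers = {'Content-Type': 'application/json'}
    timeout = (60, 60)

    def post_results_in_bulk_and_recommend(data, experiment_name, start, minutes_jump, res_count):
        bulk, end = [], start
        for _ in range(res_count):
            begin, end = end, end + datetime.timedelta(minutes=minutes_jump)
            record = copy.deepcopy(data)
            record['experiment_name'] = experiment_name
            record['interval_start_time'] = begin.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
            record['interval_end_time'] = end.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
            bulk.append(record)
        # One bulk POST replaces res_count individual /updateResults calls.
        requests.post('%s/updateResults' % base_url, data=json.dumps(bulk), headers=headers, timeout=timeout)
        # A single recommendation refresh at the maximum end time covers the whole upload.
        requests.post('%s/updateRecommendations?experiment_name=%s&interval_end_time=%s' % (
            base_url, experiment_name, end.strftime('%Y-%m-%dT%H:%M:%S.%fZ')[:-4] + 'Z'),
            headers=headers, timeout=timeout)
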
new file mode 100644 index 000000000..00a478175 --- /dev/null +++ b/tests/scripts/bulkScalabilityTest.py @@ -0,0 +1,154 @@ +import argparse +import copy +import datetime +import json +import time + +import requests + + +def loadData(): + createdata = {"version":"1.0","experiment_name":"quarkus-resteasy-kruize-min-http-response-time-db_10","cluster_name":"cluster-one-division-bell","performance_profile":"resource-optimization-openshift","mode":"monitor","target_cluster":"remote","kubernetes_objects":[{"type":"deployment","name":"tfb-qrh-deployment_5","namespace":"default_5","containers":[{"container_image_name":"kruize/tfb-db:1.15","container_name":"tfb-server-0"},{"container_image_name":"kruize/tfb-qrh:1.13.2.F_et17","container_name":"tfb-server-1"}]}],"trial_settings":{"measurement_duration":"60min"},"recommendation_settings":{"threshold":"0.1"}} + data = {"version":"3.0","experiment_name":"quarkus-resteasy-kruize-min-http-response-time-db_4","interval_start_time":"2023-01-01T00:00:00.000Z","interval_end_time":"2023-01-01T00:00:00.000Z","kubernetes_objects":[{"type":"deployment","name":"tfb-qrh-deployment_5","namespace":"default_5","containers":[{"container_image_name":"kruize/tfb-db:1.15","container_name":"tfb-server-0","metrics":[{"name":"cpuRequest","results":{"aggregation_info":{"sum":None,"avg":0,"format":"cores"}}},{"name":"cpuLimit","results":{"aggregation_info":{"sum":None,"avg":0,"format":"cores"}}},{"name":"cpuUsage","results":{"aggregation_info":{"min":0,"max":0,"sum":0,"avg":0,"format":"cores"}}},{"name":"cpuThrottle","results":{"aggregation_info":{"sum":0,"max":0,"avg":0,"format":"cores"}}},{"name":"memoryRequest","results":{"aggregation_info":{"sum":260.85,"avg":50.21,"format":"MiB"}}},{"name":"memoryLimit","results":{"aggregation_info":{"sum":700,"avg":100,"format":"MiB"}}},{"name":"memoryUsage","results":{"aggregation_info":{"min":50.6,"max":198.5,"sum":298.5,"avg":40.1,"format":"MiB"}}},{"name":"memoryRSS","results":{"aggregation_info":{"min":50.6,"max":523.6,"sum":123.6,"avg":31.91,"format":"MiB"}}}]},{"container_image_name":"kruize/tfb-qrh:1.13.2.F_et17","container_name":"tfb-server-1","metrics":[{"name":"cpuRequest","results":{"aggregation_info":{"sum":4.4,"avg":1.1,"format":"cores"}}},{"name":"cpuLimit","results":{"aggregation_info":{"sum":2.0,"avg":0.5,"format":"cores"}}},{"name":"cpuUsage","results":{"aggregation_info":{"min":0.14,"max":0.84,"sum":0.84,"avg":0.12,"format":"cores"}}},{"name":"cpuThrottle","results":{"aggregation_info":{"sum":0.19,"max":0.09,"avg":0.045,"format":"cores"}}},{"name":"memoryRequest","results":{"aggregation_info":{"sum":250.85,"avg":50.21,"format":"MiB"}}},{"name":"memoryLimit","results":{"aggregation_info":{"sum":500,"avg":100,"format":"MiB"}}},{"name":"memoryUsage","results":{"aggregation_info":{"min":50.6,"max":198.5,"sum":198.5,"avg":40.1,"format":"MiB"}}},{"name":"memoryRSS","results":{"aggregation_info":{"min":50.6,"max":123.6,"sum":123.6,"avg":31.91,"format":"MiB"}}}]}]}]} + profile_data = {"name":"resource-optimization-openshift","profile_version":1,"k8s_type":"openshift","slo":{"slo_class":"resource_usage","direction":"minimize","objective_function":{"function_type":"expression","expression":"cpuRequest"},"function_variables":[{"name":"cpuRequest","datasource":"prometheus","value_type":"double","kubernetes_object":"container","query":"kube_pod_container_resource_requests{pod=~'$DEPLOYMENT_NAME$-[^-]*-[^-]*$', container='$CONTAINER_NAME$', namespace='$NAMESPACE', resource='cpu', 
unit='core'}","aggregation_functions":[{"function":"avg","query":"avg(kube_pod_container_resource_requests{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE\", resource=\"cpu\", unit=\"core\"})"}]}]}}
+    return (data, createdata, profile_data)
+
+def updateRecommendation(experiment_name, endDate):
+    try:
+        # Send the request with the payload
+        payloadRecommendationURL = "%s?experiment_name=%s&interval_end_time=%s" % (
+            updateRecommendationURL, experiment_name, endDate.strftime('%Y-%m-%dT%H:%M:%S.%fZ')[:-4] + 'Z')
+        response = requests.post(payloadRecommendationURL, data={}, headers=headers, timeout=timeout)
+        # Check the response
+        if response.status_code == 201:
+            # data = response.json()
+            # print('experiment_name %s : %s' % (experiment_name, data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['112101'][
+            #     'message']))
+            pass
+        else:
+            print(
+                f'{payloadRecommendationURL} Request failed with status code {response.status_code}: {response.text}')
+            requests.post(createProfileURL, data=profile_json_payload, headers=headers)
+    except requests.exceptions.Timeout:
+        print('updateRecommendation: timeout occurred while connecting to the server')
+    except requests.exceptions.RequestException as e:
+        print('updateRecommendation: an error occurred while connecting to the server:', e)
+
+def postResultsInBulk(expName, bulkData):
+    json_payload = json.dumps(bulkData)
+    try:
+        # Send the request with the payload
+        response = requests.post(updateExpURL, data=json_payload, headers=headers, timeout=timeout)
+        # Check the response
+        if response.status_code == 201:
+            pass
+        else:
+            print(f'{expName}: request failed with status code {response.status_code}: {response.text}')
+            requests.post(createProfileURL, data=profile_json_payload, headers=headers)
+    except requests.exceptions.Timeout:
+        print('Timeout occurred while connecting to the server')
+    except requests.exceptions.RequestException as e:
+        print('An error occurred while connecting to the server:', e)
+
+if __name__ == "__main__":
+    # create an ArgumentParser object
+    parser = argparse.ArgumentParser()
+
+    # add the named arguments
+    parser.add_argument('--ip', type=str, help='specify kruize ip')
+    parser.add_argument('--port', type=int, help='specify port')
+    parser.add_argument('--name', type=str, help='specify experiment name')
+    parser.add_argument('--count', type=str,
+                        help='specify the number of experiments and the number of results per experiment, separated by a comma')
+    parser.add_argument('--startdate', type=str, help='Specify start date and time in "%%Y-%%m-%%dT%%H:%%M:%%S.%%fZ" format.')  # '%%' because argparse applies %-formatting to help text
+    parser.add_argument('--minutesjump', type=int,
+                        help='specify the time difference between the start time and end time of the interval.')
+
+    # parse the arguments from the command line
+    args = parser.parse_args()
+    if args.port != 0:
+        createExpURL = 'http://%s:%s/createExperiment' % (args.ip, args.port)
+        updateExpURL = 'http://%s:%s/updateResults' % (args.ip, args.port)
+        createProfileURL = 'http://%s:%s/createPerformanceProfile' % (args.ip, args.port)
+        updateExpURL = 'http://%s:%s/updateResults' % (args.ip, args.port)
+        updateRecommendationURL = 'http://%s:%s/updateRecommendations' % (args.ip, args.port)
+    else:
+        createExpURL = 'http://%s/createExperiment' % (args.ip)
+        updateExpURL = 'http://%s/updateResults' % (args.ip)
+        createProfileURL = 'http://%s/createPerformanceProfile' % (args.ip)
+        updateExpURL = 'http://%s/updateResults' % (args.ip)
+        updateRecommendationURL = 'http://%s/updateRecommendations' % (args.ip)
+
+    expnameprfix = args.name
+    expcount = int(args.count.split(',')[0])
+    rescount = int(args.count.split(',')[1])
+    minutesjump = args.minutesjump
+    headers = {
+        'Content-Type': 'application/json'
+    }
+    timeout = (60, 60)
+    data, createdata, profile_data = loadData()
+
+    if args.startdate:
+        data['interval_end_time'] = args.startdate
+
+    print(createExpURL)
+    print(updateExpURL)
+    print(createProfileURL)
+    print("experiment_name : %s " % (expnameprfix))
+    print("Number of experiments to create : %s" % (expcount))
+    print("Number of results to create : %s" % (rescount))
+    print("startdate : %s" % (data['interval_end_time']))
+    print("minutes jump : %s" % (minutesjump))
+
+    #Create a performance profile
+    profile_json_payload = json.dumps(profile_data)
+    response = requests.post(createProfileURL, data=profile_json_payload, headers=headers)
+    if response.status_code == 201:
+        print('Request successful!')
+    else:
+        print(f'Request failed with status code {response.status_code}: {response.text}')
+
+    #Create experiment and post results
+    start_time = time.time()
+    for i in range(1, expcount + 1):
+        try:
+            successfulCnt = 0
+            experiment_name = "%s_%s" % (expnameprfix, i)
+            createdata['experiment_name'] = experiment_name
+            create_json_payload = json.dumps([createdata])
+            #Create experiment
+            requests.post(createProfileURL, data=profile_json_payload, headers=headers)
+            response = requests.post(createExpURL, data=create_json_payload, headers=headers, timeout=timeout)
+            j = 0
+            if response.status_code == 201 or response.status_code == 409:
+                bulkdata = []
+                totalResultDates = []
+                for j in range(rescount):
+                    interval_start_time = datetime.datetime.strptime(data['interval_end_time'], '%Y-%m-%dT%H:%M:%S.%fZ')
+                    interval_end_time = datetime.datetime.strptime(data['interval_end_time'],
+                                                                   '%Y-%m-%dT%H:%M:%S.%fZ') + datetime.timedelta(
+                        minutes=minutesjump)
+                    data['interval_end_time'] = interval_end_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
+                    totalResultDates.append(interval_end_time)
+                    data['experiment_name'] = experiment_name
+                    data['interval_start_time'] = interval_start_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
+                    data['interval_end_time'] = interval_end_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
+                    bulkdata.append(copy.deepcopy(data))
+                postResultsInBulk(experiment_name, bulkdata)
+                # Get the maximum datetime object
+                max_datetime = max(totalResultDates)
+                updateRecommendation(experiment_name, max_datetime)
+            else:
+                print(f'Request failed with status code {response.status_code}: {response.text}')
+        except requests.exceptions.Timeout:
+            print('Timeout occurred while connecting to Kruize')
+        except requests.exceptions.RequestException as e:
+            print('An error occurred while connecting to Kruize:', e)
+        except Exception as e:
+            print('An error occurred:', e)
+
+    elapsed_time = time.time() - start_time
+    hours, rem = divmod(elapsed_time, 3600)
+    minutes, seconds = divmod(rem, 60)
+    print("Time elapsed: {:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds))
diff --git a/tests/scripts/quickTestScalability.py b/tests/scripts/quickTestScalability.py
index 5c54942b3..60ba7bf64 100644
--- a/tests/scripts/quickTestScalability.py
+++ b/tests/scripts/quickTestScalability.py
@@ -14,17 +14,40 @@
 # if value is 2,10 then 2 experiments and 10 results for each
 # --minutesjump diff b/w endtime and starttime
 
-import json
-import datetime
-import requests
 import argparse
+import copy
+import datetime
+import json
 import multiprocessing
 import time
 from multiprocessing import Manager
 
+import requests
+
+
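+# Posts all of an experiment's result intervals in a single updateResults call,
+# rather than one request per interval as postResult() does.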
+def postResultsInBulk(expName, bulkData):
+    json_payload = json.dumps(bulkData)
+    try:
+        # Send the request with the payload
+        response = requests.post(updateExpURL, data=json_payload, headers=headers, timeout=timeout)
+        # Check the response
+        if response.status_code == 201:
+            print(
+                f"progress {expName} : ExperimentCount : %s/%s Results Count : %s/%s Recommendation count : %s" % (
+                    completedExperimentCount.value, expcount, len(totalResultDates), len(totalResultDates),
+                    len(completedRecommendation)), end="\r")
+            pass
+        else:
+            print(f'Request failed with status code {response.status_code}: {response.text}')
+            requests.post(createProfileURL, data=profile_json_payload, headers=headers)
+    except requests.exceptions.Timeout:
+        print('Timeout occurred while connecting to Kruize')
+    except requests.exceptions.RequestException as e:
+        print('An error occurred while connecting to Kruize:', e)
+
 
-def postResult(expName,startDate,endDate):
-    if args.debug: print("Posting results for %s - %s "%(startDate,endDate))
+def postResult(expName, startDate, endDate):
+    if args.debug: print("Posting results for %s - %s " % (startDate, endDate))
     # update the JSON data with the new interval times
     data['experiment_name'] = expName
     data['interval_start_time'] = startDate
@@ -32,11 +55,14 @@ def postResult(expName,startDate,endDate):
     # Convert payload to JSON string
     json_payload = json.dumps([data])
     try:
-        # Send the request with the payload
+        # Send the request with the payload
         response = requests.post(updateExpURL, data=json_payload, headers=headers, timeout=timeout)
-        # Check the response
+        # Check the response
         if response.status_code == 201:
-            print(f"progress {expName} : ExperimentCount : %s/%s Results Count : %s/%s Recommendation count : %s" %(completedExperimentCount.value,expcount,len(completedResultDatesList),len(totalResultDates),len(completedRecommendation) ), end="\r")
+            print(
+                f"progress {expName} : ExperimentCount : %s/%s Results Count : %s/%s Recommendation count : %s" % (
+                    completedExperimentCount.value, expcount, len(completedResultDatesList), len(totalResultDates),
+                    len(completedRecommendation)), end="\r")
             pass
         else:
             print(f'Request failed with status code {response.status_code}: {response.text}')
@@ -44,41 +70,43 @@ def postResult(expName,startDate,endDate):
     except requests.exceptions.Timeout:
         print('Timeout occurred while connecting to')
     except requests.exceptions.RequestException as e:
-        print('An error occurred while connecting to', e)
+        print('An error occurred while connecting to Kruize:', e)
     finally:
-        completedResultDatesList.append( datetime.datetime.strptime( endDate , '%Y-%m-%dT%H:%M:%S.%fZ'))
+        completedResultDatesList.append(datetime.datetime.strptime(endDate, '%Y-%m-%dT%H:%M:%S.%fZ'))
+
 
-def updateRecommendation(experiment_name,endDate):
+def updateRecommendation(experiment_name, endDate):
     try:
         # Send the request with the payload
-        payloadRecommendationURL = "%s?experiment_name=%s&interval_end_time=%s"%(updateRecommendationURL,experiment_name,endDate.strftime('%Y-%m-%dT%H:%M:%S.%fZ')[:-4] + 'Z')
+        payloadRecommendationURL = "%s?experiment_name=%s&interval_end_time=%s" % (
+            updateRecommendationURL, experiment_name, endDate.strftime('%Y-%m-%dT%H:%M:%S.%fZ')[:-4] + 'Z')
         if args.debug: print(payloadRecommendationURL)
         response = requests.post(payloadRecommendationURL, data={}, headers=headers, timeout=timeout)
         # Check the response
         if response.status_code == 201:
-            print(f"progress {experiment_name} : ExperimentCount : %s/%s Results Count : %s/%s Recommendation count : %s" %(completedExperimentCount.value,expcount,len(completedResultDatesList),len(totalResultDates),len(completedRecommendation) ), end="\r")
+            print(
+                f"progress {experiment_name} : ExperimentCount : %s/%s Results Count : %s/%s Recommendation count : %s" % (
+                    completedExperimentCount.value, expcount, len(completedResultDatesList), len(totalResultDates),
+                    len(completedRecommendation)), end="\r")
         else:
-            if args.debug: print(f'{payloadRecommendationURL} Request failed with status code {response.status_code}: {response.text}')
+            if args.debug: print(
+                f'{payloadRecommendationURL} Request failed with status code {response.status_code}: {response.text}')
             requests.post(createProfileURL, data=profile_json_payload, headers=headers)
     except requests.exceptions.Timeout:
         print('updateRecommendation Timeout occurred while connecting to')
     except requests.exceptions.RequestException as e:
-        print('updateRecommendation Timeout occurred while connecting to', e)
+        print('An error occurred in updateRecommendation while connecting to Kruize:', e)
     finally:
         completedRecommendation.append(endDate)
 
+
 def updateRecommendationInBulk():
     pendingRecommendation = list(set(totalResultDates).difference(completedRecommendation))
     recommendationDataList = []
     for i_end_date in pendingRecommendation:
-        recommendationDataList.append((createdata['experiment_name'],i_end_date))
-    num_processes = args.parallelresultcount
-    pool = multiprocessing.Pool(processes=num_processes)
-    # Start the parallel execution
-    pool.starmap(updateRecommendation, recommendationDataList)
-    # Close the pool and wait for the processes to finish
-    pool.close()
-    pool.join()
+        recommendationDataList.append((createdata['experiment_name'], i_end_date))
+    for expName, endDate in recommendationDataList: updateRecommendation(expName, endDate)
+
 
 def validateRecommendation():
     totalResultDates.sort()
@@ -91,283 +119,40 @@ def validateRecommendation():
         for completedDate in completedResultDatesList:
             if completedDate not in completedRecommendation:
                 subTotalResulutDates = totalResultDates[:totalResultDates.index(completedDate)]
-                if(all(x in completedResultDatesList for x in subTotalResulutDates)):
-                    if args.debug: print("You can generate recommendation for completedDate %s \n due to subTotalResulutDates %s \n are subset of completedResultSet %s" %(completedDate,subTotalResulutDates,completedResultDatesList))
-                    updateRecommendation(createdata['experiment_name'],completedDate)
+                if (all(x in completedResultDatesList for x in subTotalResulutDates)):
+                    if args.debug: print(
+                        "You can generate recommendation for completedDate %s \n due to subTotalResulutDates %s \n are subset of completedResultSet %s" % (
+                            completedDate, subTotalResulutDates, completedResultDatesList))
+                    updateRecommendation(createdata['experiment_name'], completedDate)
                     if (len(totalResultDates) == len(completedResultDatesList)):
                         updateRecommendationInBulk()
                         break
                 else:
-                    if args.debug: print("You CANNOT generate recommendation for completedDate %s \n due to subTotalResulutDates %s \n are not subset of completedResultSet %s" %(completedDate,subTotalResulutDates,completedResultDatesList))
+                    if args.debug: print(
+                        "You CANNOT generate recommendation for completedDate %s \n due to subTotalResulutDates %s \n are not subset of completedResultSet %s" % (
+                            completedDate, subTotalResulutDates, completedResultDatesList))
                     pass
         if args.debug: print('*************************')
         time.sleep(1)
 
-def loadData():
-    createdata = {
-        "version": "1.0",
-        "experiment_name": "quarkus-resteasy-kruize-min-http-response-time-db_10",
-        "cluster_name": 
"cluster-one-division-bell", - "performance_profile": "resource-optimization-openshift", - "mode": "monitor", - "target_cluster": "remote", - "kubernetes_objects": [ - { - "type": "deployment", - "name": "tfb-qrh-deployment_5", - "namespace": "default_5", - "containers": [ - { - "container_image_name": "kruize/tfb-db:1.15", - "container_name": "tfb-server-0" - }, - { - "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", - "container_name": "tfb-server-1" - } - ] - } - ], - "trial_settings": { - "measurement_duration": "15min" - }, - "recommendation_settings": { - "threshold": "0.1" - } - } - data = { - "version": "1.0", - "experiment_name": "quarkus-resteasy-kruize-min-http-response-time-db_4", - "interval_start_time": "2023-01-01T00:00:00.000Z", - "interval_end_time": "2023-01-01T00:00:00.000Z", - "kubernetes_objects": [ - { - "type": "deployment", - "name": "tfb-qrh-deployment_5", - "namespace": "default_5", - "containers": [ - { - "container_image_name": "kruize/tfb-db:1.15", - "container_name": "tfb-server-0", - "metrics": [ - { - "name": "cpuRequest", - "results": { - "aggregation_info": { - "sum": None, - "avg": 0, - "format": "cores" - } - } - }, - { - "name": "cpuLimit", - "results": { - "aggregation_info": { - "sum": None, - "avg": 0, - "format": "cores" - } - } - }, - { - "name": "cpuUsage", - "results": { - "aggregation_info": { - "min": 0, - "max": 0, - "sum": 0, - "avg": 0, - "format": "cores" - } - } - }, - { - "name": "cpuThrottle", - "results": { - "aggregation_info": { - "sum": 0, - "max": 0, - "avg": 0, - "format": "cores" - } - } - }, - { - "name": "memoryRequest", - "results": { - "aggregation_info": { - "sum": 260.85, - "avg": 50.21, - "format": "MiB" - } - } - }, - { - "name": "memoryLimit", - "results": { - "aggregation_info": { - "sum": 700, - "avg": 100, - "format": "MiB" - } - } - }, - { - "name": "memoryUsage", - "results": { - "aggregation_info": { - "min": 50.6, - "max": 198.5, - "sum": 298.5, - "avg": 40.1, - "format": "MiB" - } - } - }, - { - "name": "memoryRSS", - "results": { - "aggregation_info": { - "min": 50.6, - "max": 523.6, - "sum": 123.6, - "avg": 31.91, - "format": "MiB" - } - } - } - ] - }, - { - "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", - "container_name": "tfb-server-1", - "metrics": [ - { - "name": "cpuRequest", - "results": { - "aggregation_info": { - "sum": 4.4, - "avg": 1.1, - "format": "cores" - } - } - }, - { - "name": "cpuLimit", - "results": { - "aggregation_info": { - "sum": 2.0, - "avg": 0.5, - "format": "cores" - } - } - }, - { - "name": "cpuUsage", - "results": { - "aggregation_info": { - "min": 0.14, - "max": 0.84, - "sum": 0.84, - "avg": 0.12, - "format": "cores" - } - } - }, - { - "name": "cpuThrottle", - "results": { - "aggregation_info": { - "sum": 0.19, - "max": 0.09, - "avg": 0.045, - "format": "cores" - } - } - }, - { - "name": "memoryRequest", - "results": { - "aggregation_info": { - "sum": 250.85, - "avg": 50.21, - "format": "MiB" - } - } - }, - { - "name": "memoryLimit", - "results": { - "aggregation_info": { - "sum": 500, - "avg": 100, - "format": "MiB" - } - } - }, - { - "name": "memoryUsage", - "results": { - "aggregation_info": { - "min": 50.6, - "max": 198.5, - "sum": 198.5, - "avg": 40.1, - "format": "MiB" - } - } - }, - { - "name": "memoryRSS", - "results": { - "aggregation_info": { - "min": 50.6, - "max": 123.6, - "sum": 123.6, - "avg": 31.91, - "format": "MiB" - } - } - } - ] - } - ] - } - ] - } - profile_data = { - "name": "resource-optimization-openshift", - "profile_version": 1, - "k8s_type": 
"openshift", - "slo": { - "slo_class": "resource_usage", - "direction": "minimize", - "objective_function": { - "function_type": "expression", - "expression": "cpuRequest" - }, - "function_variables": [ - { - "name": "cpuRequest", - "datasource": "prometheus", - "value_type": "double", - "kubernetes_object": "container", - "query": "kube_pod_container_resource_requests{pod=~'$DEPLOYMENT_NAME$-[^-]*-[^-]*$', container='$CONTAINER_NAME$', namespace='$NAMESPACE', resource='cpu', unit='core'}", - "aggregation_functions": [ - { - "function": "avg", - "query": "avg(kube_pod_container_resource_requests{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE\", resource=\"cpu\", unit=\"core\"})" - } - ] - } - ] - } - } - return (data,createdata,profile_data) +def loadData(): + createdata = {"version":"1.0","experiment_name":"quarkus-resteasy-kruize-min-http-response-time-db_10","cluster_name":"cluster-one-division-bell","performance_profile":"resource-optimization-openshift","mode":"monitor","target_cluster":"remote","kubernetes_objects":[{"type":"deployment","name":"tfb-qrh-deployment_5","namespace":"default_5","containers":[{"container_image_name":"kruize/tfb-db:1.15","container_name":"tfb-server-0"},{"container_image_name":"kruize/tfb-qrh:1.13.2.F_et17","container_name":"tfb-server-1"}]}],"trial_settings":{"measurement_duration":"60min"},"recommendation_settings":{"threshold":"0.1"}} + data = {"version":"3.0","experiment_name":"quarkus-resteasy-kruize-min-http-response-time-db_4","interval_start_time":"2023-01-01T00:00:00.000Z","interval_end_time":"2023-01-01T00:00:00.000Z","kubernetes_objects":[{"type":"deployment","name":"tfb-qrh-deployment_5","namespace":"default_5","containers":[{"container_image_name":"kruize/tfb-db:1.15","container_name":"tfb-server-0","metrics":[{"name":"cpuRequest","results":{"aggregation_info":{"sum":None,"avg":0,"format":"cores"}}},{"name":"cpuLimit","results":{"aggregation_info":{"sum":None,"avg":0,"format":"cores"}}},{"name":"cpuUsage","results":{"aggregation_info":{"min":0,"max":0,"sum":0,"avg":0,"format":"cores"}}},{"name":"cpuThrottle","results":{"aggregation_info":{"sum":0,"max":0,"avg":0,"format":"cores"}}},{"name":"memoryRequest","results":{"aggregation_info":{"sum":260.85,"avg":50.21,"format":"MiB"}}},{"name":"memoryLimit","results":{"aggregation_info":{"sum":700,"avg":100,"format":"MiB"}}},{"name":"memoryUsage","results":{"aggregation_info":{"min":50.6,"max":198.5,"sum":298.5,"avg":40.1,"format":"MiB"}}},{"name":"memoryRSS","results":{"aggregation_info":{"min":50.6,"max":523.6,"sum":123.6,"avg":31.91,"format":"MiB"}}}]},{"container_image_name":"kruize/tfb-qrh:1.13.2.F_et17","container_name":"tfb-server-1","metrics":[{"name":"cpuRequest","results":{"aggregation_info":{"sum":4.4,"avg":1.1,"format":"cores"}}},{"name":"cpuLimit","results":{"aggregation_info":{"sum":2.0,"avg":0.5,"format":"cores"}}},{"name":"cpuUsage","results":{"aggregation_info":{"min":0.14,"max":0.84,"sum":0.84,"avg":0.12,"format":"cores"}}},{"name":"cpuThrottle","results":{"aggregation_info":{"sum":0.19,"max":0.09,"avg":0.045,"format":"cores"}}},{"name":"memoryRequest","results":{"aggregation_info":{"sum":250.85,"avg":50.21,"format":"MiB"}}},{"name":"memoryLimit","results":{"aggregation_info":{"sum":500,"avg":100,"format":"MiB"}}},{"name":"memoryUsage","results":{"aggregation_info":{"min":50.6,"max":198.5,"sum":198.5,"avg":40.1,"format":"MiB"}}},{"name":"memoryRSS","results":{"aggregation_info":{"min":50.6,"max":123.6,"sum":123.6,"avg":31.91,"format":"Mi
B"}}}]}]}]} + profile_data = {"name":"resource-optimization-openshift","profile_version":1,"k8s_type":"openshift","slo":{"slo_class":"resource_usage","direction":"minimize","objective_function":{"function_type":"expression","expression":"cpuRequest"},"function_variables":[{"name":"cpuRequest","datasource":"prometheus","value_type":"double","kubernetes_object":"container","query":"kube_pod_container_resource_requests{pod=~'$DEPLOYMENT_NAME$-[^-]*-[^-]*$', container='$CONTAINER_NAME$', namespace='$NAMESPACE', resource='cpu', unit='core'}","aggregation_functions":[{"function":"avg","query":"avg(kube_pod_container_resource_requests{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE\", resource=\"cpu\", unit=\"core\"})"}]}]}} + return (data, createdata, profile_data) +def recommendationPool(totalResultDates, num_processes): + recommendationDataList = [] + for i_end_date in totalResultDates: + recommendationDataList.append((createdata['experiment_name'], i_end_date)) + recommenderPool = multiprocessing.Pool(processes=num_processes) + # Start the parallel execution + recommenderPool.starmap(updateRecommendation, recommendationDataList) + # Close the pool and wait for the processes to finish + recommenderPool.close() + recommenderPool.join() if __name__ == "__main__": @@ -378,30 +163,33 @@ def loadData(): parser.add_argument('--ip', type=str, help='specify kruize ip') parser.add_argument('--port', type=int, help='specify port') parser.add_argument('--name', type=str, help='specify experiment name') - parser.add_argument('--count', type=str, help='specify input the number of experiments and corresponding results, separated by commas.') + parser.add_argument('--count', type=str, + help='specify input the number of experiments and corresponding results, separated by commas.') parser.add_argument('--startdate', type=str, help='Specify start date and time in "%Y-%m-%dT%H:%M:%S.%fZ" format.') - parser.add_argument('--minutesjump', type=int, help='specify the time difference between the start time and end time of the interval.') - parser.add_argument('--postresults', action='store_true' , help='By enabling flag it genrates results and post to updateResults api.') - parser.add_argument('--parallelresultcount', type=int, help='specify the quantity of processes to execute simultaneously for posting the results.') - parser.add_argument('--generaterecommendation', action='store_true', help='execution of recommendation generation.') + parser.add_argument('--minutesjump', type=int, + help='specify the time difference between the start time and end time of the interval.') + parser.add_argument('--postresults', action='store_true', + help='By enabling flag it genrates results and post to updateResults api.') + parser.add_argument('--parallelresultcount', type=int, + help='specify the quantity of processes to execute simultaneously for posting the results.') + parser.add_argument('--generaterecommendation', action='store_true', help='execution of recommendation generation.') parser.add_argument('--debug', type=bool, help='print debug log.') - - + parser.add_argument('--bulk', action='store_true', help='post bulk results.') # parse the arguments from the command line args = parser.parse_args() if args.port != 0: - createExpURL = 'http://%s:%s/createExperiment'%(args.ip,args.port) - updateExpURL = 'http://%s:%s/updateResults'%(args.ip,args.port) - createProfileURL = 'http://%s:%s/createPerformanceProfile'%(args.ip,args.port) - updateExpURL = 
'http://%s:%s/updateResults'%(args.ip,args.port) - updateRecommendationURL = 'http://%s:%s/updateRecommendations'%(args.ip,args.port) + createExpURL = 'http://%s:%s/createExperiment' % (args.ip, args.port) + updateExpURL = 'http://%s:%s/updateResults' % (args.ip, args.port) + createProfileURL = 'http://%s:%s/createPerformanceProfile' % (args.ip, args.port) + updateExpURL = 'http://%s:%s/updateResults' % (args.ip, args.port) + updateRecommendationURL = 'http://%s:%s/updateRecommendations' % (args.ip, args.port) else: - createExpURL = 'http://%s/createExperiment'%(args.ip) - updateExpURL = 'http://%s/updateResults'%(args.ip) - createProfileURL = 'http://%s/createPerformanceProfile'%(args.ip) - updateExpURL = 'http://%s/updateResults'%(args.ip) - updateRecommendationURL = 'http://%s/updateRecommendations'%(args.ip) + createExpURL = 'http://%s/createExperiment' % (args.ip) + updateExpURL = 'http://%s/updateResults' % (args.ip) + createProfileURL = 'http://%s/createPerformanceProfile' % (args.ip) + updateExpURL = 'http://%s/updateResults' % (args.ip) + updateRecommendationURL = 'http://%s/updateRecommendations' % (args.ip) expnameprfix = args.name expcount = int(args.count.split(',')[0]) @@ -412,7 +200,7 @@ def loadData(): 'Content-Type': 'application/json' } timeout = (60, 60) - data,createdata,profile_data = loadData() + data, createdata, profile_data = loadData() if args.startdate: data['interval_end_time'] = args.startdate @@ -420,14 +208,13 @@ def loadData(): print(createExpURL) print(updateExpURL) print(createProfileURL) - print("experiment_name : %s " %(expnameprfix)) - print("Number of experiments to create : %s" %(expcount)) - print("Number of results to create : %s" %(rescount)) - print("startdate : %s" %(data['interval_end_time'])) - print("minutes jump : %s" %(minutesjump)) - print("postresults : %s" %(args.postresults)) - print("generaterecommendation : %s" %(generaterecommendation)) - + print("experiment_name : %s " % (expnameprfix)) + print("Number of experiments to create : %s" % (expcount)) + print("Number of results to create : %s" % (rescount)) + print("startdate : %s" % (data['interval_end_time'])) + print("minutes jump : %s" % (minutesjump)) + print("postresults : %s" % (args.postresults)) + print("generaterecommendation : %s" % (generaterecommendation)) profile_json_payload = json.dumps(profile_data) # Send the request with the payload @@ -436,8 +223,7 @@ def loadData(): if response.status_code == 201: print('Request successful!') else: - print(f'Request failed with status code {response.status_code}: {response.text}') - + print(f'Request failed with status code {response.status_code}: {response.text}') # Create a shared list using multiprocessing.Manager() manager = Manager() @@ -445,14 +231,14 @@ def loadData(): totalResultDates = manager.list() completedResultDatesList = manager.list() completedRecommendation = manager.list() - completedExperimentCount = manager.Value(int,0) - completedResultsCount = manager.Value(int,0) + completedExperimentCount = manager.Value(int, 0) + completedResultsCount = manager.Value(int, 0) start_time = time.time() - for i in range(1,expcount+1): + for i in range(1, expcount + 1): try: successfulCnt = 0 - experiment_name = "%s_%s" %(expnameprfix,i) + experiment_name = "%s_%s" % (expnameprfix, i) createdata['experiment_name'] = experiment_name create_json_payload = json.dumps([createdata]) # Send the request with the payload @@ -460,28 +246,46 @@ def loadData(): # Check the response j = 0 if response.status_code == 201 or response.status_code 
== 409: - print('Create experiment_name %s Request successful!'%(experiment_name)) + print('Create experiment_name %s Request successful!' % (experiment_name)) completedExperimentCount.value = completedExperimentCount.value + 1 timeDeltaList = [] + bulkdata = [] for j in range(rescount): - interval_start_time = datetime.datetime.strptime(data['interval_end_time'] , '%Y-%m-%dT%H:%M:%S.%fZ') - interval_end_time = datetime.datetime.strptime(data['interval_end_time'] , '%Y-%m-%dT%H:%M:%S.%fZ' ) + datetime.timedelta(minutes=minutesjump) + interval_start_time = datetime.datetime.strptime(data['interval_end_time'], '%Y-%m-%dT%H:%M:%S.%fZ') + interval_end_time = datetime.datetime.strptime(data['interval_end_time'], + '%Y-%m-%dT%H:%M:%S.%fZ') + datetime.timedelta( + minutes=minutesjump) data['interval_end_time'] = interval_end_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ') - timeDeltaList.append((experiment_name,interval_start_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ'),interval_end_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))) + timeDeltaList.append((experiment_name, interval_start_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ'), + interval_end_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))) totalResultDates.append(interval_end_time) + data['experiment_name'] = experiment_name + data['interval_start_time'] = interval_start_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ') + data['interval_end_time'] = interval_end_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ') + bulkdata.append(copy.deepcopy(data)) + # with open("output.json", 'w') as json_file: + # json.dump(bulkdata, json_file, indent=4) + if args.postresults and args.generaterecommendation: - # Create a pool of processes - recommendationProcess = multiprocessing.Process(target=validateRecommendation) - recommendationProcess.start() - num_processes = args.parallelresultcount - pool = multiprocessing.Pool(processes=num_processes) - # Start the parallel execution - pool.starmap(postResult, timeDeltaList) - # Close the pool and wait for the processes to finish - recommendationProcess.join() - pool.close() - pool.join() + if args.bulk: + postResultsInBulk(experiment_name, bulkdata) + completedResultDatesList = totalResultDates + recommendationDataList = [] + for i_end_date in totalResultDates: + updateRecommendation(experiment_name, i_end_date, ) + else: + # Create a pool of processes + recommendationProcess = multiprocessing.Process(target=validateRecommendation) + recommendationProcess.start() + num_processes = args.parallelresultcount + # Start the parallel execution + pool = multiprocessing.Pool(processes=num_processes) + pool.starmap(postResult, timeDeltaList) + recommendationProcess.join() + # Close the pool and wait for the processes to finish + pool.close() + pool.join() elif args.postresults: num_processes = args.parallelresultcount pool = multiprocessing.Pool(processes=num_processes) @@ -491,20 +295,21 @@ def loadData(): pool.close() pool.join() elif args.generaterecommendation: - recommendationDataList = [] - for i_end_date in totalResultDates: - recommendationDataList.append((createdata['experiment_name'],i_end_date)) - num_processes = args.parallelresultcount - pool = multiprocessing.Pool(processes=num_processes) - # Start the parallel execution - pool.starmap(updateRecommendation, recommendationDataList) - # Close the pool and wait for the processes to finish - pool.close() - pool.join() + recommendationPool(totalResultDates, args.parallelresultcount) + # recommendationDataList = [] + # for i_end_date in totalResultDates: + # recommendationDataList.append((createdata['experiment_name'], 
i_end_date)) + # num_processes = args.parallelresultcount + # pool = multiprocessing.Pool(processes=num_processes) + # # Start the parallel execution + # pool.starmap(updateRecommendation, recommendationDataList) + # # Close the pool and wait for the processes to finish + # pool.close() + # pool.join() else: print("Invalid choice") else: - print(f'Request failed with status code {response.status_code}: {response.text}') + print(f'Request failed with status code {response.status_code}: {response.text}') except requests.exceptions.Timeout: print('Timeout occurred while connecting to') except requests.exceptions.RequestException as e: @@ -512,19 +317,13 @@ def loadData(): except Exception as e: print('An error occurred ', e) - - print('Request successful! completed : ExperimentCount : %s/%s Results Count : %s/%s Recommendation count : %s' %(completedExperimentCount.value,expcount,len(completedResultDatesList),len(totalResultDates),len(completedRecommendation) )) + print( + 'Request successful! completed : ExperimentCount : %s/%s Results Count : %s/%s Recommendation count : %s' % ( + completedExperimentCount.value, expcount, len(completedResultDatesList), len(totalResultDates), + len(completedRecommendation))) elapsed_time = time.time() - start_time hours, rem = divmod(elapsed_time, 3600) minutes, seconds = divmod(rem, 60) print("Time elapsed: {:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds)) - - - - #for i in {1..50}; do nohup time python3 -u quickTestScalability.py --ip master-1.kruizevin.lab.psi.pnq2.redhat.com --port 31521 --name 5kexp$i --count 100,1500 --minutesjump=15 > /tmp/5kexp$i.log 2>&1 & done - - - - - +# for i in {1..50}; do nohup time python3 -u quickTestScalability.py --ip master-1.kruizevin.lab.psi.pnq2.redhat.com --port 31521 --name 5kexp$i --count 100,1500 --minutesjump=15 > /tmp/5kexp$i.log 2>&1 & done From 3184fae38b7fa9f7bc8db9c2a5397f056711893a Mon Sep 17 00:00:00 2001 From: msvinaykumar Date: Tue, 22 Aug 2023 08:06:54 +0530 Subject: [PATCH 11/12] syntax error and bulk script wrapper Signed-off-by: msvinaykumar --- tests/scripts/bulkScalabilityTest.py | 35 ++++---- tests/scripts/bulkScalabilityWrapper.sh | 81 +++++++++++++++++++ .../remote_monitoring_tests.sh | 3 +- 3 files changed, 103 insertions(+), 16 deletions(-) create mode 100755 tests/scripts/bulkScalabilityWrapper.sh diff --git a/tests/scripts/bulkScalabilityTest.py b/tests/scripts/bulkScalabilityTest.py index 00a478175..dacb2b763 100644 --- a/tests/scripts/bulkScalabilityTest.py +++ b/tests/scripts/bulkScalabilityTest.py @@ -8,7 +8,7 @@ def loadData(): - createdata = {"version":"1.0","experiment_name":"quarkus-resteasy-kruize-min-http-response-time-db_10","cluster_name":"cluster-one-division-bell","performance_profile":"resource-optimization-openshift","mode":"monitor","target_cluster":"remote","kubernetes_objects":[{"type":"deployment","name":"tfb-qrh-deployment_5","namespace":"default_5","containers":[{"container_image_name":"kruize/tfb-db:1.15","container_name":"tfb-server-0"},{"container_image_name":"kruize/tfb-qrh:1.13.2.F_et17","container_name":"tfb-server-1"}]}],"trial_settings":{"measurement_duration":"60min"},"recommendation_settings":{"threshold":"0.1"}} + createdata = 
{"version":"1.0","experiment_name":"quarkus-resteasy-kruize-min-http-response-time-db_10","cluster_name":"cluster-one-division-bell","performance_profile":"resource-optimization-openshift","mode":"monitor","target_cluster":"remote","kubernetes_objects":[{"type":"deployment","name":"tfb-qrh-deployment_5","namespace":"default_5","containers":[{"container_image_name":"kruize/tfb-db:1.15","container_name":"tfb-server-0"},{"container_image_name":"kruize/tfb-qrh:1.13.2.F_et17","container_name":"tfb-server-1"}]}],"trial_settings":{"measurement_duration":"15min"},"recommendation_settings":{"threshold":"0.1"}} data = {"version":"3.0","experiment_name":"quarkus-resteasy-kruize-min-http-response-time-db_4","interval_start_time":"2023-01-01T00:00:00.000Z","interval_end_time":"2023-01-01T00:00:00.000Z","kubernetes_objects":[{"type":"deployment","name":"tfb-qrh-deployment_5","namespace":"default_5","containers":[{"container_image_name":"kruize/tfb-db:1.15","container_name":"tfb-server-0","metrics":[{"name":"cpuRequest","results":{"aggregation_info":{"sum":None,"avg":0,"format":"cores"}}},{"name":"cpuLimit","results":{"aggregation_info":{"sum":None,"avg":0,"format":"cores"}}},{"name":"cpuUsage","results":{"aggregation_info":{"min":0,"max":0,"sum":0,"avg":0,"format":"cores"}}},{"name":"cpuThrottle","results":{"aggregation_info":{"sum":0,"max":0,"avg":0,"format":"cores"}}},{"name":"memoryRequest","results":{"aggregation_info":{"sum":260.85,"avg":50.21,"format":"MiB"}}},{"name":"memoryLimit","results":{"aggregation_info":{"sum":700,"avg":100,"format":"MiB"}}},{"name":"memoryUsage","results":{"aggregation_info":{"min":50.6,"max":198.5,"sum":298.5,"avg":40.1,"format":"MiB"}}},{"name":"memoryRSS","results":{"aggregation_info":{"min":50.6,"max":523.6,"sum":123.6,"avg":31.91,"format":"MiB"}}}]},{"container_image_name":"kruize/tfb-qrh:1.13.2.F_et17","container_name":"tfb-server-1","metrics":[{"name":"cpuRequest","results":{"aggregation_info":{"sum":4.4,"avg":1.1,"format":"cores"}}},{"name":"cpuLimit","results":{"aggregation_info":{"sum":2.0,"avg":0.5,"format":"cores"}}},{"name":"cpuUsage","results":{"aggregation_info":{"min":0.14,"max":0.84,"sum":0.84,"avg":0.12,"format":"cores"}}},{"name":"cpuThrottle","results":{"aggregation_info":{"sum":0.19,"max":0.09,"avg":0.045,"format":"cores"}}},{"name":"memoryRequest","results":{"aggregation_info":{"sum":250.85,"avg":50.21,"format":"MiB"}}},{"name":"memoryLimit","results":{"aggregation_info":{"sum":500,"avg":100,"format":"MiB"}}},{"name":"memoryUsage","results":{"aggregation_info":{"min":50.6,"max":198.5,"sum":198.5,"avg":40.1,"format":"MiB"}}},{"name":"memoryRSS","results":{"aggregation_info":{"min":50.6,"max":123.6,"sum":123.6,"avg":31.91,"format":"MiB"}}}]}]}]} profile_data = {"name":"resource-optimization-openshift","profile_version":1,"k8s_type":"openshift","slo":{"slo_class":"resource_usage","direction":"minimize","objective_function":{"function_type":"expression","expression":"cpuRequest"},"function_variables":[{"name":"cpuRequest","datasource":"prometheus","value_type":"double","kubernetes_object":"container","query":"kube_pod_container_resource_requests{pod=~'$DEPLOYMENT_NAME$-[^-]*-[^-]*$', container='$CONTAINER_NAME$', namespace='$NAMESPACE', resource='cpu', unit='core'}","aggregation_functions":[{"function":"avg","query":"avg(kube_pod_container_resource_requests{pod=~\"$DEPLOYMENT_NAME$-[^-]*-[^-]*$\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE\", resource=\"cpu\", unit=\"core\"})"}]}]}} return (data, createdata, profile_data) @@ -28,7 +28,7 @@ 
def updateRecommendation(experiment_name, endDate): else: print( f'{payloadRecommendationURL} Request failed with status code {response.status_code}: {response.text}') - requests.post(createProfileURL, data=profile_json_payload, headers=headers) + #requests.post(createProfileURL, data=profile_json_payload, headers=headers) except requests.exceptions.Timeout: print('updateRecommendation Timeout occurred while connecting to') except requests.exceptions.RequestException as e: @@ -44,13 +44,14 @@ def postResultsInBulk(expName, bulkData): pass else: print(f'Request failed with status code {expName} {response.status_code}: {response.text}') - requests.post(createProfileURL, data=profile_json_payload, headers=headers) + #requests.post(createProfileURL, data=profile_json_payload, headers=headers) except requests.exceptions.Timeout: print('Timeout occurred while connecting to') except requests.exceptions.RequestException as e: print('An error occurred while connecting to', e) if __name__ == "__main__": + debug = False # create an ArgumentParser object parser = argparse.ArgumentParser() @@ -92,22 +93,24 @@ def postResultsInBulk(expName, bulkData): if args.startdate: data['interval_end_time'] = args.startdate - print(createExpURL) - print(updateExpURL) - print(createProfileURL) - print("experiment_name : %s " % (expnameprfix)) - print("Number of experiments to create : %s" % (expcount)) - print("Number of results to create : %s" % (rescount)) - print("startdate : %s" % (data['interval_end_time'])) - print("minutes jump : %s" % (minutesjump)) + if debug: + print(createExpURL) + print(updateExpURL) + print(createProfileURL) + print("experiment_name : %s " % (expnameprfix)) + print("Number of experiments to create : %s" % (expcount)) + print("Number of results to create : %s" % (rescount)) + print("startdate : %s" % (data['interval_end_time'])) + print("minutes jump : %s" % (minutesjump)) #Create a performance profile profile_json_payload = json.dumps(profile_data) response = requests.post(createProfileURL, data=profile_json_payload, headers=headers) if response.status_code == 201: - print('Request successful!') + if debug: print('Request successful!') + if expcount > 10 : time.sleep(5) else: - print(f'Request failed with status code {response.status_code}: {response.text}') + if debug: print(f'Request failed with status code {response.status_code}: {response.text}') #Create experiment and post results start_time = time.time() @@ -118,10 +121,12 @@ def postResultsInBulk(expName, bulkData): createdata['experiment_name'] = experiment_name create_json_payload = json.dumps([createdata]) #Create experiment - requests.post(createProfileURL, data=profile_json_payload, headers=headers) + #requests.post(createProfileURL, data=profile_json_payload, headers=headers) response = requests.post(createExpURL, data=create_json_payload, headers=headers, timeout=timeout) j = 0 - if response.status_code == 201 or response.status_code == 409: + if args.startdate: + data['interval_end_time'] = args.startdate + if response.status_code == 201 or response.status_code == 409 or response.status_code == 400: bulkdata = [] totalResultDates = [] for j in range(rescount): diff --git a/tests/scripts/bulkScalabilityWrapper.sh b/tests/scripts/bulkScalabilityWrapper.sh new file mode 100755 index 000000000..e9629bff5 --- /dev/null +++ b/tests/scripts/bulkScalabilityWrapper.sh @@ -0,0 +1,81 @@ +#!/bin/bash + +# Default values +ip="" +port="" +count="" +minutesjump="" +name_prefix="" +initial_startdate="2023-01-01T00:00:00.000Z" 
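+# Defaults below: total window to replay (limit_days, in days) and the slice
+# handed to each bulkScalabilityTest.py run (interval_hours, in hours).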
+limit_days="15" +interval_hours="6" + +# Parse command-line arguments +while [[ $# -gt 0 ]]; do + case "$1" in + --ip) + ip="$2" + shift 2 + ;; + --port) + port="$2" + shift 2 + ;; + --count) + count="$2" + shift 2 + ;; + --minutesjump) + minutesjump="$2" + shift 2 + ;; + --name) + name_prefix="$2" + shift 2 + ;; + --initialstartdate) + initial_startdate="$2" + shift 2 + ;; + --limitdays) + limit_days="$2" + shift 2 + ;; + --intervalhours) + interval_hours="$2" + shift 2 + ;; + *) + echo "Unknown option: $1" + exit 1 + ;; + esac +done + +if [[ -z "$ip" || -z "$port" || -z "$count" || -z "$minutesjump" || -z "$name_prefix" ]]; then + echo "Missing required arguments." + echo "Usage: $0 --ip --port --count --minutesjump --name --initialstartdate --limitdays --intervalhours " + exit 1 +fi + +# Calculate the number of iterations based on interval and limit days +iterations=$(( $limit_days * 24 / $interval_hours )) + +# Loop for each iteration +for (( i = 0; i < $iterations; i++ )); do + # Calculate the current start date for this iteration + current_startdate=$(date -u -d "$initial_startdate + $(( i * interval_hours )) hours" +"%Y-%m-%dT%H:%M:%S.%3NZ") + + # Build the full command + full_command="python3 -u bulkScalabilityTest.py --ip $ip --port $port --count $count --minutesjump $minutesjump --startdate $current_startdate --name ${name_prefix}" + + # Execute the command + echo "Executing: $full_command" + eval "$full_command" + + # Wait for the command to complete before moving to the next iteration + wait + + # Sleep for a short duration to avoid flooding the system with too many requests + #sleep 5 +done diff --git a/tests/scripts/remote_monitoring_tests/remote_monitoring_tests.sh b/tests/scripts/remote_monitoring_tests/remote_monitoring_tests.sh index 786e243f3..fad4a7a0e 100755 --- a/tests/scripts/remote_monitoring_tests/remote_monitoring_tests.sh +++ b/tests/scripts/remote_monitoring_tests/remote_monitoring_tests.sh @@ -60,6 +60,7 @@ function remote_monitoring_tests() { # Setup kruize if [ ${skip_setup} -eq 0 ]; then echo "Setting up kruize..." 
| tee -a ${LOG} + echo "${KRUIZE_SETUP_LOG}" setup "${KRUIZE_POD_LOG}" >> ${KRUIZE_SETUP_LOG} 2>&1 echo "Setting up kruize...Done" | tee -a ${LOG} @@ -117,7 +118,7 @@ function remote_monitoring_tests() { popd > /dev/null passed=$(grep -o -E '[0-9]+ passed' ${TEST_DIR}/report-${test}.html | cut -d' ' -f1) - failed=$(grep -o -E '[0-9]+ failed' ${TEST_DIR}/report-${test}.html | cut -d' ' -f1) + failed=$(grep -o -E 'check the boxes to filter the results.*' ${TEST_DIR}/report-${test}.html | grep -o -E '[0-9]+ failed' | cut -d' ' -f1) errors=$(grep -o -E '[0-9]+ errors' ${TEST_DIR}/report-${test}.html | cut -d' ' -f1) TESTS_PASSED=$(($TESTS_PASSED + $passed)) From 8b7f02cd92af0500cd604925f2201695a1abf184 Mon Sep 17 00:00:00 2001 From: msvinaykumar Date: Tue, 22 Aug 2023 08:10:26 +0530 Subject: [PATCH 12/12] renamed files Signed-off-by: msvinaykumar --- .../{bulkScalabilityTest.py => rosSimulationScalabilityTest.py} | 0 ...lkScalabilityWrapper.sh => rosSimulationScalabilityWrapper.sh} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename tests/scripts/{bulkScalabilityTest.py => rosSimulationScalabilityTest.py} (100%) rename tests/scripts/{bulkScalabilityWrapper.sh => rosSimulationScalabilityWrapper.sh} (100%) diff --git a/tests/scripts/bulkScalabilityTest.py b/tests/scripts/rosSimulationScalabilityTest.py similarity index 100% rename from tests/scripts/bulkScalabilityTest.py rename to tests/scripts/rosSimulationScalabilityTest.py diff --git a/tests/scripts/bulkScalabilityWrapper.sh b/tests/scripts/rosSimulationScalabilityWrapper.sh similarity index 100% rename from tests/scripts/bulkScalabilityWrapper.sh rename to tests/scripts/rosSimulationScalabilityWrapper.sh
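
For reference, a typical invocation of the wrapper might look like the following; run it from tests/scripts, since it calls the scalability test script by relative name (and it assumes the wrapper's internal python3 call is updated for the rename to rosSimulationScalabilityTest.py). The host and name prefix are illustrative placeholders, while the port and --minutesjump values mirror the sample command embedded at the bottom of quickTestScalability.py:

# Replay 15 days of results in 6-hour slices, creating 10 experiments with 100 results each:
# ./rosSimulationScalabilityWrapper.sh --ip <kruize-host> --port 31521 --count 10,100 \
#     --minutesjump 15 --name rossim --initialstartdate 2023-01-01T00:00:00.000Z \
#     --limitdays 15 --intervalhours 6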