### synthetic_run.py
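### Usage (inferred from the sys.argv parsing below; the interpreter name is an assumption):
###   python synthetic_run.py <num_requests> <request_delay_ms> <pypy|java> <test_name> <benchmark> [<benchmark> ...]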
### Import Packages
import sys
import json
import os
import datetime
import time
import logging
import subprocess
import requests
import re
from tqdm import tqdm
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
### Configure Request Adapter
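# total=0 disables urllib3-level retries; failed requests are instead retried
# manually with backoff in the benchmark loop below.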
retry_strategy = Retry(total=0)
adapter = HTTPAdapter(max_retries=retry_strategy, pool_connections=1, pool_maxsize=1)
http = requests.Session()
http.mount("http://", adapter)
### Generate Benchmark Run UID
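# Month-day-timestamp identifying this run; note the script does not currently reference it downstream.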
uid = datetime.datetime.now().strftime("%m-%d-%H:%M:%S")
### Declare Constants
NUM_REQUESTS = int(sys.argv[1])   # requests issued per (benchmark, strategy, rate) configuration
REQUEST_DELAY = int(sys.argv[2])  # delay between requests, in ms
test = sys.argv[4]                # test label used in the output and log filenames
filename = "data/"
if sys.argv[3] == "pypy":
    filename += "python-" + test + ".csv"
else:
    filename += "java-" + test + ".csv"
BENCHMARKS = sys.argv[5:]         # remaining arguments name the benchmarks to run
STRATEGIES = [
    "cold",
    "fixed&request_to_checkpoint=1",
    "request_centric&max_capacity=12"
]
RATES = [20, 4, 1]
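# Output CSV row format (one row written per request below):
#   index, benchmark, mutability(=1), strategy, rate, client_latency_us, server_time, overhead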
### Configure Logging Handlers
log_directory = "logs"
if not os.path.exists(log_directory):
    os.makedirs(log_directory)
data_directory = "data"
if not os.path.exists(data_directory):
    os.makedirs(data_directory)
logger = logging.getLogger()
if sys.argv[3] == "pypy":
    logging.basicConfig(filename="logs/python-" + test + ".log", format='%(asctime)s %(filename)s: %(message)s', filemode='a+')
else:
    logging.basicConfig(filename="logs/java-" + test + ".log", format='%(asctime)s %(filename)s: %(message)s', filemode='a+')
logger.setLevel(logging.DEBUG)
def check_namespace_pods():
    """Return the number of pods currently in the openfaas-fn namespace."""
    namespace = "openfaas-fn"
    cmd = f"kubectl get pods -n {namespace} --no-headers | wc -l"
    result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    return int(result.stdout.strip())
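### Main Benchmark Loop
# For each (benchmark, strategy, rate) triple: deploy the function, issue
# NUM_REQUESTS timed requests, append the results to the CSV, then tear down.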
user="pronghornae"
with open(filename, "a") as output_file:
for benchmark in BENCHMARKS:
for strategy in STRATEGIES:
for rate in RATES:
logger.info("Deploying %s function", benchmark)
deploy_cmd = f"faas-cli deploy --image={user}/{benchmark} --name={benchmark} --env=ENV={strategy},true,{rate}"
deploy_proc = subprocess.run(deploy_cmd.split(" "), capture_output=True)
logger.debug("Deploy command response: %s", deploy_proc.stdout.decode("UTF-8"))
time.sleep(5)
logger.info("Executing strategy: %s for benchmark %s with rate %s and mutability %s", strategy, benchmark, rate, "1")
nums = re.compile(r"\d+ ms")
url = f"http://127.0.0.1:8080/function/{benchmark}?mutability=1"
for index, request in tqdm(enumerate(range(NUM_REQUESTS))):
for retry in range(3):
retries = 0
try:
start_time = datetime.datetime.now()
response = http.get(url)
end_time = datetime.datetime.now()
search = nums.search(response.text)
if search is None: # PyPy benchmark
body = json.loads(response.text)
server_side = body.get('server_time')
overhead = body.get('client_overhead', 0)
else: # Java benchmark
server_side = int(search.group(0).split(" ")[0])
overhead = 0
client_side = (end_time - start_time) / datetime.timedelta(microseconds=1)
logger.debug("%s %s %s", server_side, overhead, client_side)
output_file.write(f"{index+ 1},{benchmark},1,{strategy},{rate},{client_side},{server_side},{overhead}\n")
time.sleep(REQUEST_DELAY/1000)
except:
retries += 1
time.sleep(min(retries ** 2, 10))
else:
break
output_file.flush()
logger.info("Completed strategy: %s for benchmark %s with mutability %s", strategy, benchmark, "1")
clean_cmd = f"faas-cli remove {benchmark}"
clean_proc = subprocess.run(clean_cmd.split(" "), capture_output=True)
logger.debug("Clean command response: %s", clean_proc.stdout.decode("UTF-8"))
                # Reset the backing database pod between configurations
                delete_cmd = f"kubectl delete -f {os.path.expanduser('~/pronghorn-artifact/database/pod.yaml')}"
                delete_proc = subprocess.run(delete_cmd.split(" "), capture_output=True)
                logger.debug("Delete command response: %s", delete_proc.stdout.decode("UTF-8"))
                redeploy_cmd = f"kubectl apply -f {os.path.expanduser('~/pronghorn-artifact/database/pod.yaml')}"
                redeploy_proc = subprocess.run(redeploy_cmd.split(" "), capture_output=True)
                logger.debug("Redeploy command response: %s", redeploy_proc.stdout.decode("UTF-8"))
                # Remove the MinIO checkpoints bucket so the next configuration starts clean
                minio_cleanup_cmd = "mc rb myminio/checkpoints --force"
                minio_cleanup_proc = subprocess.run(minio_cleanup_cmd.split(" "), capture_output=True)
                logger.debug("MinIO cleanup command response: %s", minio_cleanup_proc.stdout.decode("UTF-8"))
                # Block until every pod in the openfaas-fn namespace has terminated
                while check_namespace_pods() > 0:
                    print("Waiting for pods to terminate...")
                    time.sleep(10)