forked from tadgh/ArgoRevisit
-
Notifications
You must be signed in to change notification settings - Fork 0
/
ArgoQueries.py
268 lines (192 loc) · 9.72 KB
/
ArgoQueries.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
import logging
import random
import subprocess
import math
import pickle
import json2bulksql
import nobench_gendata
from bench_utils import get_random_data_slice
from Query import Query
from Global import argo_db, psql_db
from Settings import (
FILES_DIR,
ARGO_FILENAME,
ARGO_EXTRA_FILENAME,
ARGO_PICKLE_FILENAME,
DATA_SIZE,
PSQL_USER,
)
__author__ = 'Gary'
# Module-level logger for this benchmark-query module.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
# Best-effort load of the cached "recommended strings" produced by
# generate_data_argo(); Query8Argo samples from this list at query time.
try:
    with open(ARGO_PICKLE_FILENAME, 'rb') as infile:
        recommended_strings = pickle.load(infile)
except Exception as e:
    # Broad catch is deliberate: a missing or corrupt cache file must not
    # abort module import -- queries fall back to an empty list.
    log.error("Couldn't find pickle file!! (exception: {0})".format(str(e)))
    recommended_strings = []
class PrepFilesArgo(Query):
    """Convert a NoBench JSON file into Argo bulk-load files via json2bulksql."""

    def __init__(self, filename):
        super(PrepFilesArgo, self).__init__("Preparing files for Argo consumption")
        # Path of the JSON input file to convert.
        self.filename = filename

    def db_command(self):
        # Delegate the conversion to the project helper; argument meanings
        # are defined by json2bulksql.convertFile -- see that module.
        json2bulksql.convertFile(self.filename, 0, True, False)
class Query1Argo(Query):
    """NoBench projection query 1: project two top-level fields."""

    def __init__(self):
        super(Query1Argo, self).__init__("Projection Query 1")

    def db_command(self):
        sql = "SELECT str1, num FROM nobench_main;"
        return argo_db.execute_sql(sql)
class Query2Argo(Query):
    """NoBench projection query 2: project two fields of a nested object."""

    def __init__(self):
        super(Query2Argo, self).__init__("Projection Query 2")

    def db_command(self):
        sql = "SELECT nested_obj.str1, nested_obj.num FROM nobench_main;"
        return argo_db.execute_sql(sql)
class Query3Argo(Query):
    """NoBench projection query 3: project two nearby sparse attributes."""

    def __init__(self):
        super(Query3Argo, self).__init__("Projection Query 3")

    def db_command(self):
        sql = "SELECT sparse_110, sparse_119 FROM nobench_main;"
        return argo_db.execute_sql(sql)
class Query4Argo(Query):
    """NoBench projection query 4: project two distant sparse attributes."""

    def __init__(self):
        super(Query4Argo, self).__init__("Projection Query 4")

    def db_command(self):
        sql = "SELECT sparse_110, sparse_220 FROM nobench_main;"
        return argo_db.execute_sql(sql)
class Query5Argo(Query):
    """NoBench selection query 5: exact match on str1 for a random value."""

    def __init__(self):
        super(Query5Argo, self).__init__("Selection Query 5")

    def prepare(self):
        # Encode a random id in [0, DATA_SIZE) the same way the generator
        # did, so the predicate matches exactly one generated value.
        target = random.randint(0, DATA_SIZE - 1)
        self.arguments = [nobench_gendata.encode_string(target)]

    def db_command(self):
        sql = 'SELECT * FROM nobench_main WHERE str1 = "{}";'.format(self.arguments[0])
        return argo_db.execute_sql(sql)
class Query6Argo(Query):
    """NoBench selection query 6: range scan on num over ~0.1% of the data."""

    def __init__(self):
        super(Query6Argo, self).__init__("Selection Query 6")

    def prepare(self):
        # Bounds scale with the trial size so selectivity stays constant.
        self.arguments = get_random_data_slice(DATA_SIZE, 0.001)

    def db_command(self):
        sql = "SELECT * FROM nobench_main WHERE num >= {} AND num < {};".format(
            self.arguments[0], self.arguments[1])
        return argo_db.execute_sql(sql)
class Query7Argo(Query):
    """NoBench selection query 7: range scan on dyn1 over ~0.1% of the data."""

    def __init__(self):
        super(Query7Argo, self).__init__("Selection Query 7")

    def prepare(self):
        # Bounds scale with the trial size so selectivity stays constant.
        self.arguments = get_random_data_slice(DATA_SIZE, 0.001)

    def db_command(self):
        sql = "SELECT * FROM nobench_main WHERE dyn1 >= {} AND dyn1 < {};".format(
            self.arguments[0], self.arguments[1])
        return argo_db.execute_sql(sql)
class Query8Argo(Query):
    """NoBench selection query 8: objects whose nested_arr contains a string.

    The target string is drawn from the module-level ``recommended_strings``
    list loaded at import time (or refreshed by generate_data_argo()).
    """

    def __init__(self):
        super(Query8Argo, self).__init__("Selection Query 8")

    def prepare(self):
        # random.choice picks one element in O(1) without reseeding the
        # global RNG or shuffling (mutating) the shared module-level list,
        # which the original implementation did on every prepare().
        # Raises IndexError if the list is empty, same as the original.
        self.arguments.append(random.choice(recommended_strings))

    def db_command(self):
        # Bypasses the Argo SQL layer: Argo decomposes arrays into key rows
        # named 'nested_arr:<index>' in the string value table, so array
        # membership is matched against that table directly. Raw string
        # avoids the invalid '\d' escape under Python 3.
        cur = psql_db.cursor()
        cur.execute(
            r"""SELECT objid FROM argo_nobench_main_str WHERE keystr SIMILAR TO 'nested_arr:[\d]+' AND valstr = %s""",
            (self.arguments[0],))
        return cur
class Query9Argo(Query):
    """NoBench selection query 9: look up a sampled sparse_500 value.

    prepare() samples the sparse_500 value of the sixth returned row and
    db_command() selects every object carrying that value.
    """

    def __init__(self):
        super(Query9Argo, self).__init__("Selection Query 9")

    def prepare(self):
        results = argo_db.execute_sql("SELECT sparse_500 FROM nobench_main")
        for index, result in enumerate(results):
            if index == 5:
                self.arguments.append(result['sparse_500'])
                # Fix: stop once the target row is found; the original kept
                # iterating and drained the entire result set for nothing.
                break

    def db_command(self):
        # NOTE(review): if prepare() saw fewer than six rows, arguments is
        # empty and this raises IndexError -- unchanged from the original.
        return argo_db.execute_sql('SELECT * FROM nobench_main WHERE sparse_500 = "{}";'.format(self.arguments[0]))
class Query10Argo(Query):
    """NoBench aggregation query 10: count per 'thousandth' bucket over ~10% of the data."""
    def __init__(self):
        super(Query10Argo, self).__init__("Aggregation Query 10")
    def prepare(self):
        #getting 10 percent of data
        self.arguments = get_random_data_slice(DATA_SIZE, 0.1)
    def db_command(self):
        # Bypasses the Argo SQL layer: first materializes the objids whose
        # 'num' falls in the chosen range into a temp table, then aggregates
        # directly on the decomposed key-value table. Statement order matters.
        cur = psql_db.cursor()
        cur.execute("""DROP TABLE IF EXISTS intermediate;
CREATE TEMP TABLE intermediate AS SELECT objid FROM argo_nobench_main_num WHERE keystr = 'num' and valnum BETWEEN %s AND %s;
SELECT count(*) FROM argo_nobench_main_num WHERE objid in (SELECT objid FROM intermediate) AND keystr = 'thousandth' GROUP BY valnum""", (self.arguments[0], self.arguments[1]))
        return cur
class Query11Argo(Query):
    """NoBench join query 11: self-join nested_obj.str to str1 over a num range."""

    def __init__(self):
        super(Query11Argo, self).__init__("Join Query 11")

    def prepare(self):
        # Restrict the outer relation to ~0.1% of the data, matching the
        # selectivity used by the range queries 6 and 7.
        self.arguments = get_random_data_slice(DATA_SIZE, 0.001)

    def db_command(self):
        # Fix: the original query embedded the literal placeholders
        # "XXXXX AND YYYYY" and therefore could never execute; the bounds
        # are now supplied by prepare().
        return argo_db.execute_sql("""SELECT * FROM nobench_main AS left INNER JOIN
nobench_main AS right ON (left.nested_obj.str =
right.str1) WHERE left.num BETWEEN {} AND {};""".format(self.arguments[0], self.arguments[1]))
class Query12Argo(Query):
    """NoBench data-addition query 12: bulk-load the 'extra' files into Argo.

    Converts the extra JSON file with PrepFilesArgo, then COPYs the three
    per-type '|'-delimited files into Argo's bool/num/str value tables.
    """

    # (table/file suffix, value column) for each Argo value table.
    _COPY_SPECS = [
        ('bool', 'valbool'),
        ('num', 'valnum'),
        ('str', 'valstr'),
    ]

    def __init__(self):
        super(Query12Argo, self).__init__("Data Addition Query 12")

    def db_command(self):
        # Produce the per-type COPY input files for the extra dataset.
        PrepFilesArgo(ARGO_EXTRA_FILENAME).execute()
        # Fix: the original triplicated the Popen block for bool/num/str;
        # drive the identical commands from a table instead. The three
        # loads still run concurrently, then we wait for all of them.
        procs = []
        for kind, value_col in self._COPY_SPECS:
            copy_cmd = "COPY argo_nobench_main_{0}(objid, keystr, {1}) FROM '{2}' WITH DELIMITER '|';".format(
                kind, value_col,
                FILES_DIR + 'nobench_data_argo_extra_{0}.txt'.format(kind))
            procs.append(subprocess.Popen(
                ["psql", "-w", "-U", PSQL_USER, "-d", "argo", "-c", copy_cmd],
                stdout=subprocess.PIPE))
        for proc in procs:
            proc.communicate()
class Query13Argo(Query):
    """NoBench deep-select query 13: exact match on an 8-level-deep string."""

    def __init__(self):
        super(Query13Argo, self).__init__("Deep Select Query 13")

    def prepare(self):
        # Encode a random id the same way the generator did so the deeply
        # nested predicate matches a generated value.
        target = random.randint(0, DATA_SIZE - 1)
        self.arguments = [nobench_gendata.encode_string(target)]

    def db_command(self):
        sql = 'SELECT * FROM nobench_main WHERE deep_nested_obj.level_2.level_3.level_4.level_5.level_6.level_7.level_8.deep_str_single = "{}"'.format(self.arguments[0])
        return argo_db.execute_sql(sql)
class Query14Argo(Query):
    """NoBench deep-select query 14: project and filter an 8-level-deep field."""

    def __init__(self):
        super(Query14Argo, self).__init__("Deep Select Query 14")

    def prepare(self):
        # Only ten distinct deep_str_agg values exist, hence the 0-9 range.
        target = random.randint(0, 9)
        self.arguments = [nobench_gendata.encode_string(target)]

    def db_command(self):
        sql = """SELECT deep_nested_obj.level_2.level_3.level_4.level_5.level_6.level_7.level_8.deep_str_agg
FROM nobench_main
WHERE deep_nested_obj.level_2.level_3.level_4.level_5.level_6.level_7.level_8.deep_str_agg = "{}";""".format(self.arguments[0])
        return argo_db.execute_sql(sql)
class DropCollectionArgo(Query):
    """Remove every object from the Argo nobench_main collection."""

    def __init__(self):
        super(DropCollectionArgo, self).__init__("Dropping Data from Argo")

    def db_command(self):
        sql = "DELETE FROM nobench_main"
        return argo_db.execute_sql(sql)
class InitialLoadArgo(Query):
    """Bulk-load the initial NoBench dataset into Argo.

    Converts the main JSON file with PrepFilesArgo, then COPYs the three
    per-type '|'-delimited files into Argo's bool/num/str value tables.
    """

    # (table/file suffix, value column) for each Argo value table.
    _COPY_SPECS = [
        ('bool', 'valbool'),
        ('num', 'valnum'),
        ('str', 'valstr'),
    ]

    def __init__(self):
        super(InitialLoadArgo, self).__init__("Loading Initial Data into Argo")

    def db_command(self):
        # Fix: parenthesized form works under both Python 2 and 3; the
        # original used the Python-2-only print statement.
        print("Starting...")
        # Produce the per-type COPY input files for the main dataset.
        PrepFilesArgo(ARGO_FILENAME).execute()
        # Fix: the original triplicated the Popen block for bool/num/str;
        # drive the identical commands from a table instead. The three
        # loads still run concurrently, then we wait for all of them.
        procs = []
        for kind, value_col in self._COPY_SPECS:
            copy_cmd = "COPY argo_nobench_main_{0}(objid, keystr, {1}) FROM '{2}' WITH DELIMITER '|';".format(
                kind, value_col,
                FILES_DIR + 'nobench_data_argo_{0}.txt'.format(kind))
            procs.append(subprocess.Popen(
                ["psql", "-w", "-U", PSQL_USER, "-d", "argo", "-c", copy_cmd],
                stdout=subprocess.PIPE))
        for proc in procs:
            proc.communicate()
def generate_data_argo(items):
    """Generate *items* NoBench records for Argo and cache the string list.

    Refreshes the module-level ``recommended_strings`` and pickles it so a
    later run can reload it without regenerating the data.
    """
    global recommended_strings
    strings = nobench_gendata.main_non_cli(items, False, ARGO_FILENAME)
    recommended_strings = strings
    with open(ARGO_PICKLE_FILENAME, 'wb') as pickle_out:
        pickle.dump(strings, pickle_out)