Comparing changes

base repository: nebula-contrib/NebulaGraph-Bench
base: master
head repository: asu-idi/nebula-bench
compare: merge_hao
8 changes: 7 additions & 1 deletion .gitignore
@@ -31,4 +31,10 @@ scripts/k6
scripts/nebula-importer

dist
build
build

go/
go1.16.6.linux-amd64.tar.gz
gopath/
research_output.txt

14 changes: 7 additions & 7 deletions env
@@ -1,7 +1,7 @@
#DATA_FOLDER=target/data/test_data
#NEBULA_SPACE=
#NEBULA_USER=root
#NEBULA_PASSWORD=nebula
#NEBULA_ADDRESS=127.0.0.1:9669
#NEBULA_MAX_CONNECTION=100
#INFLUXDB_URL=http://192.168.8.60:8086/k6
DATA_FOLDER=target/data/test_data
NEBULA_SPACE=mytest
NEBULA_USER=root
NEBULA_PASSWORD=nebula
NEBULA_ADDRESS=127.0.0.1:9669
NEBULA_MAX_CONNECTION=100
#INFLUXDB_URL=http://127.0.0.1:8086/k6
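For context, these settings presumably reach the benchmark through the process environment (or a dotenv-style loader; the exact mechanism in nebula-bench may differ). A minimal sketch of how a consumer might pick them up:

    import os

    # Defaults mirror the values set in the env file above.
    space = os.environ.get("NEBULA_SPACE", "mytest")
    address = os.environ.get("NEBULA_ADDRESS", "127.0.0.1:9669")
    host, port = address.split(":")
    max_conn = int(os.environ.get("NEBULA_MAX_CONNECTION", "100"))
    print("space=%s host=%s port=%s max_conn=%d" % (space, host, port, max_conn))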
18 changes: 18 additions & 0 deletions nebula_bench/scenarios/fetch.py
@@ -0,0 +1,18 @@
# -*- encoding: utf-8 -*-
from nebula_bench.common.base import BaseScenario


class BaseFetchScenario(BaseScenario):
    abstract = True
    nGQL = "fetch prop on Person {} yield properties(vertex)"
    csv_path = "social_network/dynamic/person.csv"
    csv_index = [0]


class FetchOwn(BaseFetchScenario):
    abstract = False
    nGQL = "fetch prop on Person {} yield properties(vertex)"

class Fetch1Step(BaseFetchScenario):
    abstract = False
    nGQL = "GO 1 STEP FROM {} OVER KNOWS YIELD dst(edge) AS d | fetch prop on Person $-.d yield properties(vertex).birthday"
2 changes: 1 addition & 1 deletion nebula_bench/scenarios/find_path.py
@@ -4,6 +4,6 @@

class FindShortestPath(BaseScenario):
    abstract = False
    nGQL = "FIND SHORTEST PATH FROM {} TO {} OVER *"
    nGQL = "FIND SHORTEST PATH FROM {} TO {} OVER * YIELD path AS p"
    csv_path = "social_network/dynamic/person_knows_person.csv"
    csv_index = [0, 1]
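The added YIELD clause tracks newer NebulaGraph releases (3.x), where path statements must declare their output explicitly. With csv_index = [0, 1], both placeholders are filled from one KNOWS edge row; a hypothetical rendering (IDs are illustrative):

    src, dst = 933, 4139  # hypothetical person IDs from columns 0 and 1
    query = "FIND SHORTEST PATH FROM {} TO {} OVER * YIELD path AS p".format(src, dst)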
13 changes: 9 additions & 4 deletions nebula_bench/scenarios/go.py
@@ -4,21 +4,26 @@

class BaseGoScenario(BaseScenario):
    abstract = True
    nGQL = "GO 1 STEP FROM {} OVER KNOWS"
    nGQL = "GO 1 STEP FROM {} OVER KNOWS YIELD $$.Person.firstName"
    csv_path = "social_network/dynamic/person.csv"
    csv_index = [0]


class Go1Step(BaseGoScenario):
    abstract = False
    nGQL = "GO 1 STEP FROM {} OVER KNOWS"
    nGQL = "GO 1 STEP FROM {} OVER KNOWS YIELD $$.Person.firstName"


class Go2Step(BaseGoScenario):
    abstract = False
    nGQL = "GO 2 STEP FROM {} OVER KNOWS"
    nGQL = "GO 2 STEP FROM {} OVER KNOWS YIELD $$.Person.firstName"


class Go3Step(BaseGoScenario):
    abstract = False
    nGQL = "GO 3 STEP FROM {} OVER KNOWS"
    nGQL = "GO 3 STEP FROM {} OVER KNOWS YIELD $$.Person.firstName"


class GoEdge(BaseGoScenario):
    abstract = False
    nGQL = "GO 1 step FROM {} over KNOWS yield properties(edge)"
24 changes: 24 additions & 0 deletions nebula_bench/scenarios/go_notag.py
@@ -0,0 +1,24 @@
# -*- encoding: utf-8 -*-
from nebula_bench.common.base import BaseScenario


class BaseGoScenario(BaseScenario):
    abstract = True
    nGQL = "GO 1 STEP FROM {} OVER KNOWS YIELD properties($$).firstName"
    csv_path = "social_network/dynamic/person.csv"
    csv_index = [0]


class Go1Step_NoTag(BaseGoScenario):
    abstract = False
    nGQL = "GO 1 STEP FROM {} OVER KNOWS YIELD properties($$).firstName"


class Go2Step_NoTag(BaseGoScenario):
    abstract = False
    nGQL = "GO 2 STEP FROM {} OVER KNOWS YIELD properties($$).firstName"


class Go3Step_NoTag(BaseGoScenario):
    abstract = False
    nGQL = "GO 3 STEP FROM {} OVER KNOWS YIELD properties($$).firstName"
19 changes: 19 additions & 0 deletions nebula_bench/scenarios/match.py
@@ -0,0 +1,19 @@
# -*- encoding: utf-8 -*-
from nebula_bench.common.base import BaseScenario


class BaseMatchScenario(BaseScenario):
    abstract = True
    nGQL = "MATCH ()<-[e]-() RETURN e LIMIT 300;"
    csv_path = "social_network/dynamic/person_knows_person.csv"
    csv_index = [0]


class MatchAllEdge(BaseMatchScenario):
    abstract = False
    nGQL = "MATCH ()<-[e]-() RETURN e LIMIT 300;"


class MatchVertex(BaseMatchScenario):
    abstract = False
    nGQL = "MATCH (v) WHERE id(v) == {} RETURN v;"
120 changes: 120 additions & 0 deletions research_fetch.py
@@ -0,0 +1,120 @@
import json
import os
import time
import operator

config_file = '/usr/local/nebula/etc/nebula-storaged.conf'
rocksdb_block_cache_prefix = '--rocksdb_block_cache='
enable_storage_cache_prefix = '--enable_storage_cache='
storage_cache_capacity_prefix = '--storage_cache_capacity='
enable_vertex_pool_prefix = '--enable_vertex_pool='
vertex_pool_capacity_prefix = '--vertex_pool_capacity='
empty_key_pool_capacity_prefix = '--empty_key_pool_capacity='

result_output = "research_output.txt"

fetch1Step_output = "output/result_Fetch1Step.json"
result_file = open(result_output, mode='w', encoding='utf-8')

query_times = 0

def init():
    # Note: os.system spawns a shell, so this ulimit applies only to that
    # shell, not to this process or to the nebula services it launches.
    os.system('ulimit -n 130000')

def clear_memory():
    # Flush dirty pages and drop the page cache, dentries, and inodes so
    # each configuration starts from a cold OS cache.
    os.system('sync')
    time.sleep(2)
    os.system('sudo sh -c "echo 1 > /proc/sys/vm/drop_caches"')

    os.system('sync')
    time.sleep(2)
    os.system('sudo sh -c "echo 2 > /proc/sys/vm/drop_caches"')

    os.system('sync')
    time.sleep(2)
    os.system('sudo sh -c "echo 3 > /proc/sys/vm/drop_caches"')


def start_bench():
    # Start nebula and run the Fetch1Step scenario with 100 virtual users
    # for 1 minute.
    os.system('/usr/local/nebula/scripts/nebula.service start all')
    time.sleep(5)
    os.system('python3 run.py stress run -scenario fetch.Fetch1Step --args=\'-u 100 -d 1m\'')
    time.sleep(10)


def read_output_file(output_file):
    # Record the latency and check metrics from the k6 JSON summary; the
    # number of passed checks doubles as the total query count.
    global query_times
    with open(output_file, 'r') as load_f:
        result = json.load(load_f)
        metricMap = result['metrics']['latency']
        metricMap = dict(sorted(metricMap.items(), key=operator.itemgetter(0)))
        result_file.write("latency: " + str(metricMap) + "\n")
        checkMap = result['metrics']['checks']
        query_times = int(result['metrics']['checks']['passes'])
        checkMap = dict(sorted(checkMap.items(), key=operator.itemgetter(0)))
        result_file.write("check: " + str(checkMap) + "\n")
        result_file.flush()


def change_config(rocksdb_block_cache, storage_cache_capacity, vertex_pool_capacity, empty_key_pool_capacity):
    # Stop nebula, drop OS caches, then rewrite the storaged flags in place
    # for the next run.
    os.system('/usr/local/nebula/scripts/nebula.service stop all')
    time.sleep(5)
    clear_memory()
    time.sleep(5)
    file = open(config_file, mode='r', encoding='utf-8')
    content = file.read()
    file.close()
    arr = content.split("\n")
    file = open(config_file, mode='w', encoding='utf-8')
    for index in range(len(arr)):
        if arr[index].startswith(rocksdb_block_cache_prefix):
            arr[index] = rocksdb_block_cache_prefix + str(rocksdb_block_cache)
        elif arr[index].startswith(storage_cache_capacity_prefix):
            arr[index] = storage_cache_capacity_prefix + str(storage_cache_capacity)
        elif arr[index].startswith(vertex_pool_capacity_prefix):
            arr[index] = vertex_pool_capacity_prefix + str(vertex_pool_capacity)
        elif arr[index].startswith(enable_vertex_pool_prefix):
            if vertex_pool_capacity == 0:
                arr[index] = enable_vertex_pool_prefix + "false"
            else:
                arr[index] = enable_vertex_pool_prefix + "true"

        elif arr[index].startswith(enable_storage_cache_prefix):
            # The storage cache is toggled together with the vertex pool:
            # both capacities are zero exactly when the whole memory budget
            # goes to the block cache.
            if vertex_pool_capacity == 0:
                arr[index] = enable_storage_cache_prefix + "false"
            else:
                arr[index] = enable_storage_cache_prefix + "true"
        elif arr[index].startswith(empty_key_pool_capacity_prefix):
            arr[index] = empty_key_pool_capacity_prefix + str(empty_key_pool_capacity)
        if index != len(arr) - 1:
            file.write(arr[index] + "\n")
        else:
            file.write(arr[index])
    file.close()
    time.sleep(5)


if __name__ == '__main__':
    init()
    slice_num = 1
    while slice_num <= 16:
        mem_total = slice_num * 256
        block_cache = mem_total
        while block_cache >= 0:
            # Split the memory not given to the block cache 70/30 between
            # the vertex pool and the empty-key pool, and size the storage
            # cache at 1.3x their sum.
            vertex_pool = int((mem_total - block_cache) * 0.7)
            empty_pool = int((mem_total - block_cache) * 0.3)
            storage_cache = int((vertex_pool + empty_pool) * 1.3)
            change_config(block_cache, storage_cache, vertex_pool, empty_pool)
            result_file.write(str(block_cache) + " " + str(storage_cache) + " " + str(vertex_pool) + "\n")

            time_start = time.time()
            start_bench()
            time_end = time.time()

            read_output_file(fetch1Step_output)
            qps = query_times / (time_end - time_start)
            result_file.write("qps: " + str(qps) + "\n\n")
            result_file.flush()
            block_cache -= int(mem_total / 8)
        slice_num += 1
    result_file.close()
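To make the sweep concrete, here is the grid one outer iteration walks through (slice_num == 4, so a 1024-unit budget, presumably MB): nine block-cache settings stepping from the full budget down to zero, with the freed memory split 70/30 between the vertex pool and the empty-key pool and the storage cache sized at 1.3x their sum:

    mem_total = 4 * 256
    block_cache = mem_total
    while block_cache >= 0:
        vertex_pool = int((mem_total - block_cache) * 0.7)
        empty_pool = int((mem_total - block_cache) * 0.3)
        storage_cache = int((vertex_pool + empty_pool) * 1.3)
        print(block_cache, vertex_pool, empty_pool, storage_cache)
        block_cache -= mem_total // 8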
122 changes: 122 additions & 0 deletions research_fetchOwn.py
@@ -0,0 +1,122 @@
# This script mirrors research_fetch.py, but sweeps the FetchOwn scenario
# and reads its k6 summary instead.
import json
import os
import time
import operator

config_file = '/usr/local/nebula/etc/nebula-storaged.conf'
rocksdb_block_cache_prefix = '--rocksdb_block_cache='
enable_storage_cache_prefix = '--enable_storage_cache='
storage_cache_capacity_prefix = '--storage_cache_capacity='
enable_vertex_pool_prefix = '--enable_vertex_pool='
vertex_pool_capacity_prefix = '--vertex_pool_capacity='
empty_key_pool_capacity_prefix = '--empty_key_pool_capacity='

result_output = "research_output.txt"

fetchOwn_output = "output/result_FetchOwn.json"
result_file = open(result_output, mode='w', encoding='utf-8')

query_times = 0


def init():
    # As in research_fetch.py: the ulimit applies only to the spawned shell.
    os.system('ulimit -n 130000')


def clear_memory():
    os.system('sync')
    time.sleep(2)
    os.system('sudo sh -c "echo 1 > /proc/sys/vm/drop_caches"')

    os.system('sync')
    time.sleep(2)
    os.system('sudo sh -c "echo 2 > /proc/sys/vm/drop_caches"')

    os.system('sync')
    time.sleep(2)
    os.system('sudo sh -c "echo 3 > /proc/sys/vm/drop_caches"')


def start_bench():
    os.system('/usr/local/nebula/scripts/nebula.service start all')
    time.sleep(5)
    os.system('python3 run.py stress run -scenario fetch.FetchOwn --args=\'-u 100 -d 1m\'')
    time.sleep(10)


def read_output_file(output_file):
    global query_times
    with open(output_file, 'r') as load_f:
        result = json.load(load_f)
        metricMap = result['metrics']['latency']
        metricMap = dict(sorted(metricMap.items(), key=operator.itemgetter(0)))
        result_file.write("latency: " + str(metricMap) + "\n")
        checkMap = result['metrics']['checks']
        query_times = int(result['metrics']['checks']['passes'])
        checkMap = dict(sorted(checkMap.items(), key=operator.itemgetter(0)))
        result_file.write("check: " + str(checkMap) + "\n")
        result_file.flush()


def change_config(rocksdb_block_cache, storage_cache_capacity, vertex_pool_capacity, empty_key_pool_capacity):
    os.system('/usr/local/nebula/scripts/nebula.service stop all')
    time.sleep(5)
    clear_memory()
    time.sleep(5)
    file = open(config_file, mode='r', encoding='utf-8')
    content = file.read()
    file.close()
    arr = content.split("\n")
    file = open(config_file, mode='w', encoding='utf-8')
    for index in range(len(arr)):
        if arr[index].startswith(rocksdb_block_cache_prefix):
            arr[index] = rocksdb_block_cache_prefix + str(rocksdb_block_cache)
        elif arr[index].startswith(storage_cache_capacity_prefix):
            arr[index] = storage_cache_capacity_prefix + str(storage_cache_capacity)
        elif arr[index].startswith(vertex_pool_capacity_prefix):
            arr[index] = vertex_pool_capacity_prefix + str(vertex_pool_capacity)
        elif arr[index].startswith(enable_vertex_pool_prefix):
            if vertex_pool_capacity == 0:
                arr[index] = enable_vertex_pool_prefix + "false"
            else:
                arr[index] = enable_vertex_pool_prefix + "true"

        elif arr[index].startswith(enable_storage_cache_prefix):
            if vertex_pool_capacity == 0:
                arr[index] = enable_storage_cache_prefix + "false"
            else:
                arr[index] = enable_storage_cache_prefix + "true"
        elif arr[index].startswith(empty_key_pool_capacity_prefix):
            arr[index] = empty_key_pool_capacity_prefix + str(empty_key_pool_capacity)
        if index != len(arr) - 1:
            file.write(arr[index] + "\n")
        else:
            file.write(arr[index])
    file.close()
    time.sleep(5)


if __name__ == '__main__':
    init()
    slice_num = 1
    while slice_num <= 16:
        mem_total = slice_num * 256
        block_cache = mem_total
        while block_cache >= 0:
            vertex_pool = int((mem_total - block_cache) * 0.7)
            empty_pool = int((mem_total - block_cache) * 0.3)
            storage_cache = int((vertex_pool + empty_pool) * 1.3)
            change_config(block_cache, storage_cache, vertex_pool, empty_pool)
            result_file.write(str(block_cache) + " " + str(storage_cache) + " " + str(vertex_pool) + "\n")

            time_start = time.time()
            start_bench()
            time_end = time.time()

            read_output_file(fetchOwn_output)
            qps = query_times / (time_end - time_start)
            result_file.write("qps: " + str(qps) + "\n\n")
            result_file.flush()
            block_cache -= int(mem_total / 8)
        slice_num += 1
    result_file.close()
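Since the two research scripts differ only in the scenario name and output file, they could share a single driver; a hedged sketch of that consolidation (hypothetical, not part of this change):

    import os
    import time

    def start_bench(scenario):
        # Same steps as start_bench() above, parameterized by scenario,
        # e.g. "fetch.Fetch1Step" or "fetch.FetchOwn".
        os.system('/usr/local/nebula/scripts/nebula.service start all')
        time.sleep(5)
        os.system("python3 run.py stress run -scenario %s --args='-u 100 -d 1m'" % scenario)
        time.sleep(10)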