Add k6 #21

Merged: 2 commits, merged on Jul 13, 2021
3 changes: 3 additions & 0 deletions .editorconfig
@@ -16,3 +16,6 @@ trim_trailing_whitespace = true
max_line_length = off
trim_trailing_whitespace = false

[{*.js,*.j2}]
indent_style = space
indent_size = 2
7 changes: 5 additions & 2 deletions .github/workflows/import.yaml
@@ -70,5 +70,8 @@ jobs:

      - name: import the data
        run: |
          python3 run.py nebula importer

      - name: run stress testing
        run: |
          python3 run.py stress run -d 3
3 changes: 2 additions & 1 deletion .gitignore
@@ -24,4 +24,5 @@ mysql/data
.pytest_cache
.env
nebula-bench.db
.vscode
output
34 changes: 33 additions & 1 deletion README.md
@@ -95,7 +95,39 @@ python3 run.py nebula importer --dry-run

### nebula benchmark

Use [k6](https://github.com/k6io/k6) with the [xk6-nebula](https://github.com/HarrisChu/xk6-nebula) extension.
Note that the default `k6` binary in the `scripts` folder is built for Linux; if you want to
run the tool on macOS, please download the matching binary yourself from the [xk6-nebula releases](https://github.com/HarrisChu/xk6-nebula/tags).

Scenarios are in `nebula_bench/scenarios/`.
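
For reference, each scenario is a small Python class. The sketch below is a flattened version of the `Go1Step` scenario added in `nebula_bench/scenarios/go.py` by this PR (the real file factors the common attributes into a `BaseGoScenario` class); the comments describing how the attributes are consumed are inferred from the scenario files, not documented in the code itself.

```python
# -*- encoding: utf-8 -*-
from nebula_bench.common.base import BaseScenario


class Go1Step(BaseScenario):
    # non-abstract scenarios are picked up by `python3 run.py stress run`
    abstract = False
    # nGQL statement template; the "{}" placeholder is filled from the CSV column below
    nGQL = "GO 1 STEP FROM {} OVER KNOWS"
    # CSV file (relative to the data folder) that provides the parameter values
    csv_path = "social_network/dynamic/person.csv"
    # which CSV column feeds the placeholder, here the first column (the person vid)
    csv_index = [0]
```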

```bash
# show help
python3 run.py stress run --help

# run all scenarios with 100 virtual users, every scenario lasts 60 seconds.
python3 run.py stress run

# run all scenarios with 10 virtual users, every scenario lasts 3 seconds.
python3 run.py stress run -vu 10 -d 3

# run go.Go1Step scenarios with 10 virtual users, every scenario lasts 3 seconds.
python3 run.py stress run -vu 10 -d 3 -s go.Go1Step
```

The k6 config files, summary results, and outputs are in the `output` folder, e.g.:

```bash
# you should install jq to parse json.
# how many checks
jq .metrics.checks output/result_Go1Step.json

# summary latency
jq .metrics.latency output/result_Go1Step.json

# summary error message
awk -F ',' 'NR>1{print $NF}' output/output_Go1Step.csv |sort|uniq -c
```
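
If you prefer Python over `jq`/`awk`, the same results can be read programmatically. This is a minimal sketch, assuming only the JSON layout implied by the `jq` paths above (`metrics.checks`, `metrics.latency`) and the CSV layout implied by the `awk` command (error message in the last column, header in the first row).

```python
# -*- encoding: utf-8 -*-
import csv
import json
from collections import Counter

# file names match the jq/awk examples above
with open("output/result_Go1Step.json") as f:
    summary = json.load(f)
print(summary["metrics"]["checks"])   # check counts
print(summary["metrics"]["latency"])  # latency summary

# count distinct error messages in the last CSV column (header row skipped)
with open("output/output_Go1Step.csv") as f:
    reader = csv.reader(f)
    next(reader, None)
    errors = Counter(row[-1] for row in reader if row)
print(errors.most_common())
```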

## and more

35 changes: 34 additions & 1 deletion README_cn.md
@@ -88,7 +88,40 @@ python3 run.py nebula importer --dry-run

### nebula benchmark

Use [k6](https://github.com/k6io/k6) with the [xk6-nebula](https://github.com/HarrisChu/xk6-nebula) extension for stress testing.
Note that the default `k6` binary is built for Linux; if you need to use it on macOS, please download the matching binary yourself from the [xk6-nebula releases](https://github.com/HarrisChu/xk6-nebula/tags).

The automated scenarios are in `nebula_bench/scenarios/`.

```bash
# show help
python3 run.py stress run --help

# run all scenarios with 100 virtual users, every scenario lasts 60 seconds.
python3 run.py stress run

# run all scenarios with 10 virtual users, every scenario lasts 3 seconds.
python3 run.py stress run -vu 10 -d 3

# run go.Go1Step scenarios with 10 virtual users, every scenario lasts 3 seconds.
python3 run.py stress run -vu 10 -d 3 -s go.Go1Step
```

The k6 config files, summary results, and outputs are in the `output` folder, e.g.:

```bash
# you should install jq to parse json.
# how many checks
jq .metrics.checks output/result_Go1Step.json

# summary latency
jq .metrics.latency output/result_Go1Step.json

# summary error message
awk -F ',' 'NR>1{print $NF}' output/output_Go1Step.csv |sort|uniq -c
```

If you use JMeter, there is no automation yet; you can test by adjusting JMeter manually, see [jmx](ldbc/jmx/go_step.jmx) and [java](util/LdbcGoStep/src/main/java/vesoft/LdbcGoStep.java) for details.

## and more

73 changes: 58 additions & 15 deletions nebula_bench/cli.py
@@ -5,6 +5,8 @@
from nebula_bench.utils import logger
from nebula_bench.controller import NebulaController
from nebula_bench.utils import run_process
from nebula_bench.stress import StressFactory


SH_COMMAND = "/bin/bash"

@@ -102,21 +104,62 @@ def importer(folder, address, user, password, space, vid_type, dry_run):
    nc.release()


# @nebula.command(help="initial nebula graph, including create indexes")
# @common
# def init(folder, address, user, password, space):
#     nc = NebulaController(
#         data_folder=folder,
#         user=user,
#         password=password,
#         address=address,
#         space=space,
#         vid_type="int",
#     )
@nebula.command(help="initial nebula graph, including create indexes")
@common
def init(folder, address, user, password, space):
    nc = NebulaController(
        data_folder=folder,
        user=user,
        password=password,
        address=address,
        space=space,
        vid_type="int",
    )

    nc.init_space()


# nc.init_space()
@cli.group()
def stress():
    pass


# @cli.group()
# def stress():
#     pass
@stress.command()
@common
@click.option(
    "-t",
    "--vid-type",
    default="int",
    help="space vid type, values should be [int, string], default: int",
)
@click.option("-vu", default=100, help="concurrent virtual users, default: 100")
@click.option(
    "-d", "--duration", default=60, help="duration for every scenario, unit: second, default: 60"
)
@click.option("-s", "--scenarioes", default="all", help="special scenarioes, e.g. go.Go1Step")
@click.option("-c", "--controller", default="k6", help="using which test tool")
@click.option(
    "--dry-run",
    default=False,
    is_flag=True,
    help="Dry run, just dump stress testing config file, default: False",
)
def run(
    folder, address, user, password, space, vid_type, scenarioes, controller, vu, duration, dry_run
):
    stress = StressFactory.gen_stress(
        _type=controller,
        folder=folder,
        address=address,
        user=user,
        password=password,
        space=space,
        vid_type=vid_type,
        scenarios=scenarioes,
        vu=vu,
        duration=duration,
        dry_run=dry_run,
    )
    stress.run()

    pass
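
The `StressFactory` used by the new `stress run` command lives in `nebula_bench/stress.py`, which is not shown in this diff excerpt. Purely as an illustration of dispatching on the `-c/--controller` value, a factory could look like the sketch below; apart from the `StressFactory.gen_stress` name, every class, attribute, and behavior here is hypothetical.

```python
# Hypothetical sketch only: apart from StressFactory.gen_stress, nothing below
# is taken from the PR; it just illustrates dispatching on the controller value.
class K6Stress:
    def __init__(self, **kwargs):
        # keep the CLI options (folder, address, vu, duration, ...) for later use
        self.kwargs = kwargs

    def run(self):
        # render the k6 config files into the output folder and invoke k6
        raise NotImplementedError


class StressFactory:
    controllers = {"k6": K6Stress}

    @classmethod
    def gen_stress(cls, _type, **kwargs):
        try:
            controller_cls = cls.controllers[_type]
        except KeyError:
            raise ValueError("unknown stress controller: {}".format(_type))
        return controller_cls(**kwargs)
```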
81 changes: 7 additions & 74 deletions nebula_bench/common/base.py
@@ -1,7 +1,6 @@
# -*- encoding: utf-8 -*-
import re
import time
import gevent
from collections import deque
import csv
from pathlib import Path
@@ -138,85 +137,19 @@ def __new__(cls, name, bases, attrs, *args, **kwargs):
        # super(ScenarioMeta, cls).__new__(cls, name, bases, attrs, *args, **kwargs)
        if name == "BaseScenario":
            return type.__new__(cls, name, bases, attrs)
        report_name = attrs.get("report_name")
        result_file_name = attrs.get("result_file_name")
        if result_file_name is None:
            result_file_name = "_".join(report_name.split(" "))
        attrs["result_file_name"] = result_file_name
        statement = attrs.get("statement")
        parameters = attrs.get("parameters") or ()
        latency_warning_us = attrs.get("latency_warning_us")
        _generator = StmtGenerator(statement, parameters, setting.DATA_FOLDER)
        flag = False

        attrs["generator"] = _generator
        attrs["client"] = NebulaClient()

        def my_task(self):
            nonlocal flag

            stmt = next(_generator)

            # sleep for first request
            if not flag:
                logger.info("first stmt is {}".format(stmt))
                gevent.sleep(3)
                flag = True

            cur_time = time.monotonic()
            r = self.client.execute(stmt)
            total_time = time.monotonic() - cur_time
            assert isinstance(r, ResultSet)
            # warning the latency for slow statement.
            if latency_warning_us is not None:
                if r.latency() > latency_warning_us:
                    logger.warning("the statement [{}] latency is {} us".format(stmt, r.latency()))
            if r.is_succeeded():
                self.environment.events.request_success.fire(
                    request_type="Nebula",
                    name=report_name,
                    response_time=total_time * 1000,
                    response_length=0,
                )
            else:
                logger.error(
                    "the statement [{}] is not succeeded, error message is {}".format(
                        stmt, r.error_msg()
                    )
                )
                self.environment.events.request_failure.fire(
                    request_type="Nebula",
                    name=report_name,
                    response_time=total_time * 1000,
                    response_length=0,
                    exception=Exception(r.error_msg()),
                )

        attrs["tasks"] = [my_task]
        if attrs.get("name", None) is None:
            attrs["name"] = name

        return type.__new__(cls, name, bases, attrs)


class BaseScenario(metaclass=ScenarioMeta):
    abstract = True
    report_name: str
    result_file_name: str
    statement: str
    parameters = ()

    def __init__(self, environment):
        from locust.user.users import UserMeta

        self.environment = environment

    def on_start(self):
        self.client.add_session()

    def on_stop(self):
        self.client.release_session()


query = namedtuple("query", ["name", "stmt"])
    nGQL: str
    stage: dict
    csv_path: str
    csv_index: list
    name: str


class BaseQuery(object):
9 changes: 9 additions & 0 deletions nebula_bench/scenarios/find_path.py
@@ -0,0 +1,9 @@
# -*- encoding: utf-8 -*-
from nebula_bench.common.base import BaseScenario


class FindShortestPath(BaseScenario):
    abstract = False
    nGQL = "FIND SHORTEST PATH FROM {} TO {} OVER *"
    csv_path = "social_network/dynamic/person_knows_person.csv"
    csv_index = [0, 1]
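
This scenario has two `{}` placeholders, and `csv_index = [0, 1]` points at the first two columns of `person_knows_person.csv`. As a hedged illustration (the actual substitution happens inside the test runner, which is not shown in this excerpt, and the ids below are hypothetical), each request is presumably built roughly like this:

```python
# illustrative only: how one CSV row could fill the two placeholders above
row = ["933", "4139"]  # hypothetical person ids from person_knows_person.csv
stmt = "FIND SHORTEST PATH FROM {} TO {} OVER *".format(row[0], row[1])
# stmt == "FIND SHORTEST PATH FROM 933 TO 4139 OVER *"
```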
23 changes: 23 additions & 0 deletions nebula_bench/scenarios/go.py
@@ -0,0 +1,23 @@
# -*- encoding: utf-8 -*-
from nebula_bench.common.base import BaseScenario


class BaseGoScenario(BaseScenario):
    abstract = True
    nGQL = "GO 1 STEP FROM {} OVER KNOWS"
    csv_path = "social_network/dynamic/person.csv"
    csv_index = [0]


class Go1Step(BaseGoScenario):
    abstract = False
    nGQL = "GO 1 STEP FROM {} OVER KNOWS"


class Go2Step(BaseGoScenario):
    abstract = False
    nGQL = "GO 2 STEP FROM {} OVER KNOWS"


class Go3Step(BaseGoScenario):
    abstract = False
    nGQL = "GO 3 STEP FROM {} OVER KNOWS"