diff --git a/.gitignore b/.gitignore
index adbb97d..b056a73 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
-data/
\ No newline at end of file
+data/
+.idea
diff --git a/README.md b/README.md
index 49db96a..7cc67ad 100644
--- a/README.md
+++ b/README.md
@@ -28,12 +28,26 @@ Online reporting application to generate reports from JMeter(Taurus), Locust and
 $ http://IP_ADDRESS:2020
 ```
 
-5. Default credentials
+## Migration from v3 to v4
+* Shut down the app but leave the DB (postgres) running
+* Back up your v3 DB!
+* Run the following query to modify the DB schema:
+```
+ALTER TABLE jtl.items
+DROP COLUMN data_id;
+```
+* Dump all of your data (replace `<v3_db_container>` with the name of your running v3 DB container):
+```
+docker exec -t <v3_db_container> pg_dumpall -a -U postgres > backup_v3.sql
+```
-
-   ```
-   username: admin
-   password: 2Txnf5prDknTFYTVEXjj
-   ```
+* Import the data into the v4 DB (make sure the DB is up; the compose file below names the container `jtl-reporter-db`):
+```
+docker exec -i jtl-reporter-db psql -U postgres -d jtl_report < backup_v3.sql
+```
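+* Optionally, sanity-check the import before restarting the app (an illustrative query; any table from the v4 schema works):
+```
+docker exec -i jtl-reporter-db psql -U postgres -d jtl_report -c "SELECT count(*) FROM jtl.items;"
+```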
 
 ## Documentation 📖
 For additional information please refer to the [documentation](https://jtlreporter.site/docs/).
diff --git a/db/Dockerfile b/db/Dockerfile
index 932b89c..0e9278b 100644
--- a/db/Dockerfile
+++ b/db/Dockerfile
@@ -1,3 +1,3 @@
-FROM postgres:11
+FROM timescale/timescaledb:2.4.1-pg13
 ENV POSTGRES_DB jtl_report
 COPY schema.sql /docker-entrypoint-initdb.d/
\ No newline at end of file
diff --git a/db/schema.sql b/db/schema.sql
index dd6be58..d0b9d24 100644
--- a/db/schema.sql
+++ b/db/schema.sql
@@ -1,27 +1,393 @@
-CREATE EXTENSION "uuid-ossp";
+--
+-- PostgreSQL database cluster dump
+--
 
-CREATE SCHEMA IF NOT EXISTS jtl;
+SET default_transaction_read_only = off;
 
-CREATE TABLE jtl.projects(
-    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
-    project_name character varying(50) NOT NULL UNIQUE
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = on;
+
+
+SET statement_timeout = 0;
+SET lock_timeout = 0;
+SET idle_in_transaction_session_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = on;
+SELECT pg_catalog.set_config('search_path', '', false);
+SET check_function_bodies = false;
+SET xmloption = content;
+SET client_min_messages = warning;
+SET row_security = off;
+
+
+ALTER DATABASE jtl_report OWNER TO postgres;
+
+
+CREATE SCHEMA jtl;
+
+ALTER SCHEMA jtl OWNER TO postgres;
+
+
+CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA public;
+
+
+CREATE TYPE public.data_type AS ENUM (
+    'kpi',
+    'error',
+    'monitoring_logs'
+);
+
+ALTER TYPE public.data_type OWNER TO postgres;
+
+
+CREATE TYPE public.item_status AS ENUM (
+    '0',
+    '1',
+    '2',
+    '3',
+    '10'
+);
+
+ALTER TYPE public.item_status OWNER TO postgres;
+
+
+CREATE TYPE public.report_status AS ENUM (
+    'in_progress',
+    'error',
+    'ready'
+);
+
+ALTER TYPE public.report_status OWNER TO postgres;
+
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+
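+-- jtl.samples and jtl.monitor hold the raw time-series data; both are
+-- converted to TimescaleDB hypertables at the end of this script.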
+CREATE TABLE jtl.samples (
+    "timestamp" timestamp without time zone NOT NULL,
+    elapsed integer,
+    label text,
+    success boolean,
+    bytes integer,
+    sent_bytes integer,
+    connect integer,
+    hostname text,
+    status_code text,
+    all_threads integer,
+    grp_threads integer,
+    latency integer,
+    response_message text,
+    item_id uuid,
+    sut_hostname text
+);
+
+ALTER TABLE jtl.samples OWNER TO postgres;
+
+
+CREATE TABLE jtl.monitor (
+    "timestamp" timestamp without time zone NOT NULL,
+    cpu numeric,
+    mem numeric,
+    name text,
+    item_id uuid
+);
+
+ALTER TABLE jtl.monitor OWNER TO postgres;
+
+
+CREATE TABLE jtl.api_tokens (
+    id uuid DEFAULT public.uuid_generate_v4() NOT NULL,
+    token character varying(100),
+    description character varying(200) NOT NULL,
+    created_by uuid NOT NULL,
+    create_date timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL
+);
+
+ALTER TABLE jtl.api_tokens OWNER TO postgres;
+
+
+CREATE TABLE jtl.charts (
+    id uuid DEFAULT public.uuid_generate_v4() NOT NULL,
+    plot_data jsonb,
+    item_id uuid
 );
+
+ALTER TABLE jtl.charts OWNER TO postgres;
+
+
+CREATE TABLE jtl.data (
+    id uuid DEFAULT public.uuid_generate_v4() NOT NULL,
+    item_data jsonb NOT NULL,
+    item_id uuid NOT NULL,
+    data_type public.data_type
+);
+
+ALTER TABLE jtl.data OWNER TO postgres;
+
+
+CREATE TABLE jtl.item_stat (
+    id uuid DEFAULT public.uuid_generate_v4() NOT NULL,
+    item_id uuid NOT NULL,
+    stats jsonb NOT NULL,
+    overview jsonb,
+    sut jsonb
+);
+
+ALTER TABLE jtl.item_stat OWNER TO postgres;
+
+
 CREATE TABLE jtl.items (
-    id uuid DEFAULT uuid_generate_v4() PRIMARY KEY,
-    test_name character varying(40) NOT NULL,
-    project_id uuid NOT NULL REFERENCES jtl.projects(id),
-    jtl_data jsonb NOT NULL,
+    id uuid DEFAULT public.uuid_generate_v4() NOT NULL,
     note character varying(150),
-    environment character varying(20),
+    environment character varying(100),
     upload_time timestamp without time zone DEFAULT now(),
     start_time timestamp without time zone,
-    duration integer
+    duration integer,
+    scenario_id uuid NOT NULL,
+    base boolean,
+    status public.item_status DEFAULT '10'::public.item_status,
+    hostname character varying(200),
+    report_status public.report_status DEFAULT 'ready'::public.report_status NOT NULL,
+    threshold_result jsonb,
+    is_running boolean DEFAULT false
 );
 
-CREATE TABLE jtl.item_stat (
-    id uuid DEFAULT uuid_generate_v4() PRIMARY KEY,
-    item_id uuid NOT NULL REFERENCES jtl.items(id),
-    stats jsonb NOT NULL
+ALTER TABLE jtl.items OWNER TO postgres;
+
+
+CREATE TABLE jtl.notifications (
+    id uuid DEFAULT public.uuid_generate_v4() NOT NULL,
+    name character varying(100) NOT NULL,
+    url character varying(400) NOT NULL,
+    scenario_id uuid NOT NULL,
+    type text NOT NULL
 );
+
+ALTER TABLE jtl.notifications OWNER TO postgres;
+
+
+CREATE TABLE jtl.projects (
+    id uuid DEFAULT public.uuid_generate_v4() NOT NULL,
+    project_name character varying(50) NOT NULL
+);
+
+ALTER TABLE jtl.projects OWNER TO postgres;
+
+
+CREATE TABLE jtl.scenario (
+    id uuid DEFAULT public.uuid_generate_v4() NOT NULL,
+    name character varying(50) NOT NULL,
+    project_id uuid NOT NULL,
+    threshold_enabled boolean DEFAULT false NOT NULL,
+    threshold_error_rate numeric DEFAULT 5 NOT NULL,
+    threshold_percentile numeric DEFAULT 5 NOT NULL,
+    threshold_throughput numeric DEFAULT 5 NOT NULL,
+    analysis_enabled boolean DEFAULT true NOT NULL
+);
+
+ALTER TABLE jtl.scenario OWNER TO postgres;
+
+
+CREATE TABLE jtl.share_tokens (
+    id uuid DEFAULT public.uuid_generate_v4() NOT NULL,
+    token text NOT NULL,
+    name character varying(200),
+    item_id uuid NOT NULL
+);
+
+ALTER TABLE jtl.share_tokens OWNER TO postgres;
+
+
+CREATE TABLE jtl.user_item_chart_settings (
+    id uuid DEFAULT public.uuid_generate_v4() NOT NULL,
+    user_id uuid,
+    item_id uuid,
+    chart_settings jsonb NOT NULL
+);
+
+ALTER TABLE jtl.user_item_chart_settings OWNER TO postgres;
+
+
+CREATE TABLE jtl.users (
+    id uuid DEFAULT public.uuid_generate_v4() NOT NULL,
+    username character varying(100),
+    password character varying(100),
+    create_date timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL
+);
+
+ALTER TABLE jtl.users OWNER TO postgres;
+
+
+ALTER TABLE ONLY jtl.api_tokens
+    ADD CONSTRAINT api_tokens_pkey PRIMARY KEY (id);
+
+ALTER TABLE ONLY jtl.api_tokens
+    ADD CONSTRAINT api_tokens_token_key UNIQUE (token);
+
+ALTER TABLE ONLY jtl.charts
+    ADD CONSTRAINT charts_pkey PRIMARY KEY (id);
+
+ALTER TABLE ONLY jtl.item_stat
+    ADD CONSTRAINT item_stat_pkey PRIMARY KEY (id);
+
+ALTER TABLE ONLY jtl.items
+    ADD CONSTRAINT items_pkey PRIMARY KEY (id);
+
+ALTER TABLE ONLY jtl.notifications
+    ADD CONSTRAINT notifications_pkey PRIMARY KEY (id);
+
+ALTER TABLE ONLY jtl.projects
+    ADD CONSTRAINT projects_pkey PRIMARY KEY (id);
+
+ALTER TABLE ONLY jtl.projects
+    ADD CONSTRAINT projects_project_name_key UNIQUE (project_name);
+
+ALTER TABLE ONLY jtl.scenario
+    ADD CONSTRAINT scenario_pkey PRIMARY KEY (id);
+
+ALTER TABLE ONLY jtl.share_tokens
+    ADD CONSTRAINT share_tokens_pkey PRIMARY KEY (id);
+
+ALTER TABLE ONLY jtl.user_item_chart_settings
+    ADD CONSTRAINT user_item_chart_settings_pkey PRIMARY KEY (id);
+
+ALTER TABLE ONLY jtl.user_item_chart_settings
+    ADD CONSTRAINT user_item_chart_settings_user_id_item_id_constraint UNIQUE (user_id, item_id);
+
+ALTER TABLE ONLY jtl.users
+    ADD CONSTRAINT users_pkey PRIMARY KEY (id);
+
+ALTER TABLE ONLY jtl.users
+    ADD CONSTRAINT users_username_key UNIQUE (username);
+
+
+CREATE INDEX data_item_id_index ON jtl.data USING btree (item_id);
+
+CREATE INDEX generator_monitor_timestamp_idx ON jtl.monitor USING btree ("timestamp" DESC);
+
+CREATE INDEX item_stat_item_id_index ON jtl.item_stat USING btree (item_id);
+
+CREATE INDEX items_id_scenario_id_index ON jtl.items USING btree (id, scenario_id);
+
+CREATE INDEX projects_id_project_name_index ON jtl.projects USING btree (id, project_name);
+
+CREATE INDEX samples_elapsed_idx ON jtl.samples USING btree (elapsed);
+
+CREATE INDEX samples_item_idx ON jtl.samples USING btree (item_id);
+
+CREATE INDEX samples_timestamp_idx ON jtl.samples USING btree ("timestamp" DESC);
+
+CREATE INDEX scenario_id_project_id_name_index ON jtl.scenario USING btree (id, project_id, name);
+
+CREATE INDEX share_tokens_item_id_index ON jtl.share_tokens USING btree (item_id);
+
+CREATE UNIQUE INDEX user_item_chart_settings_user_id_item_id_key ON jtl.user_item_chart_settings USING btree (user_id, item_id);
+
+
+ALTER TABLE ONLY jtl.api_tokens
+    ADD CONSTRAINT api_tokens_created_by_fkey FOREIGN KEY (created_by) REFERENCES jtl.users(id) ON DELETE CASCADE;
+
+ALTER TABLE ONLY jtl.charts
+    ADD CONSTRAINT charts_item_id_fkey FOREIGN KEY (item_id) REFERENCES jtl.items(id) ON DELETE CASCADE;
+
+ALTER TABLE ONLY jtl.data
+    ADD CONSTRAINT data_item_id_fkey FOREIGN KEY (item_id) REFERENCES jtl.items(id) ON DELETE CASCADE;
+
+ALTER TABLE ONLY jtl.monitor
+    ADD CONSTRAINT generator_monitor_item_id_fkey FOREIGN KEY (item_id) REFERENCES jtl.items(id) ON DELETE CASCADE;
+
+ALTER TABLE ONLY jtl.item_stat
+    ADD CONSTRAINT item_stat_item_id_fkey FOREIGN KEY (item_id) REFERENCES jtl.items(id) ON DELETE CASCADE;
+
+ALTER TABLE ONLY jtl.items
+    ADD CONSTRAINT items_scenario_id_fkey FOREIGN KEY (scenario_id) REFERENCES jtl.scenario(id) ON DELETE CASCADE;
+
+ALTER TABLE ONLY jtl.notifications
+    ADD CONSTRAINT notifications_scenario_id_fkey FOREIGN KEY (scenario_id) REFERENCES jtl.scenario(id);
+
+ALTER TABLE ONLY jtl.samples
+    ADD CONSTRAINT samples_item_id_fkey FOREIGN KEY (item_id) REFERENCES jtl.items(id) ON DELETE CASCADE;
+
+ALTER TABLE ONLY jtl.scenario
+    ADD CONSTRAINT scenario_project_id_fkey FOREIGN KEY (project_id) REFERENCES jtl.projects(id) ON DELETE CASCADE;
+
+ALTER TABLE ONLY jtl.share_tokens
+    ADD CONSTRAINT share_tokens_item_id_fkey FOREIGN KEY (item_id) REFERENCES jtl.items(id) ON DELETE CASCADE;
+
+ALTER TABLE ONLY jtl.user_item_chart_settings
+    ADD CONSTRAINT user_item_chart_settings_item_id_fkey FOREIGN KEY (item_id) REFERENCES jtl.items(id) ON DELETE CASCADE;
+
+ALTER TABLE ONLY jtl.user_item_chart_settings
+    ADD CONSTRAINT user_item_chart_settings_user_id_fkey FOREIGN KEY (user_id) REFERENCES jtl.users(id) ON DELETE CASCADE;
+
+
+select * from pg_extension;
+
+
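+-- Register the time-series tables as TimescaleDB hypertables partitioned by
+-- "timestamp" (the extension is provided by the timescale/timescaledb base image).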
+SELECT public.create_hypertable('jtl.samples', 'timestamp');
+SELECT public.create_hypertable('jtl.monitor', 'timestamp');
diff --git a/docker-compose.yml b/docker-compose.yml
index 9466b8e..1c8676d 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -2,21 +2,20 @@ version: '2.1'
 services:
   fe:
-    image: novyl/jtl-reporter-fe:v3.4.6
+    image: novyl/jtl-reporter-fe:v4.0.0
     ports:
       - "2020:80"
     depends_on:
       - db
       - be
-      - mongodb
 
   db:
-    image: novyl/jtl-reporter-db
+    container_name: jtl-reporter-db
     build:
       context: ./db/
       dockerfile: Dockerfile
     volumes:
-      - ./data/jtl_report:/var/lib/postgresql/data
+      - ./data/jtl_reporter_v4:/var/lib/postgresql/data
     healthcheck:
       test: ["CMD-SHELL", "pg_isready -U postgres"]
       interval: 10s
@@ -25,43 +24,31 @@ services:
     environment:
       - POSTGRES_HOST_AUTH_METHOD=trust
 
-  mongodb:
-    image: mongo:4.2.5-bionic
-    container_name: jtl-reporter-mongodb
-    environment:
-      - MONGO_INITDB_DATABASE=jtl-data
-    volumes:
-      - ./data/jtl_report_mongo:/data/db
-      - ./mongodb/mongo-init.js:/docker-entrypoint-initdb.d/mongo-init.js:ro
-    ports:
-      - "27020:27017"
-
   be:
-    image: novyl/jtl-reporter-be:v3.4.2
+    image: novyl/jtl-reporter-be:v4.0.0
     ports:
       - "5000:5000"
     environment:
       - DB_HOST=db
-      - MONGO_CONNECTION_STRING=mongodb://mongodb:27017
       - JWT_TOKEN=27JU4qy73hchTMLoH8w9m # please change this token
       - JWT_TOKEN_LOGIN=GdK6TrCvX7rJRZJVg4ijt # please change this token, the same must be used for listener service
 
   migration:
-    image: novyl/jtl-reporter-be:v3.4.2
+    image: novyl/jtl-reporter-be:v4.0.0
     environment:
      - DATABASE_URL=postgres://postgres@db/jtl_report
     command: npm run migrate up
     depends_on:
       db:
         condition: service_healthy
-
+
   listener:
-    image: novyl/jtl-reporter-listener-service:v1.0.1
+    image: novyl/jtl-reporter-listener-service:v2.0.1
     ports:
       - "6000:6000"
-    environment:
-      - MONGO_CONNECTION_STRING=mongodb://mongodb:27017
+    environment:
+      - DB_HOST=db
       - JWT_TOKEN=GdK6TrCvX7rJRZJVg4ijt # paste the same token as in be service
       - JWT_TOKEN_LOGIN
-
-
+
+
diff --git a/mongodb/mongo-init.js b/mongodb/mongo-init.js
deleted file mode 100644
index 1e2b5f6..0000000
--- a/mongodb/mongo-init.js
+++ /dev/null
@@ -1,3 +0,0 @@
-db.createCollection('data-chunks');
-newCol = db.getCollection('data-chunks');
-newCol.createIndex({ dataId: -1 }, { name: "data-id-index" });
\ No newline at end of file
diff --git a/scripts/jtl_listener.py b/scripts/jtl_listener.py
index b1dda55..c1b35d4 100644
--- a/scripts/jtl_listener.py
+++ b/scripts/jtl_listener.py
@@ -2,7 +2,8 @@
 from time import time, sleep
 from pathlib import Path
 from locust.runners import WorkerRunner
-import os
+import os
+import sys
 import requests
 import socket
@@ -40,7 +40,7 @@ def __init__(
 
         self.is_worker_runner = isinstance(self.env.runner, WorkerRunner)
 
-        self.api_token = os.environ['JTL_API_TOKEN']
+        self.api_token = os.environ["JTL_API_TOKEN"]
         self.project_name = project_name
         self.scenario_name = scenario_name
         self.environment = environment
@@ -62,21 +62,20 @@
             "Latency",
             "IdleTime",
             "Connect",
-            "Hostname"
+            "Hostname",
+            "failureMessage"
         ]
         self.user_count = 0
 
         events = self.env.events
-        events.request_success.add_listener(self._request_success)
-        events.request_failure.add_listener(self._request_failure)
-
-        if self.is_worker_runner:
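+        # Locust 2.x merged the request_success/request_failure events into a single "request" event.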
+        events.request.add_listener(self._request)
+        if self.is_worker():
             events.report_to_master.add_listener(self._report_to_master)
         else:
+            events.test_start.add_listener(self._test_start)
+            events.test_stop.add_listener(self._test_stop)
             events.worker_report.add_listener(self._worker_report)
-            events.test_start.add_listener(self._test_start)
-            events.test_stop.add_listener(self._test_stop)
-
     def _test_start(self, *a, **kw):
         self._create_results_log()
@@ -85,16 +83,14 @@
     def _report_to_master(self, client_id, data):
         self.csv_results = []
 
     def _worker_report(self, client_id, data):
-        if 'csv' in data:
-            self.csv_results = data['csv']
-
-        if self.csv_results and len(self.csv_results) > 0:
+        self.csv_results += data.get("csv", [])
+        if len(self.csv_results) >= self.flush_size:
             self._flush_to_log()
 
     def _create_results_log(self):
         self.filename = "results_" + \
-            datetime.fromtimestamp(time()).strftime(
-                self.results_timestamp_format) + ".csv"
+                        datetime.fromtimestamp(time()).strftime(
+                            self.results_timestamp_format) + ".csv"
         Path("logs/").mkdir(parents=True, exist_ok=True)
         results_file = open('logs/' + self.filename, "w")
         results_file.write(self.field_delimiter.join(
@@ -113,7 +109,8 @@ def _flush_to_log(self):
     def _test_stop(self, *a, environment):
         sleep(5)  # wait for last reports to arrive
         if self.results_file:
-            self._flush_to_log()
+            self.results_file.write(self.row_delimiter.join(
+                self.csv_results) + self.row_delimiter)
         if self.project_name and self.scenario_name and self.api_token and self.environment:
             try:
                 self._upload_file()
@@ -127,17 +124,18 @@
             status=(None, 1))
         url = '%s:5000/api/projects/%s/scenarios/%s/items' % (
             self.backend_url, self.project_name, self.scenario_name)
-        print(files)
         response = requests.post(url, files=files, headers={
-                                 'x-access-token': self.api_token})
+            'x-access-token': self.api_token})
         if response.status_code != 200:
             raise Exception("Upload failed: %s" % response.text)
 
-    def add_result(self, success, _request_type, name, response_time, response_length, exception, **kw):
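+    # The unified request event passes the response object, the request context and the exception (None on success).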
+    def add_result(self, _request_type, name, response_time, response_length, response, context, exception, **kw):
         timestamp = str(int(round(time() * 1000)))
-        response_message = "OK" if success == "true" else "KO"
+        response_message: str = response.reason if "reason" in dir(response) else ""
+        status_code = getattr(response, "status_code", "0")  # response may be None if the request never completed
+        success = "false" if exception else "true"
         # check to see if the additional fields have been populated. If not, set to a default value
-        status_code = kw["status_code"] if "status_code" in kw else "0"
         data_type = kw["data_type"] if "data_type" in kw else "unknown"
         bytes_sent = kw["bytes_sent"] if "bytes_sent" in kw else "0"
         group_threads = str(self.runner.user_count)
@@ -162,7 +159,8 @@
             latency,
             idle_time,
             connect,
-            hostname
+            hostname,
+            str(exception) if exception else ""  # failureMessage column
         ]
         # Safe way to generate csv row up to RFC4180
         # https://datatracker.ietf.org/doc/html/rfc4180
@@ -170,13 +168,12 @@
         # Example: " -> ""
         csv_row_str = self.field_delimiter.join(['"' + x.replace('"', '""') + '"' for x in row])
         self.csv_results.append(csv_row_str)
-        if len(self.csv_results) >= self.flush_size and not self.is_worker_runner:
+        if len(self.csv_results) >= self.flush_size and not self.is_worker():
             self._flush_to_log()
 
-    def _request_success(self, request_type, name, response_time, response_length, **kw):
-        self.add_result("true", request_type, name,
-                        response_time, response_length, "", **kw)
+    def _request(self, request_type, name, response_time, response_length, response, context, exception, **kw):
+        self.add_result(request_type, name,
+                        response_time, response_length, response, context, exception)
 
-    def _request_failure(self, request_type, name, response_time, response_length, exception, **kw):
-        self.add_result("false", request_type, name, response_time,
-                        response_length, str(exception), **kw)
+    def is_worker(self):
+        return "--worker" in sys.argv
diff --git a/scripts/jtl_listener_service.py b/scripts/jtl_listener_service.py
index 50d99b9..e00fc9e 100644
--- a/scripts/jtl_listener_service.py
+++ b/scripts/jtl_listener_service.py
@@ -23,7 +23,6 @@ def __init__(
         hostname: str = socket.gethostname(),
         timestamp_format="%Y-%m-%d %H:%M:%S",
     ):
-        print(env.host)
         self.env = env
         self.runner = self.env.runner
         self.project_name = project_name
@@ -41,12 +40,11 @@ def __init__(
         self.results_timestamp_format = "%Y_%m_%d_%H_%M_%S"
         self._finished = False
         self.user_count = 0
+        self.cpu_usage = []
         self.jwt_token = None
 
         events = self.env.events
-        events.request_success.add_listener(self._request_success)
-        events.request_failure.add_listener(self._request_failure)
-
+        events.request.add_listener(self._request)
         events.worker_report.add_listener(self._worker_report)
         events.report_to_master.add_listener(self._report_to_master)
         events.test_start.add_listener(self._test_start)
@@ -54,18 +52,30 @@
         events.test_stop.add_listener(self._test_stop)
 
     def _run(self):
         while True:
-            if len(self.results) >= self.flush_size:
+            if self.item_id and len(self.results) >= self.flush_size:
                 results_to_log = self.results[:self.flush_size]
+                cpu_usage_to_log = self.cpu_usage[:self.flush_size]
                 del self.results[:self.flush_size]
-                self._log_results(results_to_log)
+                del self.cpu_usage[:self.flush_size]
+                self._log_results(results_to_log, cpu_usage_to_log)
             else:
                 if self._finished:
                     results_len = len(self.results)
-                    self._log_results(self.results)
+                    cpu_usage_len = len(self.cpu_usage)
+                    self._log_results(self.results, self.cpu_usage)
                     del self.results[:results_len]
+                    del self.cpu_usage[:cpu_usage_len]
                     break
             gevent.sleep(0.05)
 
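+    # The master samples its own CPU every 5 s; worker CPU usage arrives through report_to_master below.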
+    def _master_cpu_monitor(self):
+        while True:
+            self.cpu_usage.append({"name": "master", "cpu": self.get_cpu(), "timestamp": int(round(time() * 1000))})
+            if self._finished:
+                break
+            gevent.sleep(5)
+
     def _user_count(self):
         while True:
             self.user_count = self.runner.user_count
@@ -103,23 +112,23 @@ def _start_test_run(self):
             return response.json()
         except Exception:
             logging.error("Starting async item in Reporter failed")
-            logging.error(Exception)
             raise Exception
 
-    def _log_results(self, results):
+    def _log_results(self, results, cpu_usage):
         try:
             payload = {
-                "dataId": self.data_id,
-                "samples": results
+                "itemId": self.item_id,
+                "samples": results,
+                "monitor": cpu_usage,
             }
             headers = {
                 "x-access-token": self.jwt_token
             }
-            requests.post(
-                f"{self.listener_url}/api/v1/test-run/log-samples", json=payload, headers=headers)
+            response = requests.post(
+                f"{self.listener_url}/api/v2/test-run/log-samples", json=payload, headers=headers)
         except Exception:
-            logging.error("Unable to to get token")
+            logging.error("Logging results failed")
             raise Exception
 
@@ -130,25 +140,30 @@ def _stop_test_run(self):
             response = requests.post(
                 f"{self.be_url}/api/projects/{self.project_name}/scenarios/{self.scenario_name}/items/{self.item_id}/stop-async",
                 headers=headers)
-            return response.json()
         except Exception:
-            logging.error(Exception)
+            logging.error("Stopping test run has failed")
             raise Exception
 
     def _test_start(self, *a, **kw):
-        if self._is_master():
-            self.jwt_token = self._login()
-            logging.info("Setting up background tasks")
-            self._finished = False
-            self._background = gevent.spawn(self._run)
-            self._background_user = gevent.spawn(self._user_count)
-
-            response = self._start_test_run()
-            self.data_id = response["dataId"]
-            self.item_id = response["itemId"]
+        if not self.is_worker():
+            try:
+                self.jwt_token = self._login()
+                response = self._start_test_run()
+                self.item_id = response["itemId"]
+
+                logging.info("Setting up background tasks")
+                self._finished = False
+                self._background = gevent.spawn(self._run)
+                self._background_master_monitor = gevent.spawn(self._master_cpu_monitor)
+                self._background_user = gevent.spawn(self._user_count)
+                logging.info(response)
+            except Exception:
+                logging.error("Error while starting the test")
+                sys.exit(1)
 
     def _report_to_master(self, client_id, data):
         data["results"] = self.results
+        data["cpu_usage"] = {"name": client_id, "timestamp": int(round(time() * 1000)), "cpu": self.get_cpu()}
         self.results = []
 
     def _worker_report(self, client_id, data):
@@ -156,25 +171,30 @@
         for result in data['results']:
             result["allThreads"] = self.user_count
         self.results += data['results']
+        if 'cpu_usage' in data:
+            self.cpu_usage.append(data["cpu_usage"])
 
     def _test_stop(self, *a, **kw):
-        sleep(5)  # wait for last reports to arrive
-        logging.info(
-            f"Test is stopping, number of remaining results to be uploaded yet: {len(self.results)}")
-        self._finished = True
-        self._background.join(timeout=None)
-        self._background_user.join(timeout=None)
-        logging.info(f"Results :::::: {len(self.results)}")
-        self._stop_test_run()
-
-    def add_result(self, success, _request_type, name, response_time, response_length, exception, **kw):
-        timestamp = str(int(round(time() * 1000)))
-        response_message = "OK" if success == "true" else "KO"
-        status_code = kw["status_code"] if "status_code" in kw else "0"
+        if not self.is_worker():
+            sleep(10)  # wait for last reports to arrive
+            logging.info(
+                f"Test is stopping, number of remaining results to be uploaded yet: {len(self.results)}")
+            self._finished = True
+            self._background.join(timeout=5)
+            self._background_user.join(timeout=5)
+            self._background_master_monitor.join(timeout=None)
+            logging.info(f"Number of results not uploaded {len(self.results)}")
+            self._stop_test_run()
+
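+    # Build a JMeter-style sample record; the unified request event does not expose latency/connect, so they default to 0.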
+    def add_result(self, _request_type, name, response_time, response_length, response, context, exception):
+        timestamp = int(round(time() * 1000))
+        response_message: str = response.reason if "reason" in dir(response) else ""
+        status_code = getattr(response, "status_code", "0")  # response may be None if the request never completed
         group_threads = str(self.runner.user_count)
         all_threads = str(self.runner.user_count)
-        latency = kw["latency"] if "latency" in kw else 0
-        connect = kw["connect"] if "connect" in kw else 0
+        latency = 0
+        connect = 0
 
         result = {
             "timeStamp": timestamp,
@@ -182,23 +201,23 @@ def add_result(self, success, _request_type, name, response_time, response_lengt
             "label": name,
             "responseCode": str(status_code),
             "responseMessage": response_message,
-            "success": success,
-            "failureMessage": exception,
+            "success": "false" if exception else "true",
+            "failureMessage": str(exception) if exception else "",
             "bytes": str(response_length),
             "grpThreads": str(group_threads),
             "allThreads": str(all_threads),
-            "Latency": latency,
-            "Connect": connect,
+            "latency": latency,
+            "connect": connect,
         }
         self.results.append(result)
 
-    def _request_success(self, request_type, name, response_time, response_length, **kw):
-        self.add_result("true", request_type, name,
-                        response_time, response_length, "", **kw)
+    def _request(self, request_type, name, response_time, response_length, response, context, exception, **kw):
+        self.add_result(request_type, name,
+                        response_time, response_length, response, context, exception)
+
+    def is_worker(self):
+        return "--worker" in sys.argv
 
-    def _request_failure(self, request_type, name, response_time, response_length, exception, **kw):
-        self.add_result("false", request_type, name, response_time,
-                        response_length, str(exception), **kw)
+    def get_cpu(self):
+        return self.runner.current_cpu_usage
-
-    def _is_master(self):
-        return "--master" in sys.argv
diff --git a/scripts/upload_jtl.py b/scripts/upload_jtl.py
index c44cc32..39e2479 100644
--- a/scripts/upload_jtl.py
+++ b/scripts/upload_jtl.py
@@ -40,7 +40,6 @@ def get_note(note):
 
     files = dict(
         kpi=open(latest_folder + '/kpi.jtl', 'rb'),
-        errors=open(latest_folder + '/error.jtl', 'rb'),
         environment=(None, args.environment),
         status=(None, args.exit_code),
         note=(None, get_note(args.exit_reason))