
Commit

cleanup common lib
gurevichdmitry committed Mar 26, 2024
1 parent a9d116e commit 792a2ea
Showing 3 changed files with 0 additions and 353 deletions.
97 changes: 0 additions & 97 deletions tests/commonlib/io_utils.py
@@ -217,103 +217,6 @@ def in_place_copy(source, destination):
            for line in sfile:
                dfile.write(line)

    @staticmethod
    def edit_process_file(container_name: str, dictionary, resource: str):
        """
        This function edits a process file
        @param container_name: Container node
        @param dictionary: Process parameters to set/unset
        @param resource: File / Resource path
        @return: None
        """
        if container_name == "":
            raise ValueError("Container name must not be empty")

        current_resource = Path(resource)
        if not current_resource.is_file():
            raise FileNotFoundError(f"File {resource} does not exist or mount missing.")

        # Open and load the YAML into a variable
        with current_resource.open(encoding="utf-8") as file:
            r_file = yaml.safe_load(file)

        # Get process configuration arguments
        arguments = r_file["spec"]["containers"][0]["command"]

        # Collect set/unset keys and values from the dictionary
        set_dict = dictionary.get("set", {})
        unset_list = dictionary.get("unset", [])

        # Cycle across set items from the dictionary
        for s_key, s_value in set_dict.items():
            # Find out whether the set key already exists in the configuration arguments
            if any(s_key == x.split("=")[0] for x in arguments):
                # Replace the value of the key with the new value from the set items
                arguments = [f"{s_key}={s_value}" if arg.split("=")[0] == s_key else arg for arg in arguments]
            else:
                # If the key does not exist in the configuration arguments,
                # append the key/value pair from the set items
                arguments.append(f"{s_key}={s_value}")

        # Cycle across unset items from the dictionary
        for us_key in unset_list:
            # Filter out the unset keys from the configuration arguments
            arguments = [x for x in arguments if us_key != x.split("=")[0]]

        # Override the configuration arguments with the newly built arguments
        r_file["spec"]["containers"][0]["command"] = arguments

        # Write the newly built configuration back to the file
        with current_resource.open(mode="w", encoding="utf-8") as file:
            yaml.dump(r_file, file)
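
# Editor's sketch (not part of the original file): a minimal usage example,
# assuming the enclosing class is the module's filesystem helper (its name is
# not visible in this hunk; "FsClient" is a placeholder) and a kind-style
# control-plane container:
#
#     changes = {
#         "set": {"--audit-log-maxage": "30"},
#         "unset": ["--profiling"],
#     }
#     FsClient.edit_process_file(
#         container_name="kind-control-plane",
#         dictionary=changes,
#         resource="/etc/kubernetes/manifests/kube-apiserver.yaml",
#     )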

    @staticmethod
    def edit_config_file(container_name: str, dictionary, resource: str):
        """
        This function edits a config file
        @param container_name: Container node
        @param dictionary: Config parameters to set/unset
        @param resource: Config path
        @return: None
        """
        if container_name == "":
            raise ValueError("Container name must not be empty")

        current_resource = Path(resource)
        if not current_resource.is_file():
            raise FileNotFoundError(f"File {resource} does not exist or mount missing.")

        # Open and load the YAML into a variable
        with current_resource.open(encoding="utf-8") as file:
            r_file = yaml.safe_load(file)

        # Collect set/unset keys and values from the dictionary
        set_dict = dictionary.get("set", {})
        unset_list = dictionary.get("unset", [])

        # Merge the two dictionaries, giving priority to the set items
        r_file = {**r_file, **set_dict}

        # Cycle across unset items from the dictionary
        for us_key in unset_list:
            # Parse dot-separated key paths
            keys = us_key.split(".")
            key_to_del = keys.pop()
            r_dict = r_file

            # Walk down the dictionary for nested keys
            for key in keys:
                r_dict = r_dict.get(key, None)
                if r_dict is None:
                    # Non-existing nested key
                    break
            # Remove the nested key only when the full path exists
            if isinstance(r_dict, dict) and key_to_del in r_dict:
                del r_dict[key_to_del]

        # Write the newly built config back to the file
        with current_resource.open(mode="w", encoding="utf-8") as file:
            yaml.dump(r_file, file)
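
# Editor's sketch (placeholder values, same "FsClient" assumption as above):
#
#     changes = {
#         "set": {"authentication": {"anonymous": {"enabled": False}}},
#         "unset": ["authentication.x509.clientCAFile"],
#     }
#     FsClient.edit_config_file(
#         container_name="kind-worker",
#         dictionary=changes,
#         resource="/var/lib/kubelet/config.yaml",
#     )
#
# Note that the merge ({**r_file, **set_dict}) is shallow: a "set" entry
# replaces the whole top-level key rather than deep-merging into it.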

    @staticmethod
    def get_beat_status_from_json(response: str, beat_name: str) -> str:
        """
74 changes: 0 additions & 74 deletions tests/commonlib/utils.py
@@ -130,34 +130,6 @@ def get_logs_evaluation(
    return None


def dict_contains(small, big):
    """
    Checks whether the small dict-like object is contained inside the big one
    @param small: dict-like object
    @param big: dict-like object
    @return: True iff the small dict-like object is contained inside the big object
    """
    if isinstance(small, dict):
        if not set(small.keys()) <= set(big.keys()):
            return False
        for key in small.keys():
            if not dict_contains(small.get(key), big.get(key)):
                return False
        return True

    return small == big
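
# Editor's sketch of dict_contains semantics (illustrative values):
#
#     big = {"metadata": {"name": "mock-pod", "namespace": "default"}, "phase": "Running"}
#     dict_contains({"metadata": {"name": "mock-pod"}}, big)  # True
#     dict_contains({"metadata": {"name": "other"}}, big)     # False
#     dict_contains("Running", big["phase"])                  # True: non-dicts compare with ==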


def get_resource_identifier(body):
    def resource_identifier(resource):
        if getattr(resource, "to_dict", None):
            return dict_contains(body, resource.to_dict())
        if getattr(resource, "__dict__", None):
            # vars() yields the attribute dict; dict(resource) would raise
            # TypeError for objects that are not mappings or iterables
            return dict_contains(body, vars(resource))
        return False

    return resource_identifier
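
# Editor's sketch: get_resource_identifier builds a predicate for matching
# Kubernetes API objects against a partial body (hypothetical usage):
#
#     is_mock_pod = get_resource_identifier({"metadata": {"name": "mock-pod"}})
#     matches = [res for res in resources if is_mock_pod(res)]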


def wait_for_cycle_completion(elastic_client, nodes: list) -> bool:
    """
    Wait for all agents to finish sending findings to ES.
@@ -213,52 +185,6 @@ def is_timeout(start_time: time, timeout: int) -> bool:
    return time.time() - start_time > timeout
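
# Editor's sketch of the polling idiom is_timeout supports (done() is a
# hypothetical predicate):
#
#     start_time = time.time()
#     while not done() and not is_timeout(start_time, timeout=600):
#         time.sleep(1)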


def command_contains_arguments(command, arguments_dict):
    # Parse the "key=value" tokens that follow the executable name
    args = command.split()[1:]
    args_dict = {}
    for arg in args:
        # partition() tolerates flags without "=", mapping them to ""
        key, _, val = arg.partition("=")
        args_dict[key] = val

    set_dict = arguments_dict.get("set", {})
    unset_list = arguments_dict.get("unset", [])

    for key, val in set_dict.items():
        arg_val = args_dict.get(key)
        if val != arg_val:
            return False

    for key in unset_list:
        if key in args_dict:
            return False

    return True
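
# Editor's sketch (illustrative command line):
#
#     cmd = "kube-apiserver --profiling=false --audit-log-maxage=30"
#     command_contains_arguments(cmd, {"set": {"--audit-log-maxage": "30"}})  # True
#     command_contains_arguments(cmd, {"unset": ["--profiling"]})             # False: flag is present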


def config_contains_arguments(config, arguments_dict):
    set_dict = arguments_dict.get("set", {})
    unset_list = arguments_dict.get("unset", [])

    if not dict_contains(set_dict, config):
        return False

    for arg in unset_list:
        current = config
        arg_set = True

        for arg_part in arg.split("."):
            if (not isinstance(current, dict)) or (arg_part not in current):
                arg_set = False
                break

            current = current[arg_part]

        if arg_set:
            return False

    return True
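
# Editor's sketch (illustrative config):
#
#     cfg = {"authentication": {"anonymous": {"enabled": False}}}
#     config_contains_arguments(cfg, {"set": {"authentication": {"anonymous": {"enabled": False}}}})  # True
#     config_contains_arguments(cfg, {"unset": ["authentication.anonymous.enabled"]})  # False: path exists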


def get_findings(elastic_client, config_timeout, query, sort, match_type):
    """
    Retrieves data from an Elasticsearch index using the specified query and sort parameters.
182 changes: 0 additions & 182 deletions tests/product/tests/conftest.py
@@ -3,191 +3,9 @@
product tests.
"""

from pathlib import Path
import time
import json
import pytest
from loguru import logger
from kubernetes.client import ApiException
from kubernetes.utils import FailToCreateError
from commonlib.io_utils import get_k8s_yaml_objects

from product.tests.parameters import TEST_PARAMETERS


DEPLOY_YML = "../../test_environments/cloudbeat-pytest.yml"
KUBE_RULES_ENV_YML = "../../test_environments/mock-pod.yml"
POD_RESOURCE_TYPE = "Pod"


@pytest.fixture(scope="module", name="cloudbeat_start_stop")
def data(k8s, api_client, cloudbeat_agent):
"""
This fixture starts cloudbeat, in case cloudbeat exists
restart will be performed
@param k8s: Kubernetes wrapper object
@param api_client: Docker or FileSystem client
@param cloudbeat_agent: Cloudbeat configuration
@return:
"""
file_path = Path(__file__).parent / DEPLOY_YML
if k8s.get_agent_pod_instances(
agent_name=cloudbeat_agent.name,
namespace=cloudbeat_agent.namespace,
):
k8s.delete_from_yaml(get_k8s_yaml_objects(file_path=file_path))
k8s.start_agent(yaml_file=file_path, namespace=cloudbeat_agent.namespace)
time.sleep(5)
yield k8s, api_client, cloudbeat_agent
k8s_yaml_list = get_k8s_yaml_objects(file_path=file_path)
k8s.delete_from_yaml(yaml_objects_list=k8s_yaml_list) # stop agent
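
# Editor's sketch (hypothetical test): how a product test consumes the
# fixture chain defined above:
#
#     def test_agent_is_running(cloudbeat_start_stop):
#         k8s, api_client, agent = cloudbeat_start_stop
#         assert k8s.get_agent_pod_instances(
#             agent_name=agent.name,
#             namespace=agent.namespace,
#         )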


@pytest.fixture(scope="module", name="config_node_pre_test")
def config_node_pre_test(cloudbeat_start_stop):
"""
This fixture performs extra operations required in
file system rules tests.
Before test execution creates temporary files
After test execution delete files created in Before section
@param cloudbeat_start_stop: Cloudbeat fixture execution
@return: Kubernetes object, Api client, Cloudbeat configuration
"""
k8s_client, api_client, cloudbeat_agent = cloudbeat_start_stop

nodes = k8s_client.get_cluster_nodes()

temp_file_list = [
"/var/lib/etcd/some_file.txt",
"/etc/kubernetes/pki/some_file.txt",
"/etc/kubernetes/pki/some_dir/some_file.txt",
]

config_files = {
"/etc/kubernetes/pki/admission_config.yaml": """apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: EventRateLimit
path: /etc/kubernetes/pki/event_config.yaml""",
"/etc/kubernetes/pki/event_config.yaml": """apiVersion: eventratelimit.admission.k8s.io/v1alpha1
kind: Configuration
limits:
- type: Namespace
qps: 50
burst: 100
cacheSize: 2000
- type: User
qps: 10
burst: 50""",
}

# create temporary files:
for node in nodes:
if node.metadata.name != cloudbeat_agent.node_name:
continue
for temp_file in temp_file_list:
api_client.exec_command(
container_name=node.metadata.name,
command="mkdir",
param_value=str(Path(temp_file).parent),
resource="",
)
api_client.exec_command(
container_name=node.metadata.name,
command="touch",
param_value=temp_file,
resource="",
)

# create config files:
for config_file, contents in config_files.items():
api_client.exec_command(
container_name=node.metadata.name,
command="cat",
param_value=contents,
resource=config_file,
)

yield k8s_client, api_client, cloudbeat_agent

# delete temporary files:
for node in nodes:
if node.metadata.name != cloudbeat_agent.node_name:
continue
for temp_file in temp_file_list:
api_client.exec_command(
container_name=node.metadata.name,
command="unlink",
param_value=temp_file,
resource="",
)


@pytest.fixture(scope="module", name="clean_test_env")
def clean_test_env(cloudbeat_start_stop):
"""
Sets up a testing env with needed kube resources
"""
k8s_client, api_client, cloudbeat_agent = cloudbeat_start_stop

file_path = Path(__file__).parent / KUBE_RULES_ENV_YML
k8s_resources = get_k8s_yaml_objects(file_path=file_path)

for yml_resource in k8s_resources:
# check if we already have one - delete if so
resource_type, metadata = yml_resource["kind"], yml_resource["metadata"]
relevant_metadata = {k: metadata[k] for k in ("name", "namespace") if k in metadata}
try:
# try getting the resource before deleting it - will raise exception if not found
k8s_client.get_resource(resource_type=resource_type, **relevant_metadata)
k8s_client.delete_resources(resource_type=resource_type, **relevant_metadata)
k8s_client.wait_for_resource(
resource_type=resource_type,
status_list=["DELETED"],
**relevant_metadata,
)
except ApiException as not_found:
logger.error(
f"no {relevant_metadata['name']} online - setting up a new one: {not_found}",
)
# create resource

k8s_client.create_from_dict(data=yml_resource, **relevant_metadata)

yield k8s_client, api_client, cloudbeat_agent
# teardown
k8s_client.delete_from_yaml(yaml_objects_list=k8s_resources)


@pytest.fixture(scope="module", name="test_env")
def test_env(cloudbeat_start_stop):
"""
Sets up a testing env with needed kube resources
"""
k8s, api_client, cloudbeat_agent = cloudbeat_start_stop

file_path = Path(__file__).parent / KUBE_RULES_ENV_YML
k8s_resources = get_k8s_yaml_objects(file_path=file_path)

try:
k8s.create_from_yaml(yaml_file=file_path, namespace=cloudbeat_agent.namespace)
except FailToCreateError as conflict:
logger.error([json.loads(c.body)["message"] for c in conflict.api_exceptions])

for yml_resource in k8s_resources:
resource_type, metadata = yml_resource["kind"], yml_resource["metadata"]
relevant_metadata = {k: metadata[k] for k in ("name", "namespace") if k in metadata}
k8s.wait_for_resource(
resource_type=resource_type,
status_list=["RUNNING", "ADDED"],
**relevant_metadata,
)

yield k8s, api_client, cloudbeat_agent
# teardown
k8s.delete_from_yaml(yaml_objects_list=k8s_resources) # stop agent


def pytest_generate_tests(metafunc):
    """
    This function generates the test cases to run using the set of
