Skip to content
50 changes: 44 additions & 6 deletions kcidev/libs/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,54 +17,92 @@ def load_toml(settings, subcommand):
fname = "kci-dev.toml"
config = None

logging.debug(f"Loading config for subcommand: {subcommand}")
logging.debug(f"Settings path: {settings}")

if os.path.exists(settings):
if os.path.isfile(settings):
with open(settings, "rb") as f:
config = tomllib.load(f)
logging.info(f"Loading config from: {settings}")
try:
with open(settings, "rb") as f:
config = tomllib.load(f)
logging.debug(f"Successfully loaded config from {settings}")
except Exception as e:
logging.error(f"Failed to parse TOML file {settings}: {e}")
kci_err(f"Failed to parse config file: {e}")
raise click.Abort()
else:
logging.error(f"Settings path is not a file: {settings}")
kci_err("The --settings location is not a kci-dev config file")
raise click.Abort()
return config

home_dir = os.path.expanduser("~")
user_path = os.path.join(home_dir, ".config", "kci-dev", fname)
logging.debug(f"Checking user config path: {user_path}")
if os.path.exists(user_path):
with open(user_path, "rb") as f:
config = tomllib.load(f)
logging.info(f"Loading config from user directory: {user_path}")
try:
with open(user_path, "rb") as f:
config = tomllib.load(f)
logging.debug("Successfully loaded user config")
except Exception as e:
logging.error(f"Failed to parse user config {user_path}: {e}")
kci_err(f"Failed to parse config file: {e}")
raise click.Abort()
return config

global_path = os.path.join("/", "etc", fname)
logging.debug(f"Checking global config path: {global_path}")
if os.path.exists(global_path):
with open(global_path, "rb") as f:
config = tomllib.load(f)
logging.info(f"Loading config from global directory: {global_path}")
try:
with open(global_path, "rb") as f:
config = tomllib.load(f)
logging.debug("Successfully loaded global config")
except Exception as e:
logging.error(f"Failed to parse global config {global_path}: {e}")
kci_err(f"Failed to parse config file: {e}")
raise click.Abort()
return config

# config and results subcommand work without a config file
if subcommand != "config" and subcommand != "results":
if not config:
logging.warning(f"No config file found for subcommand {subcommand}")
kci_err(
f"No config file found, please use `kci-dev config` to create a config file"
)
raise click.Abort()
else:
logging.debug(f"No config file required for {subcommand} subcommand")

return config


def config_path(settings):
    """Return the path of the first kci-dev config file that exists.

    Candidates are probed in priority order: the explicit --settings
    path, the per-user config directory, then the system-wide /etc
    location. Returns None when no candidate exists.
    """
    fname = "kci-dev.toml"

    logging.debug(f"Looking for config file, settings: {settings}")

    user_path = os.path.join(os.path.expanduser("~"), ".config", "kci-dev", fname)
    global_path = os.path.join("/", "etc", fname)

    # First existing candidate wins; the label keeps the log output
    # identical to the per-branch messages used previously.
    for candidate, label in (
        (settings, "settings"),
        (user_path, "user"),
        (global_path, "global"),
    ):
        if os.path.exists(candidate):
            logging.debug(f"Found config at {label} path: {candidate}")
            return candidate

    logging.debug("No config file found in any location")
    return None


def kci_info(content):
logging.info(content)
Expand Down
52 changes: 52 additions & 0 deletions kcidev/libs/dashboard.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import json
import logging
import urllib
from datetime import datetime, timedelta
from functools import wraps
Expand All @@ -20,32 +21,54 @@ def wrapper(endpoint, params, use_json, body=None, max_retries=3):
# Status codes that should trigger a retry
RETRY_STATUS_CODES = [429, 500, 502, 503, 504, 507]

logging.info(f"Dashboard API request: {func.__name__} to {endpoint}")
logging.debug(f"Full URL: {url}")
if body:
logging.debug(f"Request body: {json.dumps(body, indent=2)}")

while retries <= max_retries:
try:
logging.debug(f"Attempt {retries + 1}/{max_retries + 1} for {endpoint}")
r = func(url, params, use_json, body)

logging.debug(f"Response status code: {r.status_code}")

if r.status_code in RETRY_STATUS_CODES:
retries += 1
if retries <= max_retries:
logging.warning(
f"Retrying request due to status {r.status_code} (attempt {retries}/{max_retries})"
)
continue
else:
logging.error(
f"Failed after {max_retries} retries with status {r.status_code}"
)
kci_err(f"Failed after {max_retries} retries with 500 error.")
raise click.Abort()

r.raise_for_status()

data = r.json()
logging.debug(f"Response data size: {len(json.dumps(data))} bytes")

if "error" in data:
logging.error(f"API returned error: {data.get('error')}")
if use_json:
kci_msg(data)
else:
kci_msg("json error: " + str(data["error"]))
raise click.Abort()

logging.info(f"Successfully completed {func.__name__} request")
return data

except requests.exceptions.RequestException as e:
logging.error(f"Request exception for {endpoint}: {str(e)}")
kci_err(f"Failed to fetch from {DASHBOARD_API}: {str(e)}.")
raise click.Abort()

logging.error("Unexpected failure in API request - exhausted all attempts")
kci_err("Unexpected failure in API request")
raise click.Abort()

Expand All @@ -71,6 +94,9 @@ def dashboard_fetch_summary(origin, giturl, branch, commit, arch, use_json):
}
if arch is not None:
params["filter_architecture"] = arch

logging.info(f"Fetching summary for commit {commit} on {branch} branch")
logging.debug(f"Parameters: origin={origin}, git_url={giturl}, arch={arch}")
return dashboard_api_fetch(endpoint, params, use_json)


Expand All @@ -91,6 +117,11 @@ def dashboard_fetch_builds(
params["filter_start_date"] = start_date
if end_date is not None:
params["filter_end_date"] = end_date

logging.info(f"Fetching builds for commit {commit} on {branch} branch")
logging.debug(
f"Filters: arch={arch}, tree={tree}, start_date={start_date}, end_date={end_date}"
)
return dashboard_api_fetch(endpoint, params, use_json)


Expand All @@ -111,6 +142,11 @@ def dashboard_fetch_boots(
params["filter_start_date"] = start_date
if end_date is not None:
params["filter_end_date"] = end_date

logging.info(f"Fetching boots for commit {commit} on {branch} branch")
logging.debug(
f"Filters: arch={arch}, tree={tree}, start_date={start_date}, end_date={end_date}"
)
return dashboard_api_fetch(endpoint, params, use_json)


Expand All @@ -131,23 +167,31 @@ def dashboard_fetch_tests(
params["filter_start_date"] = start_date
if end_date is not None:
params["filter_end_date"] = end_date

logging.info(f"Fetching tests for commit {commit} on {branch} branch")
logging.debug(
f"Filters: arch={arch}, tree={tree}, start_date={start_date}, end_date={end_date}"
)
return dashboard_api_fetch(endpoint, params, use_json)


def dashboard_fetch_test(test_id, use_json):
    """Fetch the details of a single test from the dashboard API."""
    logging.info(f"Fetching test details for test ID: {test_id}")
    return dashboard_api_fetch(f"test/{test_id}", {}, use_json)


def dashboard_fetch_build(build_id, use_json):
    """Fetch the details of a single build from the dashboard API."""
    logging.info(f"Fetching build details for build ID: {build_id}")
    return dashboard_api_fetch(f"build/{build_id}", {}, use_json)


def dashboard_fetch_tree_list(origin, use_json):
    """Fetch the fast tree listing for the given origin."""
    logging.info(f"Fetching tree list for origin: {origin}")
    return dashboard_api_fetch("tree-fast", {"origin": origin}, use_json)


Expand All @@ -160,6 +204,10 @@ def dashboard_fetch_hardware_list(origin, use_json):
"endTimeStampInSeconds": int(now.timestamp()),
"startTimestampInSeconds": int(last_week.timestamp()),
}
logging.info(f"Fetching hardware list for origin: {origin}")
logging.debug(
f"Date range: {last_week.strftime('%Y-%m-%d')} to {now.strftime('%Y-%m-%d')}"
)
return dashboard_api_fetch("hardware/", params, use_json)


Expand All @@ -179,27 +227,31 @@ def _create_hardware_request_body(origin):
def dashboard_fetch_hardware_summary(name, origin, use_json):
    """Fetch the dashboard summary for one hardware platform."""
    # TODO: add extra filters: Commits, date, filter, origin
    logging.info(f"Fetching hardware summary for: {name} (origin: {origin})")
    endpoint = f"hardware/{urllib.parse.quote_plus(name)}/summary"
    return dashboard_api_post(
        endpoint, {}, use_json, _create_hardware_request_body(origin)
    )


def dashboard_fetch_hardware_boots(name, origin, use_json):
    """Fetch boot results for one hardware platform."""
    logging.info(f"Fetching hardware boots for: {name} (origin: {origin})")
    endpoint = f"hardware/{urllib.parse.quote_plus(name)}/boots"
    return dashboard_api_post(
        endpoint, {}, use_json, _create_hardware_request_body(origin)
    )


def dashboard_fetch_hardware_builds(name, origin, use_json):
    """Fetch build results for one hardware platform."""
    logging.info(f"Fetching hardware builds for: {name} (origin: {origin})")
    endpoint = f"hardware/{urllib.parse.quote_plus(name)}/builds"
    return dashboard_api_post(
        endpoint, {}, use_json, _create_hardware_request_body(origin)
    )


def dashboard_fetch_hardware_tests(name, origin, use_json):
    """Fetch test results for one hardware platform."""
    logging.info(f"Fetching hardware tests for: {name} (origin: {origin})")
    endpoint = f"hardware/{urllib.parse.quote_plus(name)}/tests"
    return dashboard_api_post(
        endpoint, {}, use_json, _create_hardware_request_body(origin)
    )
36 changes: 32 additions & 4 deletions kcidev/libs/files.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import gzip
import logging
import os
import re

Expand All @@ -10,17 +11,44 @@


def to_valid_filename(filename):
    """Strip characters that are not valid in a filename.

    Returns *filename* with every character matched by the module-level
    INVALID_FILE_CHARS pattern removed; logs at debug level only when
    something was actually stripped.
    """
    # Bug fixes: a stale pre-change `return INVALID_FILE_CHARS.sub(...)`
    # line left the cleaning/logging code unreachable, and the debug
    # message interpolated the literal "(unknown)" instead of the
    # original filename.
    cleaned = INVALID_FILE_CHARS.sub("", filename)
    if cleaned != filename:
        logging.debug(f"Cleaned filename: '{filename}' -> '{cleaned}'")
    return cleaned


def download_logs_to_file(log_url, log_file):
    """Download a gzip-compressed log and save it decompressed to disk.

    Parameters:
        log_url: URL of the gzip-compressed log to fetch.
        log_file: target filename (sanitized via to_valid_filename).

    Returns a "file://" URL pointing at the written file on success, or
    None when any step fails (errors are logged and reported via
    kci_err, not raised).
    """
    # Bug fixes: removed stale pre-change lines (a duplicate
    # `requests.get`/`gzip.decompress` pair and a bare `except:`) that a
    # botched merge left interleaved with the replacement code, which
    # would have downloaded twice and broken the except chain.
    logging.info(f"Downloading log from: {log_url}")
    logging.debug(f"Target file: {log_file}")
    try:
        # Download compressed log; surface HTTP errors explicitly.
        logging.debug("Fetching compressed log file")
        response = requests.get(log_url)
        response.raise_for_status()

        # Decompress log
        logging.debug(f"Downloaded {len(response.content)} bytes, decompressing")
        log = gzip.decompress(response.content)
        logging.debug(f"Decompressed to {len(log)} bytes")

        # Save to file under a sanitized name.
        log_file = to_valid_filename(log_file)
        logging.debug(f"Writing log to: {log_file}")
        with open(log_file, mode="wb") as file:
            file.write(log)

        log_path = "file://" + os.path.join(os.getcwd(), log_file)
        logging.info(f"Log saved successfully: {log_path}")
        return log_path
    except requests.exceptions.RequestException as e:
        logging.error(f"Failed to download log from {log_url}: {e}")
        kci_err(f"Failed to fetch log {log_url}.")
    except gzip.BadGzipFile as e:
        logging.error(f"Failed to decompress log from {log_url}: {e}")
        kci_err(f"Failed to decompress log {log_url}.")
    except OSError as e:
        logging.error(f"Failed to write log file {log_file}: {e}")
        kci_err(f"Failed to write log file {log_file}.")
    except Exception as e:
        # Last-resort catch so a single bad log never aborts the caller.
        logging.error(f"Unexpected error downloading log from {log_url}: {e}")
        kci_err(f"Failed to fetch log {log_url}.")
Loading