Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[changelog/Added] Make it possible to add custom validation commands #124

Merged
merged 3 commits into from Jan 30, 2020
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
56 changes: 55 additions & 1 deletion apigentools/commands/command.py
Expand Up @@ -3,11 +3,15 @@
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import abc
import glob
import logging
import os

import chevron

from apigentools.utils import run_command
from apigentools.utils import run_command, volumes_from

log = logging.getLogger(__name__)


class Command(abc.ABC):
Expand Down Expand Up @@ -63,6 +67,56 @@ def setup_git_config(self, cwd=None):
cwd=cwd,
)

def _render_command_args(self, args, chevron_vars):
    """Recursively render chevron (mustache) templates in *args*.

    Strings are rendered with *chevron_vars*; lists and dicts are
    walked recursively (dict keys are left untouched, only values
    are rendered). Any other value is returned unchanged.
    """
    if isinstance(args, str):
        return chevron.render(args, chevron_vars)
    if isinstance(args, list):
        return [self._render_command_args(item, chevron_vars) for item in args]
    if isinstance(args, dict):
        return {
            key: self._render_command_args(value, chevron_vars)
            for key, value in args.items()
        }
    return args

def run_config_command(
    self, command, what_command, additional_env=None, chevron_vars=None
):
    """Render and execute a single command defined in the config file.

    :param command: the ``ConfigCommand`` object to execute
    :param what_command: human-readable origin of the command (e.g.
        ``"validation"`` or ``"language 'java'"``), used in error messages
    :param additional_env: additional environment variables to set for the
        command, or ``None``
    :param chevron_vars: mapping used to render mustache templates in the
        commandline; ``cwd`` is always added/overwritten with the current
        working directory
    :raises ValueError: if the commandline references a function that is
        not in the allowed set
    """
    log.info("Running command '%s'", command.description)

    if chevron_vars is None:
        chevron_vars = {}
    chevron_vars["cwd"] = os.getcwd()

    to_run = []
    for part in self._render_command_args(command.commandline, chevron_vars):
        if isinstance(part, dict):
            # dict parts are callbacks into Python code; only explicitly
            # allowed functions may be invoked from a config file
            allowed_functions = {"glob": glob.glob, "volumes_from": volumes_from}
            function_name = part.get("function")
            function = allowed_functions.get(function_name)
            if function:
                result = function(*part.get("args", []), **part.get("kwargs", {}))
                # NOTE: we may need to improve this logic if/when we add more functions
                if isinstance(result, list):
                    to_run.extend(result)
                else:
                    to_run.append(result)
            else:
                # fixed typo in the error message: "Unknow" -> "Unknown"
                raise ValueError(
                    "Unknown function '{f}' in command '{d}' for '{l}'".format(
                        f=function_name, d=command.description, l=what_command
                    )
                )
        else:
            to_run.append(str(part))

    run_command(to_run, additional_env=additional_env)

@abc.abstractmethod
def run(self):
    """Entry point of the command; must be implemented by subclasses."""
    pass
29 changes: 3 additions & 26 deletions apigentools/commands/generate.py
Expand Up @@ -52,32 +52,9 @@ def run_language_commands(self, language, phase, cwd):
log.info("No '%s' commands found for language '%s'", phase, language)

for command in commands:
log.info("Running command '%s'", command.description)
to_run = []
for part in command.commandline:
if isinstance(part, dict):
allowed_functions = {"glob": glob.glob}
function_name = part.get("function")
function = allowed_functions.get(function_name)
if function:
result = function(
*part.get("args", []), **part.get("kwargs", {})
)
# NOTE: we may need to improve this logic if/when we add more functions
if isinstance(result, list):
to_run.extend(result)
else:
to_run.append(result)
else:
raise ValueError(
"Unknow function '{f}' in command '{d}' for language '{l}'".format(
f=function_name, d=command.description, l=language
)
)
else:
to_run.append(str(part))

run_command(to_run, additional_env=lc.command_env)
self.run_config_command(
command, "language '{l}'".format(l=language), lc.command_env
)

def render_downstream_templates(self, language, downstream_templates_dir):
""" Render the templates included in this repository under `downstream-templates/`
Expand Down
16 changes: 14 additions & 2 deletions apigentools/commands/validate.py
Expand Up @@ -20,16 +20,28 @@ def validate_spec(self, fs_path, language, version):
log_string += " ({})".format(fs_path)
try:
run_command([self.config.codegen_exec, "validate", "-i", fs_path])
self.run_validation_commands(fs_path)
log.info("Validation %s for API version %s successful", log_string, version)
return True
except:
log.error(
except Exception as e:
log_method = log.error
if self.args.verbose:
log_method = log.exception
log_method(
"Validation %s for API version %s failed, see the output above for errors",
log_string,
version,
)
return False

def run_validation_commands(self, spec_path):
    """Run all custom validation commands from config against *spec_path*."""
    validation_commands = self.config.get_validation_commands()
    if validation_commands:
        log.info("Running custom validation commands")

    for command in validation_commands:
        self.run_config_command(
            command, "validation", chevron_vars={"spec": spec_path}
        )

def run(self):
cmd_result = 0
languages = self.args.languages or self.config.languages
Expand Down
11 changes: 9 additions & 2 deletions apigentools/config.py
Expand Up @@ -17,6 +17,7 @@ def __init__(self, raw_dict):
"spec_versions": [],
"generate_extra_args": [],
"user_agent_client_name": "OpenAPI",
"validation_commands": [],
}
self.language_configs = {}
for lang, conf in raw_dict.get("languages", {}).items():
Expand All @@ -33,6 +34,12 @@ def __getattr__(self, attr):
def get_language_config(self, lang):
return self.language_configs[lang]

def get_validation_commands(self):
    """Return a ``ConfigCommand`` object for each configured validation command."""
    return [
        ConfigCommand("validation", cmd) for cmd in self.validation_commands
    ]

@classmethod
def from_file(cls, fpath):
with open(fpath) as f:
Expand Down Expand Up @@ -70,7 +77,7 @@ def get_stage_commands(self, stage):
cmds = self.raw_dict.get("commands", {}).get(stage, [])
cmd_objects = []
for cmd in cmds:
cmd_objects.append(LanguageCommand(stage, cmd))
cmd_objects.append(ConfigCommand(stage, cmd))
return cmd_objects

@property
Expand Down Expand Up @@ -111,7 +118,7 @@ def spec_sections(self):
return res


class LanguageCommand:
class ConfigCommand:
def __init__(self, stage, config):
self.stage = stage
self.config = config
Expand Down
25 changes: 25 additions & 0 deletions apigentools/utils.py
Expand Up @@ -414,3 +414,28 @@ def validate_duplicates(loaded_keys, full_spec_keys):
for key in loaded_keys:
if key in full_spec_keys:
raise ValueError("Duplicate field {} found in spec. Exiting".format(key))


def volumes_from(alt_volumes):
    """Build docker volume arguments for spawning dockerized tools.

    When running inside the apigentools container (``APIGENTOOLS_IMAGE``
    is set), try to discover the current container ID from
    ``/proc/self/cgroup`` and return ``["--volumes-from", <id>]`` so that
    spawned containers share this container's volumes. Otherwise — or if
    the container ID can't be found — fall back to explicit ``-v`` mounts
    built from *alt_volumes*.

    :param alt_volumes: list of ``host:container`` volume specs to use
        when not running inside a container
    :return: list of docker CLI arguments
    """
    retval = []
    is_image_run = env_or_val("APIGENTOOLS_IMAGE", None)
    if is_image_run:
        # removed leftover debug prints flagged in review
        if os.path.exists("/proc/self/cgroup"):
            with open("/proc/self/cgroup") as f:
                for line in f.readlines():
                    if ":/docker/" in line:
                        container_id = line.split(":/docker/")
                        retval.append("--volumes-from")
                        retval.append(container_id[-1].strip())
                        break
        if not retval:
            log.warning(
                "APIGENTOOLS_IMAGE is set, but docker container ID not found in /proc/self/cgroup"
            )
    if not retval:
        for av in alt_volumes:
            retval.append("-v")
            retval.append(av)
    return retval
31 changes: 27 additions & 4 deletions docs/spec_repo.md
Expand Up @@ -107,12 +107,13 @@ The structure of the general config file is as follows, starting with top level

* `codegen_exec` - Name of the executable of the code generating tool.
* `container_apigentools_image` - Container image to use by the `container-apigentools` script by default.
* `generate_extra_args` - Additional arguments to pass to the `openapi-generator` call.
* `languages` - Settings for individual languages; contains a mapping of language names to their settings.
* individual language settings:
* `commands` - Commands to execute before/after code generation; commands are executed in two *phases* - `pre` (executed before code generation) or `post` (executed after code generation). Note that each command is run once for each of the langauge's `spec_versions`, and inside the directory with code for that spec version.
* *phase* commands - Each phase can contain a list of commands to be executed; each command is represented as a map:
* `description` - A description of the command.
* `commandline` - The command itself; the items in the list are strings and, potentially, *functions* that represent callbacks to the Python code. Each function is represented as a map:
* `commandline` - The command itself; the items in the list are templatable strings and, potentially, *functions* that represent callbacks to the Python code. Each function is represented as a map:
* `function` - Name of the function (see below for list of recognized functions).
* `args` - List of args to pass to the function in Python code (as in `*args`).
* `kwargs` - Mapping of args to pass to the function in Python code (as in `**kwargs`).
Expand All @@ -129,13 +130,35 @@ The structure of the general config file is as follows, starting with top level
* `spec_sections` - Mapping of major spec versions (these must be in `spec_versions`) to lists of files with paths/tags/components definitions to merge when creating full spec. Files not explicitly listed here are ignored.
* `spec_versions` - List of major versions currently known and operated on. These must be subdirectories of the `spec` directory.
* `user_agent_client_name` - The HTTP User Agent string will be set to `{user_agent_client_name}/{package_version}/{language}`.
* `generate_extra_args` - Additional arguments to pass to the `openapi-generator` call.
* `validation_commands` - List of commands to run during the validation phase. The commands have the same structure and mechanics as language commands (see above). Validation commands are executed for each full spec that is created.

### Functions in Language Phase Commands
### Functions in Commands

This section lists recognized functions for language phase commands, as mentioned in the section above.
This section lists recognized functions for language phase and validation commands, as mentioned in the section above.

* `glob` - runs Python's `glob.glob`
* `volumes_from` - useful when running dockerized tools that need to share volumes with the apigentools container

When used like this:
```json
{
"function": "volumes_from",
"kwargs": {
"alt_volumes": ["{{cwd}}:{{cwd}}"]
}
}
```
It will expand to `--volumes-from <current-container-id>` when running in a container and to `-v {{cwd}}:{{cwd}}` when not running in a container (furthermore, both `{{cwd}}` occurrences will be templated, as noted below).

### Template Values in Commands

The `commandline` arguments of commands can also use following templating values (Mustache templating is used):

* `{{cwd}}` - Current working directory

These templating values are available to `validation_commands`:

* `{{spec}}` - Path to the spec currently being validated (path is relative to Spec Repo directory). If there are multiple full spec files, this will always point to the one currently being validated.

## Section Files

Expand Down
4 changes: 2 additions & 2 deletions tests/unit/test_config.py
Expand Up @@ -6,7 +6,7 @@

import pytest

from apigentools.config import Config, LanguageCommand, LanguageConfig
from apigentools.config import Config, ConfigCommand, LanguageConfig

FIXTURE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "fixtures")

Expand Down Expand Up @@ -40,7 +40,7 @@ def test_config():
assert java.upstream_templates_dir == "Java"

pre = java.get_stage_commands("pre")[0]
assert type(pre) == LanguageCommand
assert type(pre) == ConfigCommand
assert pre.description == "Some command"
assert pre.commandline == ["some", "cmd"]

Expand Down