diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 143c148..d48cf88 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -72,33 +72,13 @@ jobs: register-python-argcomplete nfctl - name: Run the NF CLI demo to test installed version + id: test_demo shell: bash env: NETFOUNDRY_CLIENT_ID: ${{ secrets.NETFOUNDRY_CLIENT_ID }} NETFOUNDRY_PASSWORD: ${{ secrets.NETFOUNDRY_PASSWORD }} NETFOUNDRY_OAUTH_URL: ${{ secrets.NETFOUNDRY_OAUTH_URL }} - run: | - set -o xtrace - set -o pipefail - - nfctl config \ - general.network=$(nfctl demo --echo-name --prefix 'gh-${{ github.run_id }}') \ - general.yes=True \ - general.verbose=yes || true # FIXME: sometimes config command exits with an error - nfctl demo \ - --size medium \ - --regions us-ashburn-1 us-phoenix-1 \ - --provider OCI - nfctl \ - list services - nfctl \ - get service name=echo% > /tmp/echo.yml - nfctl \ - delete service name=echo% - nfctl \ - create service --file /tmp/echo.yml - nfctl \ - delete network + run: ./scripts/test-demo.sh - name: Publish Test Package uses: pypa/gh-action-pypi-publish@v1.13.0 @@ -162,3 +142,43 @@ jobs: platforms: linux/amd64,linux/arm64 push: true tags: ${{ steps.compose_tags.outputs.container_tags }} + + cleanup-delay: + if: failure() + needs: [build_pypi_and_docker] + runs-on: ubuntu-latest + steps: + - name: Wait 30 minutes before cleanup + run: | + echo "Test demo failed to complete. Waiting 30 minutes before cleanup to allow investigation..." + sleep 1800 + + cleanup-network: + if: always() && needs.build_pypi_and_docker.result == 'failure' + needs: [cleanup-delay, build_pypi_and_docker] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: '3.12' + + - name: Install nfctl + run: | + python -m pip install --upgrade pip + pip install . 
+ + - name: Delete test network + env: + NETFOUNDRY_CLIENT_ID: ${{ secrets.NETFOUNDRY_CLIENT_ID }} + NETFOUNDRY_PASSWORD: ${{ secrets.NETFOUNDRY_PASSWORD }} + NETFOUNDRY_OAUTH_URL: ${{ secrets.NETFOUNDRY_OAUTH_URL }} + run: | + # Use wildcard pattern to match network created by this run + NETWORK_PATTERN="gh-${GITHUB_RUN_ID}-%" + echo "Attempting to delete network matching: ${NETWORK_PATTERN}" + + # Try to delete the network, ignore errors if it doesn't exist + nfctl delete network "name=${NETWORK_PATTERN}" --yes || echo "Network may not exist or already deleted" diff --git a/netfoundry/ctl.py b/netfoundry/ctl.py index 38b19be..f596333 100644 --- a/netfoundry/ctl.py +++ b/netfoundry/ctl.py @@ -13,6 +13,7 @@ import signal import jwt import tempfile +from concurrent.futures import ThreadPoolExecutor, as_completed from json import dumps as json_dumps from json import load as json_load from json import loads as json_loads @@ -22,6 +23,7 @@ from subprocess import CalledProcessError from sys import exit as sysexit from sys import stderr, stdin, stdout +from threading import Lock from xml.sax.xmlreader import InputSource from jwt.exceptions import PyJWTError @@ -44,7 +46,7 @@ # import milc cli from milc import cli, questions # noqa: E402 -# set milc options using new API +# set milc options (requires milc >= 1.8.0) cli.milc_options(name='nfctl', author='NetFoundry', version=f'v{netfoundry_version}') # this creates the config subcommand from milc.subcommand import config # noqa: F401,E402 @@ -94,7 +96,7 @@ def __call__(self, parser, namespace, values, option_string=None): @cli.argument('-B', '--borders', default=True, action='store_boolean', help='print cell borders in text tables') @cli.argument('-H', '--headers', default=True, action='store_boolean', help='print column headers in text tables') @cli.argument('-Y', '--yes', action='store_true', arg_only=True, help='answer yes to potentially-destructive operations') -@cli.argument('-W', '--wait', help='seconds to wait for 
long-running processes to finish', default=900) +@cli.argument('-W', '--wait', type=int, help='seconds to wait for long-running processes to finish', default=900) @cli.argument('--proxy', help=argparse.SUPPRESS) @cli.argument('--gateway', default="gateway", help=argparse.SUPPRESS) @cli.entrypoint('configure the CLI to manage a network') @@ -961,32 +963,55 @@ def demo(cli): else: spinner.succeed(f"Found a hosted router in {region}") - spinner.text = f"Creating {len(fabric_placements)} hosted router(s)" - with spinner: - for region in fabric_placements: - er_name = f"Hosted Router {region} [{cli.config.demo.provider}]" - if not network.edge_router_exists(er_name): - er = network.create_edge_router( - name=er_name, - attributes=[ - "#hosted_routers", - "#demo_exits", - f"#{cli.config.demo.provider}", - ], - provider=cli.config.demo.provider, - location_code=region, - tunneler_enabled=False, # workaround for MOP-18098 (missing tunneler binding in ziti-router config) - ) - hosted_edge_routers.extend([er]) - spinner.succeed(f"Created {cli.config.demo.provider} router in {region}") + # Helper function to create or validate a single router (runs in parallel) + def create_or_validate_router(region): + """Create or validate router for a region. 
Returns (region, router_dict, message).""" + er_name = f"Hosted Router {region} [{cli.config.demo.provider}]" + if not network.edge_router_exists(er_name): + er = network.create_edge_router( + name=er_name, + attributes=[ + "#hosted_routers", + "#demo_exits", + f"#{cli.config.demo.provider}", + ], + provider=cli.config.demo.provider, + location_code=region, + tunneler_enabled=False, # workaround for MOP-18098 (missing tunneler binding in ziti-router config) + ) + message = f"Created {cli.config.demo.provider} router in {region}" + return (region, er, message) + else: + er_matches = network.edge_routers(name=er_name, only_hosted=True) + if len(er_matches) == 1: + er = er_matches[0] else: - er_matches = network.edge_routers(name=er_name, only_hosted=True) - if len(er_matches) == 1: - er = er_matches[0] - else: - raise RuntimeError(f"unexpectedly found more than one matching router for name '{er_name}'") - if er['status'] in RESOURCES["edge-routers"].status_symbols["error"] + RESOURCES["edge-routers"].status_symbols["deleting"] + RESOURCES["edge-routers"].status_symbols["deleted"]: - raise RuntimeError(f"hosted router '{er_name}' has unexpected status '{er['status']}'") + raise RuntimeError(f"unexpectedly found more than one matching router for name '{er_name}'") + if er['status'] in RESOURCES["edge-routers"].status_symbols["error"] + RESOURCES["edge-routers"].status_symbols["deleting"] + RESOURCES["edge-routers"].status_symbols["deleted"]: + raise RuntimeError(f"hosted router '{er_name}' has unexpected status '{er['status']}'") + return (region, er, None) # No message for existing routers + + # Parallelize router creation with thread-safe spinner updates + spinner.text = f"Creating {len(fabric_placements)} hosted router(s)" + spinner_lock = Lock() + new_routers = [] + + with ThreadPoolExecutor(max_workers=min(len(fabric_placements), 5)) as executor: + # Submit all router creation tasks + future_to_region = {executor.submit(create_or_validate_router, region): region 
for region in fabric_placements} + + # Collect results as they complete + for future in as_completed(future_to_region): + region, er, message = future.result() + new_routers.append(er) + + # Thread-safe spinner update for newly created routers + if message: + with spinner_lock: + spinner.succeed(message) + + # Add all new routers to the list + hosted_edge_routers.extend(new_routers) if not len(hosted_edge_routers) > 0: raise RuntimeError("unexpected problem with router placements, found zero hosted routers") @@ -994,7 +1019,7 @@ def demo(cli): spinner.text = f"Waiting for {len(hosted_edge_routers)} hosted router(s) to provision" with spinner: for router in hosted_edge_routers: - network.wait_for_statuses(expected_statuses=RESOURCES["edge-routers"].status_symbols["complete"], id=router['id'], type="edge-router", wait=2222, progress=False) + network.wait_for_statuses(expected_statuses=RESOURCES["edge-routers"].status_symbols["complete"], id=router['id'], type="edge-router", wait=cli.config.general.wait, progress=False) # ensure the router tunneler is available # network.wait_for_entity_name_exists(entity_name=router['name'], entity_type='endpoint') # router_tunneler = network.find_resources(type='endpoint', name=router['name'])[0] @@ -1091,31 +1116,6 @@ def demo(cli): services[svc]['properties'] = network.services(name=svc)[0] spinner.succeed(sub("Finding", "Found", spinner.text)) - # create a customer-hosted ER unless exists - customer_router_name = "Branch Exit Router" - spinner.text = f"Finding customer router '{customer_router_name}'" - with spinner: - if not network.edge_router_exists(name=customer_router_name): - spinner.text = sub("Finding", "Creating", spinner.text) - customer_router = network.create_edge_router( - name=customer_router_name, - attributes=["#branch_exit_routers"], - tunneler_enabled=True) - else: - customer_router = network.edge_routers(name=customer_router_name)[0] - spinner.succeed(sub("Finding", "Found", spinner.text)) - - spinner.text = 
f"Waiting for customer router {customer_router_name} to be ready for registration" - # wait for customer router to be PROVISIONED so that registration will be available - with spinner: - try: - network.wait_for_statuses(expected_statuses=RESOURCES["edge-routers"].status_symbols["complete"], id=customer_router['id'], type="edge-router", wait=222, progress=False) - customer_router_registration = network.rotate_edge_router_registration(id=customer_router['id']) - except Exception as e: - raise RuntimeError(f"error getting router registration, got {e}") - else: - spinner.succeed(f"Customer router ready to register with key '{customer_router_registration['registrationKey']}'") - # create unless exists app_wan_name = "Default Service Policy" spinner.text = "Finding service policy" diff --git a/netfoundry/network.py b/netfoundry/network.py index 0018e4e..5651210 100644 --- a/netfoundry/network.py +++ b/netfoundry/network.py @@ -458,6 +458,14 @@ def patch_resource(self, patch: dict, type: str = None, id: str = None, wait: in headers=headers, json=pruned_patch ) + if after_response.status_code in range(400, 600): + self.logger.debug( + '%s\n%s %s\r\n%s\r\n\r\n%s', + '-----------RESPONSE-----------', + after_response.status_code, after_response.reason, + '\r\n'.join('{}: {}'.format(k, v) for k, v in after_response.headers.items()), + after_response.text + ) after_response.raise_for_status() # raise any gross errors immediately after_response_code = after_response.status_code if after_response_code in [STATUS_CODES.codes.OK, STATUS_CODES.codes.ACCEPTED]: diff --git a/netfoundry/organization.py b/netfoundry/organization.py index 492342f..91cbe3a 100644 --- a/netfoundry/organization.py +++ b/netfoundry/organization.py @@ -159,6 +159,13 @@ def __init__(self, self.expiry_seconds = round(self.expiry - epoch) self.audience = token_cache['audience'] + # Check if cached token is expired + if self.expiry_seconds < 0: + self.logger.debug(f"cached token is expired 
({self.expiry_seconds}s ago), forcing renewal") + self.token = None + self.expiry = None + self.audience = None + # if the token was found but not the expiry then try to parse to extract the expiry so we can enforce minimum lifespan seconds if self.token and not self.expiry: try: @@ -280,14 +287,16 @@ def __init__(self, self.expiry_seconds = round(self.expiry - epoch) self.logger.debug(f"bearer token expiry in {self.expiry_seconds}s") - # renew token if not existing or imminent expiry, else continue - if not self.token or self.expiry_seconds < expiry_minimum: + # renew token if not existing, expired, or imminent expiry, else continue + if not self.token or self.expiry_seconds < 0 or self.expiry_seconds < expiry_minimum: # we've already done the work to determine the cached token is expired or imminently-expiring, might as well save other runs the same trouble self.logout() self.expiry = None self.audience = None if self.token and self.expiry_seconds < expiry_minimum: self.logger.debug(f"token expiry {self.expiry_seconds}s is less than configured minimum {expiry_minimum}s") + if self.expiry_seconds < 0: + self.logger.debug(f"token is expired ({abs(self.expiry_seconds)}s ago), forcing renewal") if not credentials_configured: raise NFAPINoCredentials("unable to renew because credentials are not configured") else: @@ -430,7 +439,7 @@ def get_caller_identity(self): except Exception as e: self.logger.debug(f"failed to get caller identity from url: '{url}', trying next until last, caught {e}") else: - return(caller) + return caller raise RuntimeError("failed to get caller identity from any url") def get_identity(self, identity_id: str): @@ -444,7 +453,7 @@ def get_identity(self, identity_id: str): except Exception as e: raise RuntimeError(f"failed to get identity from url: '{url}', caught {e}") else: - return(identity) + return identity def find_identities(self, type: str = 'identities', **kwargs): """Get identities as a collection. 
@@ -473,7 +482,7 @@ def find_identities(self, type: str = 'identities', **kwargs): except Exception as e: raise RuntimeError(f"failed to get identities from url: '{url}', caught {e}") else: - return(identities) + return identities get_identities = find_identities def find_roles(self, **kwargs): @@ -503,7 +512,7 @@ def find_roles(self, **kwargs): except Exception as e: raise RuntimeError(f"failed to get roles from url: '{url}', caught {e}") else: - return(roles) + return roles def get_role(self, role_id: str): """Get roles as a collection.""" @@ -514,7 +523,7 @@ def get_role(self, role_id: str): except Exception as e: raise RuntimeError(f"failed to get role from url: '{url}', caught {e}") else: - return(role) + return role def find_organizations(self, **kwargs): """Find organizations as a collection. @@ -536,7 +545,7 @@ def find_organizations(self, **kwargs): except Exception as e: raise RuntimeError(f"failed to get organizations from url: '{url}', caught {e}") else: - return(organizations) + return organizations get_organizations = find_organizations def get_organization(self, id): @@ -551,7 +560,7 @@ def get_organization(self, id): except Exception as e: raise RuntimeError(f"failed to get organization from url: '{url}', caught {e}") else: - return(organization) + return organization def get_network_group(self, network_group_id): """ @@ -565,7 +574,7 @@ def get_network_group(self, network_group_id): except Exception as e: raise RuntimeError(f"failed to get network_group from url: '{url}', caught {e}") else: - return(network_group) + return network_group def get_network(self, network_id: str, embed: object = None, accept: str = None): """Describe a Network by ID. 
@@ -593,7 +602,7 @@ def get_network(self, network_id: str, embed: object = None, accept: str = None) url = self.audience+'core/v2/networks/'+network_id network, status_symbol = get_generic_resource_by_url(setup=self, url=url, accept=accept, **params) - return(network) + return network def find_network_groups_by_organization(self, **kwargs): """Find network groups as a collection. @@ -604,7 +613,7 @@ def find_network_groups_by_organization(self, **kwargs): network_groups = list() for i in find_generic_resources(setup=self, url=url, embedded=RESOURCES['network-groups']._embedded, **kwargs): network_groups.extend(i) - return(network_groups) + return network_groups get_network_groups_by_organization = find_network_groups_by_organization network_groups = get_network_groups_by_organization @@ -631,7 +640,7 @@ def find_networks_by_organization(self, name: str = None, deleted: bool = False, except Exception as e: raise RuntimeError(f"failed to get networks from url: '{url}', caught {e}") else: - return(networks) + return networks get_networks_by_organization = find_networks_by_organization def network_exists(self, name: str, deleted: bool = False): @@ -641,9 +650,9 @@ def network_exists(self, name: str, deleted: bool = False): :param deleted: include deleted networks in results """ if self.count_networks_with_name(name=name, deleted=deleted) > 0: - return(True) + return True else: - return(False) + return False def count_networks_with_name(self, name: str, deleted: bool = False, unique: bool = True): """ @@ -686,5 +695,5 @@ def find_networks_by_group(self, network_group_id: str, deleted: bool = False, a except Exception as e: raise RuntimeError(f"failed to get networks from url: '{url}', caught {e}") else: - return(networks) + return networks get_networks_by_group = find_networks_by_group diff --git a/netfoundry/utility.py b/netfoundry/utility.py index dbcfa87..a059a54 100644 --- a/netfoundry/utility.py +++ b/netfoundry/utility.py @@ -43,15 +43,15 @@ def plural(singular): 
# if already plural then return, else pluralize p = inflect.engine() if singular[-1:] == 's': - return(singular) + return singular else: - return(p.plural_noun(singular)) + return p.plural_noun(singular) def singular(plural): """Singularize a plural form.""" p = inflect.engine() - return(p.singular_noun(plural)) + return p.singular_noun(plural) def kebab2camel(kebab: str, case: str = "lower"): # "lower" dromedary or "upper" Pascal @@ -322,12 +322,17 @@ def create_generic_resource(setup: object, url: str, body: dict, headers: dict = if response.status_code in range(400, 600): req = response.request setup.logger.debug( - '%s\n%s\r\n%s\r\n\r\n%s', - '-----------START-----------', + '%s\n%s\r\n%s\r\n\r\n%s\n%s\n%s %s\r\n%s\r\n\r\n%s', + '-----------REQUEST-----------', req.method + ' ' + req.url, '\r\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()), - req.body + req.body, + '-----------RESPONSE-----------', + response.status_code, response.reason, + '\r\n'.join('{}: {}'.format(k, v) for k, v in response.headers.items()), + response.text ) + setup.logger.error(f"HTTP {response.status_code} error response body: {response.text}") response.raise_for_status() resource = response.json() @@ -388,6 +393,14 @@ def get_generic_resource_by_url(setup: object, url: str, headers: dict = dict(), try: response.raise_for_status() except HTTPError: + if response.status_code in range(400, 600): + setup.logger.debug( + '%s\n%s %s\r\n%s\r\n\r\n%s', + '-----------RESPONSE-----------', + response.status_code, response.reason, + '\r\n'.join('{}: {}'.format(k, v) for k, v in response.headers.items()), + response.text + ) if resource_type.name in ["process-executions"] and status_symbol == "FORBIDDEN": # FIXME: MOP-18095 workaround the create network process ID mismatch bug url_parts = urlparse(url) path_parts = url_parts.path.split('/') @@ -483,14 +496,22 @@ def find_generic_resources(setup: object, url: str, headers: dict = dict(), embe proxies=setup.proxies, 
verify=setup.verify, ) + if response.status_code in range(400, 600): + setup.logger.debug( + '%s\n%s %s\r\n%s\r\n\r\n%s', + '-----------RESPONSE-----------', + response.status_code, response.reason, + '\r\n'.join('{}: {}'.format(k, v) for k, v in response.headers.items()), + response.text + ) response.raise_for_status() resource_page = response.json() - + # Handle non-paginated endpoints that return direct lists if isinstance(resource_page, list): yield resource_page return - + if isinstance(resource_page, dict) and resource_page.get('page'): try: total_pages = resource_page['page']['totalPages'] @@ -623,9 +644,7 @@ class ResourceType(ResourceTypeParent): embeddable: bool # legal to request embedding in a parent resource in same domain parent: str = field(default=str()) # optional parent ResourceType instance name status: str = field(default='status') # name of property where symbolic status is expressed - _embedded: str = field(default='default') # the key under which lists are found in the API - # e.g. networkControllerList (computed if not provided as dromedary - # case singular) + _embedded: str = field(default='default') # the key under which lists are found in the API e.g. 
networkControllerList (computed if not provided as dromedary case singular) create_responses: list = field(default_factory=list) # expected HTTP response codes for create operation no_update_props: list = field(default_factory=list) # expected HTTP response codes for create operation create_template: dict = field(default_factory=lambda: { @@ -932,7 +951,14 @@ def send(self, request, **kwargs): http = Session() # no cache HTTP_CACHE_EXPIRE = 33 -http_cache = CachedSession(cache_name=f"{get_user_cache_dir()}/http_cache", backend='sqlite', expire_after=HTTP_CACHE_EXPIRE) +http_cache = CachedSession( + cache_name=f"{get_user_cache_dir()}/http_cache_tz", + backend='sqlite', + expire_after=HTTP_CACHE_EXPIRE, + allowable_codes=(200, 203, 300, 301, 308), + timeout=DEFAULT_TIMEOUT, + stale_if_error=True +) # Mount it for both http and https usage adapter = TimeoutHTTPAdapter(timeout=DEFAULT_TIMEOUT, max_retries=RETRY_STRATEGY) http.mount("https://", adapter) diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 0000000..2902190 --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,85 @@ +# Test Scripts + +## test-demo.sh + +Test script for the `nfctl demo` command. Can be run locally or in GitHub Actions. + +### Usage + +**In GitHub Actions:** + +```yaml +- name: Run demo test + env: + NETFOUNDRY_CLIENT_ID: ${{ secrets.NETFOUNDRY_CLIENT_ID }} + NETFOUNDRY_PASSWORD: ${{ secrets.NETFOUNDRY_PASSWORD }} + NETFOUNDRY_OAUTH_URL: ${{ secrets.NETFOUNDRY_OAUTH_URL }} + run: ./scripts/test-demo.sh +``` + +The script automatically detects GitHub Actions via `GITHUB_RUN_ID` and uses it in the network name prefix. + +**Locally:** + +```bash +# Use default prefix (local-) +./scripts/test-demo.sh + +# Use custom prefix +DEMO_PREFIX=mytest ./scripts/test-demo.sh + +# Specify organization and network group +NETFOUNDRY_ORGANIZATION=acme \ +NETFOUNDRY_NETWORK_GROUP=testing \ +DEMO_PREFIX=mytest \ +./scripts/test-demo.sh +``` + +### What it does + +1. 
Creates a temporary directory and config file (cleaned up on exit) +2. Generates a unique network name using `--echo-name` +3. Configures nfctl with all settings in the temp config: + - Network name (generated) + - Organization (from `NETFOUNDRY_ORGANIZATION` if set) + - Network group (from `NETFOUNDRY_NETWORK_GROUP` if set) + - Auto-confirm and verbose flags +4. Runs the demo with medium size, AWS provider, us-west-2 and us-east-1 regions +5. Tests service operations (list, get, delete, create) +6. Cleans up by deleting the network and removing temp directory + +### Environment Variables + +**Script Configuration:** + +- `GITHUB_RUN_ID` - Auto-detected in GitHub Actions, used for network prefix +- `DEMO_PREFIX` - Custom prefix for local runs (default: `local-`) +- `NETFOUNDRY_PROFILE` - Profile name for token cache isolation (default: `default`) + +**Standard NetFoundry Environment Variables:** + +- `NETFOUNDRY_ORGANIZATION` - Optional organization name (omitted if unset) +- `NETFOUNDRY_NETWORK_GROUP` - Optional network group name (omitted if unset) +- `NETFOUNDRY_CLIENT_ID` - NetFoundry API credentials +- `NETFOUNDRY_PASSWORD` - NetFoundry API credentials +- `NETFOUNDRY_OAUTH_URL` - NetFoundry OAuth URL +- `NETFOUNDRY_API_ACCOUNT` - Path to API credentials JSON file + +These standard variables match those used by `nfctl login --eval` for consistency. + +**Profile Usage:** + +The `NETFOUNDRY_PROFILE` variable allows you to isolate token caches for different accounts. Each profile uses a separate cache file (`~/.cache/netfoundry/<profile>.json`), preventing conflicts when working with multiple NetFoundry accounts. 
+ +```bash +# Use a specific profile +NETFOUNDRY_PROFILE=advdev \ +NETFOUNDRY_API_ACCOUNT=~/.config/netfoundry/advdev.json \ +./scripts/test-demo.sh +``` + +### Features + +- **Isolated config**: Each run uses a temporary config file that doesn't interfere with your existing nfctl configuration +- **Auto-cleanup**: Temporary directory is automatically removed on exit (success or failure) +- **Config-based scoping**: Organization and network group are set in the config file (from environment variables) rather than passed as CLI options on every command diff --git a/scripts/test-demo.sh b/scripts/test-demo.sh new file mode 100755 index 0000000..a093dd6 --- /dev/null +++ b/scripts/test-demo.sh @@ -0,0 +1,67 @@ +#!/bin/bash +# Test script for nfctl demo command +# Can be run locally or in GitHub Actions + +set -o errexit +set -o xtrace +set -o pipefail + +# Create temporary directory and config file +TEMP_DIR=$(mktemp -d) +TEMP_CONFIG="${TEMP_DIR}/nfctl.ini" +echo "Using temporary config: ${TEMP_CONFIG}" + +# Cleanup on exit +trap 'rm -rf "${TEMP_DIR}"' EXIT + +# Determine prefix based on environment +if [[ -n "${GITHUB_RUN_ID}" ]]; then + # Running in GitHub Actions + PREFIX="gh-${GITHUB_RUN_ID}" +else + # Running locally - use timestamp or custom prefix + PREFIX="${DEMO_PREFIX:-local-$(date +%s)}" +fi + +echo "Using demo prefix: ${PREFIX} (override with DEMO_PREFIX)" + +# Set profile (default: "default") +: "${NETFOUNDRY_PROFILE:=default}" +echo "Using profile: ${NETFOUNDRY_PROFILE} (override with NETFOUNDRY_PROFILE)" + +# Helper function to run nfctl with the temp config and profile +nfctl() { + command nfctl --profile "${NETFOUNDRY_PROFILE}" --config-file "${TEMP_CONFIG}" "$@" +} + +# Configure nfctl with generated network name and basic settings +nfctl config \ + "general.network=$(command nfctl demo --echo-name --prefix "${PREFIX}")" \ + general.yes=True \ + general.verbose=yes || true # FIXME: sometimes config command exits with an error + +# Set optional 
organization and network group from standard NetFoundry env vars +if [[ -n "${NETFOUNDRY_ORGANIZATION}" ]]; then + nfctl config "general.organization=${NETFOUNDRY_ORGANIZATION}" +fi +if [[ -n "${NETFOUNDRY_NETWORK_GROUP}" ]]; then + nfctl config "general.network_group=${NETFOUNDRY_NETWORK_GROUP}" +fi + +# Run the demo +nfctl --wait 3000 demo \ + --size medium \ + --regions us-west-2 us-east-1 \ + --provider AWS + +# Test service operations +nfctl list services + +nfctl get service name=echo% > /tmp/echo.yml + +nfctl delete service name=echo% + +nfctl create service --file /tmp/echo.yml + +# Cleanup: delete the network +nfctl delete network diff --git a/setup.cfg b/setup.cfg index b01084a..c5105cd 100644 --- a/setup.cfg +++ b/setup.cfg @@ -24,7 +24,7 @@ include_package_data = True packages = find: install_requires = inflect >= 5.3 - milc >= 1.6.6 + milc >= 1.8.0 packaging >= 20.9 platformdirs >= 2.4 pygments >= 2.11