diff --git a/KEEPER_DRIVE_COMMANDS.md b/KEEPER_DRIVE_COMMANDS.md new file mode 100644 index 000000000..06bba3688 --- /dev/null +++ b/KEEPER_DRIVE_COMMANDS.md @@ -0,0 +1,745 @@ +# KeeperDrive Commands + +## Commands + +KeeperDrive commands manage folders, records, sharing, and permissions using the Keeper v3 API. All commands are prefixed with `kd-` and require authentication and a synced vault. + +To get help on a particular command, run: + +`help <command>` + + +| Command | Description | +| ------------------------ | ------------------------------------------------------------------- | +| `[kd-mkdir]` | Create a new KeeperDrive folder | +| `[kd-rndir]` | Rename a folder, change its color, or update permission inheritance | +| `[kd-list]` | List KeeperDrive folders and records | +| `[kd-rmdir]` | Remove one or more KeeperDrive folders | +| `[kd-share-folder]` | Grant or remove a user's access to a folder | +| `[kd-record-add]` | Create a new KeeperDrive record | +| `[kd-record-update]` | Update an existing KeeperDrive record | +| `[kd-rm]` | Remove (trash or unlink) one or more KeeperDrive records | +| `[kd-ln]` | Link a record into a KeeperDrive folder | +| `[kd-shortcut]` | Manage records that appear in more than one folder | +| `[kd-share-record]` | Grant, update, or revoke a user's access to a record | +| `[kd-record-permission]` | Bulk-update sharing permissions across records in a folder | +| `[kd-transfer-record]` | Transfer record ownership to another user | +| `[kd-record-details]` | Get metadata for one or more records | +| `[kd-get]` | Show full details for a record or folder | + + +**Permission roles** (used across folder and record sharing commands): + + +| Role | Description | +| ----------------------- | --------------------------------------------- | +| `viewer` | Read-only access | +| `shared-manager` | Can manage access grants | +| `content-manager` | Can add/edit records | +| `content-share-manager` | Can add/remove/edit records and manage access | +| 
`full-manager` | Full control | + + +--- + +### kd-mkdir command: + +**Command:** `kd-mkdir` + +**Detail:** Create a single KeeperDrive folder. `/` is a reserved character; use `//` to embed a literal slash in a folder name. + +**Parameters:** + +Folder name (nested paths are not supported — create one folder at a time) + +**Switches:** + +`--color ` Folder color: `none` `red` `orange` `yellow` `green` `blue` `gray` + +`--no-inherit` Do not inherit parent folder permissions + +**Examples:** + +``` +kd-mkdir "My Projects" +kd-mkdir "Sensitive" --color red --no-inherit +kd-mkdir "Reports//2026" +``` + +1. Create a single folder at the vault root (or under the current folder when navigated into one) +2. Create a folder with red color and no permission inheritance from parent +3. Create a folder whose name literally contains a slash (`Reports/2026`); `//` escapes a literal `/` + +--- + +### kd-rndir command: + +**Command:** `kd-rndir` + +**Detail:** Rename a folder, change its color, or update its permission-inheritance setting. At least one of `--name`, `--color`, `--inherit`, or `--no-inherit` is required. + +**Parameters:** + +Folder UID, name, or path + +**Switches:** + +`-n NAME`, `--name NAME` New folder name + +`--color ` New color: `none` `red` `orange` `yellow` `green` `blue` `gray` + +`--inherit` Enable permission inheritance from parent folder + +`--no-inherit` Disable permission inheritance from parent folder + +`-q`, `--quiet` Suppress confirmation message + +**Examples:** + +``` +kd-rndir "Old Name" --name "New Name" +kd-rndir abc123 --color blue +kd-rndir abc123 --name "Archive Q4" --color gray --inherit +kd-rndir abc123 --name "Finance" -q +``` + +1. Rename a folder by its current name +2. Change a folder's color using its UID +3. Rename, recolor, and enable permission inheritance in one command +4. 
Rename a folder silently with no confirmation output + +--- + +### kd-list command: + +**Command:** `kd-list` + +**Detail:** List KeeperDrive folders and records from the local cache. If neither `--folders` nor `--records` is specified, both are shown. Each row reports whether the item is shared (and to how many non-owner accessors). + +**Switches:** + +`--folders` Show only folders + +`--records` Show only records + +`--format <{table, csv, json}>` Choose the format of the output (default: `table`) + +`--output FILE` Write output to file (ignored for `table` format) + +**Examples:** + +``` +kd-list +kd-list --folders +kd-list --records --format json --output records.json +kd-list --format csv --output export.csv +``` + +1. List all KeeperDrive folders and records +2. List only folders +3. List only records and export to a JSON file +4. Export full listing to a CSV file + +--- + +### kd-rmdir command: + +**Command:** `kd-rmdir` + +**Detail:** Remove one or more KeeperDrive folders. Always shows a preview of the impact before asking for confirmation. + +**Parameters:** + +Folder UID(s) or name(s) to remove (max 100 per invocation) + +**Switches:** + +`-o OP`, `--operation OP` Removal mode: `folder-trash` (default, recoverable) or `delete-permanent` (irreversible) + +`-f`, `--force` Skip the confirmation prompt and execute immediately after preview + +`--dry-run` Preview only — do not delete anything (mutually exclusive with `--force`) + +`-q`, `--quiet` Suppress per-folder impact detail in the preview output + +**Examples:** + +``` +kd-rmdir "Old Projects" +kd-rmdir abc123 --dry-run +kd-rmdir abc123 def456 --operation delete-permanent --force +kd-rmdir "Archive" --quiet +``` + +1. Trash a folder by name (recoverable; prompts for confirmation after the preview) +2. Preview the deletion impact without committing any changes +3. Permanently delete two folders with no confirmation prompt +4. Delete a folder with minimal preview output + +> **Warning:** `--operation delete-permanent` is irreversible. 
All sub-folders and records inside will be permanently destroyed. + +--- + +### kd-share-folder command: + +**Command:** `kd-share-folder` + +**Detail:** Grant or remove a user's access to one or more KeeperDrive folders. The action is controlled by `--action` (default: `grant`). + +**Parameters:** + +Folder UID(s), name(s), or path(s) + +**Switches:** + +`-e EMAIL`, `--email EMAIL` Recipient email address. Repeatable: `-e user1@example.com -e user2@example.com`. Use `@existing` (or its alias `@current`) to target all current users in the folder (excluding yourself). + +`-a ACTION`, `--action ACTION` `grant` (default — also updates existing shares) or `remove` + +`-r ROLE`, `--role ROLE` Permission role (default: `viewer`). See permission roles table above. + +`--expire-at TIMESTAMP` Expiration as ISO datetime `yyyy-MM-ddTHH:MM:SSZ` or `never` + +`--expire-in PERIOD` Expiration as period: `30d` `6mo` `1y` `24h` `30mi` or `never` (mutually exclusive with `--expire-at`) + +**Examples:** + +``` +kd-share-folder "My Projects" -e colleague@example.com +kd-share-folder abc123 -e manager@example.com -r full-manager +kd-share-folder abc123 -e temp@example.com -r viewer --expire-in 7d +kd-share-folder abc123 -e user@example.com -a remove +kd-share-folder abc123 def456 -e user1@example.com -e user2@example.com -r viewer +kd-share-folder "Team Folder" -e @existing -a remove +``` + +1. Grant default viewer access to a folder +2. Grant full-manager access using a folder UID +3. Grant viewer access that expires in 7 days +4. Remove a user's access to a folder +5. Grant viewer access to multiple folders and multiple users in one command +6. Remove all existing users from a folder at once + +--- + +### kd-record-add command: + +**Command:** `kd-record-add` + +**Detail:** Create a new KeeperDrive record. Fields are specified using `type=value` or `type.label=value` dot notation. See `--syntax-help` for full field notation details. 
+ +**Switches:** + +`-t TITLE`, `--title TITLE` Record title (required) + +`-rt TYPE`, `--record-type TYPE` Record type, e.g. `login`, `general` (required) + +`-n NOTES`, `--notes NOTES` Record notes + +`--folder FOLDER` Folder UID or name to store the record (vault root if omitted) + +`-f`, `--force` Ignore field-validation warnings + +`--syntax-help` Display full field notation syntax help + +**Examples:** + +``` +kd-record-add -t "Gmail" -rt login login=user@gmail.com password=Secret123 +kd-record-add -t "Dev Server" -rt login --folder "Infrastructure" login=root password=Pass123 url=ssh://dev.example.com +kd-record-add -t "API Key" -rt general --folder "Secrets" "License ID"=9ACB123 +kd-record-add --syntax-help +``` + +1. Create a login record at the vault root +2. Create a login record in a specific folder with multiple fields +3. Create a general record with a custom-labeled field +4. Display full field syntax help + +--- + +### kd-record-update command: + +**Command:** `kd-record-update` + +**Detail:** Update an existing KeeperDrive record's title, type, notes, or field values. One or more record UIDs or titles must be specified with `-r`. + +**Switches:** + +`-r UID`, `--record UID` Record UID or title (required). Repeatable: `-r uid1 -r uid2` + +`-t TITLE`, `--title TITLE` New record title + +`-rt TYPE`, `--record-type TYPE` New record type + +`-n NOTES`, `--notes NOTES` Append or replace record notes + +`-f`, `--force` Ignore field-validation warnings + +`--syntax-help` Display full field notation syntax help + +**Examples:** + +``` +kd-record-update -r rec123 -t "Updated Title" +kd-record-update -r rec123 password=NewPass123 +kd-record-update -r rec123 -t "Production DB" -n "Rotated 2025-03-01" password=NewPass +kd-record-update -r rec123 -r rec456 -t "Shared Title" +``` + +1. Update only the title of a record +2. Update a single field value +3. Update title, notes, and a field value together +4. 
Apply the same title update to two records at once + +--- + +### kd-rm command: + +**Command:** `kd-rm` + +**Detail:** Remove (trash or unlink) one or more KeeperDrive records. Always shows a preview before executing. + +**Parameters:** + +Record UID(s) or title(s) to remove (max 500 per invocation) + +**Switches:** + +`-f FOLDER`, `--folder FOLDER` Folder UID or name providing context for the operation (required when `--operation unlink`) + +`-o OP`, `--operation OP` Removal mode (default: `owner-trash`) + +`--force` Skip the confirmation prompt and execute after preview + +`--dry-run` Preview only — do not delete anything (mutually exclusive with `--force`) + +**Operation types:** + + +| Operation | Effect | +| -------------- | --------------------------------------------------------------- | +| `owner-trash` | Move record to owner's trash (recoverable) — default | +| `folder-trash` | Remove record from a folder and trash it | +| `unlink` | Remove record from a specific folder only — requires `--folder` | + + +**Examples:** + +``` +kd-rm rec123abc +kd-rm rec123abc --folder "Projects" --operation unlink +kd-rm rec123abc rec456def --dry-run +kd-rm rec123abc rec456def --force +kd-rm rec123abc --operation folder-trash --folder "Archive" +``` + +1. Trash a record (shows preview, then prompts for confirmation) +2. Unlink a record from a specific folder without deleting it +3. Preview the impact for two records without making any changes +4. Trash two records immediately with no prompt +5. Remove a record from a specific folder and send it to trash + +--- + +### kd-ln command: + +**Command:** `kd-ln` + +**Detail:** Link a record into a KeeperDrive folder using short positional syntax. This adds the record to the folder without removing it from its current location. + +**Parameters:** + +`src` — Record UID or title + +`dst` — Destination folder UID or name + +**Examples:** + +``` +kd-ln rec123abc "My Projects" +kd-ln rec123abc abc123folder +``` + +1. 
Link a record by title into a folder by name +2. Link a record by UID into a folder by UID + +--- + +### kd-shortcut command: + +**Command:** `kd-shortcut` + +**Detail:** Manage KeeperDrive records that appear in more than one folder. Supports two sub-commands: `list` and `keep`. + +#### kd-shortcut list + +List all records linked to more than one folder. + +**Parameters:** + +`target` — Optional record UID, title, or folder path to filter results + +**Switches:** + +`--format <{table, csv, json}>` Choose the format of the output (default: `table`) + +`--output FILE` Write csv/json output to a file + +**Examples:** + +``` +kd-shortcut list +kd-shortcut list "My Record" +kd-shortcut list "Projects" --format json +kd-shortcut list --format csv --output shortcuts.csv +``` + +1. List all records that appear in more than one folder +2. List shortcuts for a specific record by title +3. List shortcuts for records in a specific folder in JSON format +4. Export the full shortcuts list to a CSV file + +#### kd-shortcut keep + +Keep a record in exactly one folder, removing it from all others. + +**Parameters:** + +`target` — Record UID or title + +`folder` — Folder UID or path to keep the record in (defaults to current folder) + +**Switches:** + +`-f`, `--force` Skip the confirmation prompt before removing shortcuts + +**Examples:** + +``` +kd-shortcut keep rec123abc "My Projects" +kd-shortcut keep "My Record" "Preferred Folder" --force +``` + +1. Keep a record only in `My Projects`, removing it from all other folders (prompts for confirmation) +2. Remove shortcuts without a confirmation prompt + +--- + +### kd-share-record command: + +**Command:** `kd-share-record` + +**Detail:** Grant, update, or revoke a user's access to a record. All three actions are handled by one command via `--action` (default: `grant`). 
+ +**Parameters:** + +Record UID or folder UID (use with `-R` for bulk sharing across all records in a folder) + +**Switches:** + +`-e EMAIL`, `--email EMAIL` Recipient email address (required). Repeatable: `-e user1@example.com -e user2@example.com` + +`-a ACTION`, `--action ACTION` `grant` (default — also updates existing shares), `revoke`, or `owner` (transfer ownership) + +`-r ROLE`, `--role ROLE` Permission role. Required for `grant` action. See permission roles table above. + +`-R`, `--recursive` Apply to all records in a folder and its sub-folders + +`--contacts-only` Only share with known contacts already in your vault + +`-f`, `--force` Skip confirmation prompts + +`--dry-run` Display permission changes without committing them + +`--expire-at TIMESTAMP` Expiration as ISO datetime or `never` + +`--expire-in PERIOD` Expiration as period: `30d` `6mo` `1y` `24h` `30mi` or `never` (mutually exclusive with `--expire-at`) + +**Examples:** + +``` +kd-share-record rec123abc -e colleague@example.com -r viewer +kd-share-record rec123abc -e user1@example.com -e user2@example.com -r viewer +kd-share-record rec123abc -e colleague@example.com -a revoke +kd-share-record rec123abc -e temp@example.com -r viewer --expire-in 30d +kd-share-record folderabc -e team@example.com -r viewer -R +kd-share-record rec123abc -e newowner@example.com -a owner +kd-share-record rec123abc -e user@example.com -r full-manager --dry-run +``` + +1. Grant viewer access to a record +2. Grant viewer access to multiple recipients at once +3. Revoke a user's access to a record +4. Grant viewer access that expires in 30 days +5. Bulk share all records in a folder (and sub-folders) with a team +6. Transfer ownership of a record to another user +7. Preview permission changes without committing them + +> **Note:** `-a grant` on a record that is already shared with the recipient will automatically update their existing share to the new role. 
`-a owner` transfers record ownership — only one recipient is allowed and you will lose access to the record after the transfer. + +--- + +### kd-record-permission command: + +**Command:** `kd-record-permission` + +**Detail:** Bulk-update sharing permissions across all records in a folder and optionally its sub-folders. Always shows a change plan before executing. + +**Parameters:** + +`folder` — Folder UID or path (optional; omit to target vault root) + +**Switches:** + +`-a ACTION`, `--action ACTION` `grant` or `revoke` (required) + +`-r ROLE`, `--role ROLE` Role to grant, or role to filter when revoking. Required for `grant` action. + +`-R`, `--recursive` Apply to all sub-folders recursively + +`-f`, `--force` Apply changes without a confirmation prompt + +`--dry-run` Preview changes without committing them + +**Examples:** + +``` +kd-record-permission -a grant -r viewer "My Projects" --dry-run +kd-record-permission -a revoke -r viewer "Archive" -R --force +kd-record-permission -a grant -r viewer +kd-record-permission -a revoke "Old Folder" --dry-run +``` + +1. Preview: grant viewer permission to all records in `My Projects` +2. Revoke viewer permission from all records in `Archive` and its sub-folders without prompting +3. Grant viewer permission to all records at the vault root level +4. Preview what would be revoked across all records in a folder + +--- + +### kd-transfer-record command: + +**Command:** `kd-transfer-record` + +**Detail:** Transfer record ownership to another user. After transfer you permanently lose access to the record. This cannot be easily reversed. + +**Parameters:** + +`record_uids` — UID(s) of the record(s) to transfer + +`new_owner_email` — Email address of the new owner + +**Examples:** + +``` +kd-transfer-record rec123abc newowner@example.com +kd-transfer-record rec123abc rec456def newowner@example.com +``` + +1. Transfer ownership of a single record to another user +2. 
Transfer ownership of multiple records to the same new owner + +> **Warning:** After the transfer you will no longer have access to these records. + +--- + +### kd-record-details command: + +**Command:** `kd-record-details` + +**Detail:** Get metadata (title, type, version, revision) for one or more records. + +**Parameters:** + +One or more record UIDs or titles + +**Switches:** + +`--format <{table, json}>` Choose the format of the output (default: `table`) + +**Examples:** + +``` +kd-record-details rec123abc +kd-record-details rec123abc rec456def rec789ghi +kd-record-details rec123abc --format json +``` + +1. Show metadata for a single record +2. Show metadata for multiple records at once +3. Show metadata for a record in JSON format + +--- + +### kd-get command: + +**Command:** `kd-get` + +**Detail:** Show full details for a KeeperDrive record or folder by UID or title — including fields, notes, and access list. + +**Parameters:** + +Record UID, folder UID, or title/name + +**Switches:** + +`--format <{detail, json}>` Choose the format of the output: `detail` (default) or `json` + +`-v`, `--verbose` Show full per-permission flag breakdown for each accessor + +`--unmask` Reveal masked field values (passwords, secrets) + +**Examples:** + +``` +kd-get rec123abc +kd-get "Gmail Account" --unmask +kd-get abc123folder --verbose +kd-get rec123abc --format json +kd-get rec123abc --format json --verbose +``` + +1. Show the details of a specific record +2. Show a record's details and reveal its masked password field +3. Show a folder's details with full permission flags per accessor +4. Show a record's details in JSON format +5. 
Show a record in JSON format with full per-accessor permission flags + +--- + +## Quick Reference + +### Folder commands + + +| Command | Short description | +| ----------------- | ----------------------------- | +| `kd-mkdir` | Create a folder | +| `kd-rndir` | Rename / recolor a folder | +| `kd-list` | List folders and records | +| `kd-rmdir` | Remove folder(s) | +| `kd-share-folder` | Grant or remove folder access | + + +### Record commands + + +| Command | Short description | +| ------------------ | ----------------------------------------------- | +| `kd-record-add` | Create a record | +| `kd-record-update` | Update a record | +| `kd-rm` | Remove / trash / unlink a record | +| `kd-ln` | Link a record into a folder | +| `kd-shortcut list` | List multi-folder records | +| `kd-shortcut keep` | Keep record in one folder, unlink from the rest | + + +### Sharing commands + + +| Command | Short description | +| ---------------------- | ----------------------------------------------- | +| `kd-share-record` | Grant / revoke / transfer ownership of a record | +| `kd-record-permission` | Bulk update sharing across a folder | +| `kd-transfer-record` | Transfer record ownership | +| `kd-share-folder` | Grant or revoke folder access | + + +### Inspection commands + + +| Command | Short description | +| ------------------- | ---------------------------------- | +| `kd-get` | Show full record or folder details | +| `kd-record-details` | Get record metadata (batch) | + + +--- + +## Common Workflows + +### Set up a shared project folder + +```bash +# 1. Create folder +kd-mkdir "Client Projects" --color blue + +# 2. Add a record +kd-record-add -t "Client Portal" -rt login --folder "Client Projects" \ + login=admin@client.com password=Secret123 url=https://portal.client.com + +# 3. Share the folder with a colleague +kd-share-folder "Client Projects" -e colleague@company.com -r content-manager + +# 4. 
Verify the listing +kd-list --folders +``` + +### Share a record with time-limited access + +```bash +# Grant 30-day viewer access +kd-share-record rec123abc -e contractor@external.com -r viewer --expire-in 30d + +# Preview what will change (dry-run first) +kd-share-record rec123abc -e contractor@external.com -r full-manager --dry-run + +# Revoke when done +kd-share-record rec123abc -e contractor@external.com -a revoke +``` + +### Clean up multi-folder shortcuts + +```bash +# Find all records in multiple folders +kd-shortcut list + +# Keep a record in only one folder +kd-shortcut keep "My Record" "Preferred Folder" +``` + +### Safely remove a folder + +```bash +# Preview impact +kd-rmdir "Old Archive" --dry-run + +# Trash (recoverable) +kd-rmdir "Old Archive" + +# Or permanently delete (irreversible) +kd-rmdir "Old Archive" --operation delete-permanent --force +``` + +### Bulk-revoke permissions across a folder tree + +```bash +# Preview what will be revoked +kd-record-permission -a revoke -r viewer "Archive" -R --dry-run + +# Apply without prompting +kd-record-permission -a revoke -r viewer "Archive" -R --force +``` + +--- + +## Expiration Format + +Both `--expire-at` and `--expire-in` are accepted by `kd-share-folder` and `kd-share-record`. They are mutually exclusive — use one or the other. 
+ + +| Format | Example | Meaning | +| ------------ | ---------------------- | ------------------- | +| ISO datetime | `2027-06-01T00:00:00Z` | Exact UTC expiry | +| `never` | `never` | No expiration | +| Days | `30d` | 30 days from now | +| Hours | `24h` | 24 hours from now | +| Minutes | `30mi` | 30 minutes from now | +| Months | `6mo` | 6 months from now | +| Years | `1y` | 1 year from now | + + +--- + +--- + diff --git a/keepercommander/__init__.py b/keepercommander/__init__.py index fa0b95ba8..f9ffffaab 100644 --- a/keepercommander/__init__.py +++ b/keepercommander/__init__.py @@ -10,4 +10,4 @@ # Contact: commander@keepersecurity.com # -__version__ = '17.2.16' +__version__ = '18.0.0' diff --git a/keepercommander/api.py b/keepercommander/api.py index 71a5b1875..1b1f22a58 100644 --- a/keepercommander/api.py +++ b/keepercommander/api.py @@ -197,7 +197,7 @@ def get_record(params, record_uid): try: rec = Record(record_uid) data = json.loads(cached_rec['data_unencrypted']) - extra = json.loads(cached_rec['extra_unencrypted']) if 'extra_unencrypted' in cached_rec else None + extra = json.loads(cached_rec['extra_unencrypted']) if cached_rec.get('extra_unencrypted') else None rec.load(data, version=version, revision=cached_rec['revision'], extra=extra) if not resolve_record_view_path(params, record_uid): rec.mask_password() diff --git a/keepercommander/autocomplete.py b/keepercommander/autocomplete.py index f2dafddd3..0346c71e2 100644 --- a/keepercommander/autocomplete.py +++ b/keepercommander/autocomplete.py @@ -21,7 +21,10 @@ from .commands.folder import mv_parser from .commands.base import GroupCommand, Command from .commands.connect import ConnectCommand +from .command_categories import COMMAND_CATEGORIES from .commands import commands, enterprise_commands, msp_commands + +_KEEPER_DRIVE_COMMANDS = COMMAND_CATEGORIES.get('KeeperDrive Commands', set()) from .subfolder import try_resolve_path as sf_try_resolve_path @@ -132,7 +135,9 @@ def get_completions(self, 
document, complete_event): if document.is_cursor_at_the_end: pos = document.text.find(' ') if pos == -1: - cmds = [x for x in commands if x.startswith(document.text)] + hide_kd = self.params.is_feature_disallowed('keeper_drive') + cmds = [x for x in commands + if x.startswith(document.text) and not (hide_kd and x in _KEEPER_DRIVE_COMMANDS)] if self.aliases: al_cmds = [x[0] for x in self.aliases.items() if type(x[1]) == tuple and x[0].startswith(document.text)] cmds.extend(al_cmds) diff --git a/keepercommander/cli.py b/keepercommander/cli.py index 0f6f2429c..37271357e 100644 --- a/keepercommander/cli.py +++ b/keepercommander/cli.py @@ -39,10 +39,13 @@ from .commands.utils import LoginCommand from .commands import msp from .constants import OS_WHICH_CMD, KEEPER_PUBLIC_HOSTS, KEEPER_SERVERS +from .command_categories import COMMAND_CATEGORIES from .error import CommandError, Error from .params import KeeperParams from .subfolder import BaseFolderNode +KEEPER_DRIVE_COMMANDS = COMMAND_CATEGORIES.get('KeeperDrive Commands', set()) + current_command = None # type: Union[None, CliCommand] stack = [] register_commands(commands, aliases, command_info) @@ -68,7 +71,7 @@ logging.getLogger('asyncio').setLevel(logging.WARNING) -def display_command_help(show_enterprise=False, show_shell=False, show_legacy=False): +def display_command_help(show_enterprise=False, show_shell=False, show_legacy=False, show_keeper_drive=True): from .command_categories import get_command_category, get_category_order from .display import bcolors from colorama import Fore, Style @@ -141,6 +144,8 @@ def clean_description(desc): continue if category == 'Legacy Commands' and not show_legacy: continue + if category == 'KeeperDrive Commands' and not show_keeper_drive: + continue if category == 'KeeperPAM Commands': for cmd_display, description in sorted(pam_subcommands): @@ -380,7 +385,9 @@ def is_msp(params_local): else: cmd = ali - if cmd in commands or cmd in enterprise_commands or cmd in msp_commands: + 
is_kd_hidden = cmd in KEEPER_DRIVE_COMMANDS and params.is_feature_disallowed('keeper_drive') + + if not is_kd_hidden and (cmd in commands or cmd in enterprise_commands or cmd in msp_commands): command = commands.get(cmd) or enterprise_commands.get(cmd) or msp_commands.get(cmd) global current_command current_command = command @@ -431,7 +438,10 @@ def is_msp(params_local): else: if not params.session_token and utils.is_email(orig_cmd): return LoginCommand().execute(params, email=orig_cmd, new_login=False) - display_command_help(show_enterprise=(params.enterprise is not None)) + display_command_help( + show_enterprise=(params.enterprise is not None), + show_keeper_drive=not params.is_feature_disallowed('keeper_drive') + ) def runcommands(params, commands=None, command_delay=0, quiet=False): @@ -856,7 +866,11 @@ def get_prompt(params): break if f.parent_uid is not None: - f = params.folder_cache[f.parent_uid] + if f.parent_uid in params.folder_cache: + f = params.folder_cache[f.parent_uid] + else: + # Parent UID not in folder_cache (e.g., KD folders with special root UID) + f = params.root_folder else: if f.type == BaseFolderNode.SharedFolderFolderType: f = params.folder_cache[f.shared_folder_uid] diff --git a/keepercommander/command_categories.py b/keepercommander/command_categories.py index dbb4945b2..fcc6a29aa 100644 --- a/keepercommander/command_categories.py +++ b/keepercommander/command_categories.py @@ -109,6 +109,14 @@ 'epm' }, + # KeeperDrive Commands + 'KeeperDrive Commands': { + 'kd-mkdir', 'kd-record-add', 'kd-record-update', 'kd-rndir', 'kd-list', + 'kd-share-folder', 'kd-record-details', 'kd-share-record', + 'kd-record-permission', 'kd-transfer-record', + 'kd-ln', 'kd-rm', 'kd-rmdir', 'kd-shortcut', 'kd-get' + }, + # Legacy Commands 'Legacy Commands': { 'rotate', 'connect', 'ssh', 'ssh-agent', 'rdp', 'rsync', 'set', 'echo', @@ -145,5 +153,6 @@ def get_category_order(): 'Miscellaneous Commands', 'KeeperPAM Commands', 'EPM Commands', + 'KeeperDrive 
Commands', 'Legacy Commands' ] diff --git a/keepercommander/commands/base.py b/keepercommander/commands/base.py index d075197a6..eb8f2ff1f 100644 --- a/keepercommander/commands/base.py +++ b/keepercommander/commands/base.py @@ -152,6 +152,10 @@ def register_commands(commands, aliases, command_info): device_management.register_commands(commands) device_management.register_command_info(aliases, command_info) + from . import keeper_drive + keeper_drive.register_commands(commands) + keeper_drive.register_command_info(aliases, command_info) + if sys.version_info.major == 3 and sys.version_info.minor >= 10 and (utils.is_windows_11() or sys.platform == 'darwin'): from ..biometric import BiometricCommand commands['biometric'] = BiometricCommand() diff --git a/keepercommander/commands/discoveryrotation.py b/keepercommander/commands/discoveryrotation.py index 8cc87cdb3..4d53a9d9a 100644 --- a/keepercommander/commands/discoveryrotation.py +++ b/keepercommander/commands/discoveryrotation.py @@ -23,7 +23,8 @@ import requests from keeper_secrets_manager_core.utils import url_safe_str_to_bytes -from .base import (Command, GroupCommand, user_choice, dump_report_data, report_output_parser, field_to_title, +from .base import (Command, GroupCommand, user_choice, dump_report_data, report_output_parser, + json_output_parser, field_to_title, FolderMixin, RecordMixin, toggle_pam_legacy_commands) from .folder import FolderMoveCommand from .ksm import KSMCommand @@ -2610,7 +2611,8 @@ def execute(self, params, **kwargs): class PAMRouterGetRotationInfo(Command): - parser = argparse.ArgumentParser(prog='dr-router-get-rotation-info-parser') + parser = argparse.ArgumentParser(prog='dr-router-get-rotation-info-parser', + parents=[json_output_parser]) parser.add_argument('--record-uid', '-r', required=True, dest='record_uid', action='store', help='Record UID to rotate') @@ -2620,76 +2622,115 @@ def get_parser(self): def execute(self, params, **kwargs): record_uid = kwargs.get('record_uid') + 
format_type = kwargs.get('format', 'table') record_uid_bytes = url_safe_str_to_bytes(record_uid) rri = record_rotation_get(params, record_uid_bytes) rri_status_name = router_pb2.RouterRotationStatus.Name(rri.status) if rri_status_name == 'RRS_ONLINE': - print(f'Rotation Status: {bcolors.OKBLUE}Ready to rotate ({rri_status_name}){bcolors.ENDC}') configuration_uid = utils.base64_url_encode(rri.configurationUid) - print(f'PAM Config UID: {bcolors.OKBLUE}{configuration_uid}{bcolors.ENDC}') - print(f'Node ID: {bcolors.OKBLUE}{rri.nodeId}{bcolors.ENDC}') - - print( - f"Gateway Name where the rotation will be performed: {bcolors.OKBLUE}{(rri.controllerName if rri.controllerName else '-')}{bcolors.ENDC}") - print( - f"Gateway Uid: {bcolors.OKBLUE}{(utils.base64_url_encode(rri.controllerUid) if rri.controllerUid else '-')} {bcolors.ENDC}") + gateway_name = rri.controllerName if rri.controllerName else '-' + gateway_uid = utils.base64_url_encode(rri.controllerUid) if rri.controllerUid else '-' def is_resource_ok(resource_id, params, configuration_uid): if resource_id not in params.record_cache: return False - configuration = vault.KeeperRecord.load(params, configuration_uid) if not isinstance(configuration, vault.TypedRecord): return False - field = configuration.get_typed_field('pamResources') if not (field and isinstance(field.value, list) and len(field.value) == 1): return False - rv = field.value[0] if not isinstance(rv, dict): return False - resources = rv.get('resourceRef') return isinstance(resources, list) and resource_id in resources + admin_resource_uid = None if rri.resourceUid: - resource_id = utils.base64_url_encode(rri.resourceUid) - resource_ok = is_resource_ok(resource_id, params, configuration_uid) - print(f"Admin Resource Uid: {bcolors.OKBLUE if resource_ok else bcolors.FAIL}{resource_id}" - f"{bcolors.ENDC}") + admin_resource_uid = utils.base64_url_encode(rri.resourceUid) - # print(f"Router Cookie: {bcolors.OKBLUE}{(rri.cookie if rri.cookie else 
'-')}{bcolors.ENDC}") - # print(f"scriptName: {bcolors.OKGREEN}{rri.scriptName}{bcolors.ENDC}") - if rri.pwdComplexity: - print(f"Password Complexity: {bcolors.OKGREEN}{rri.pwdComplexity}{bcolors.ENDC}") + # Password complexity + pwd_complexity_raw = rri.pwdComplexity if rri.pwdComplexity else None + pwd_complexity_detail = None + if pwd_complexity_raw: try: record = params.record_cache.get(record_uid) if record: - complexity = crypto.decrypt_aes_v2(utils.base64_url_decode(rri.pwdComplexity), + complexity = crypto.decrypt_aes_v2(utils.base64_url_decode(pwd_complexity_raw), record['record_key_unencrypted']) - c = json.loads(complexity.decode()) - print(f"Password Complexity Data: {bcolors.OKBLUE}" - f"Length: {c.get('length')}; Lowercase: {c.get('lowercase')}; " - f"Uppercase: {c.get('caps')}; " - f"Digits: {c.get('digits')}; " - f"Symbols: {c.get('special')}; " - f"Symbols Chars: {c.get('specialChars')} {bcolors.ENDC}") - except: - pass + pwd_complexity_detail = json.loads(complexity.decode()) + except Exception: + pwd_complexity_detail = None + + # Schedule information + schedule_type = None + schedule_data = None + rq = pam_pb2.PAMGenericUidsRequest() + schedules_proto = router_get_rotation_schedules(params, rq) + if schedules_proto: + for s in schedules_proto.schedules: + if s.recordUid == record_uid_bytes: + if s.noSchedule is True: + schedule_type = 'manual' + else: + schedule_type = 'scheduled' + schedule_data = s.scheduleData if s.scheduleData else None + break + + if format_type == 'json': + result = { + 'status': rri_status_name, + 'ready_to_rotate': True, + 'pam_config_uid': configuration_uid, + 'node_id': rri.nodeId, + 'gateway_name': gateway_name, + 'gateway_uid': gateway_uid, + 'admin_resource_uid': admin_resource_uid, + 'password_complexity': pwd_complexity_raw, + 'password_complexity_detail': pwd_complexity_detail, + 'schedule_type': schedule_type, + 'schedule_data': schedule_data, + 'disabled': rri.disabled, + 'script_name': rri.scriptName if 
rri.scriptName else None, + } + return json.dumps(result, indent=2) + + # --- table output (original behaviour preserved) --- + print(f'Rotation Status: {bcolors.OKBLUE}Ready to rotate ({rri_status_name}){bcolors.ENDC}') + print(f'PAM Config UID: {bcolors.OKBLUE}{configuration_uid}{bcolors.ENDC}') + print(f'Node ID: {bcolors.OKBLUE}{rri.nodeId}{bcolors.ENDC}') + print( + f"Gateway Name where the rotation will be performed: {bcolors.OKBLUE}{gateway_name}{bcolors.ENDC}") + print( + f"Gateway Uid: {bcolors.OKBLUE}{gateway_uid} {bcolors.ENDC}") + + if admin_resource_uid: + resource_ok = is_resource_ok(admin_resource_uid, params, configuration_uid) + print(f"Admin Resource Uid: {bcolors.OKBLUE if resource_ok else bcolors.FAIL}{admin_resource_uid}" + f"{bcolors.ENDC}") + + # print(f"Router Cookie: {bcolors.OKBLUE}{(rri.cookie if rri.cookie else '-')}{bcolors.ENDC}") + # print(f"scriptName: {bcolors.OKGREEN}{rri.scriptName}{bcolors.ENDC}") + if pwd_complexity_raw: + print(f"Password Complexity: {bcolors.OKGREEN}{pwd_complexity_raw}{bcolors.ENDC}") + if pwd_complexity_detail: + c = pwd_complexity_detail + print(f"Password Complexity Data: {bcolors.OKBLUE}" + f"Length: {c.get('length')}; Lowercase: {c.get('lowercase')}; " + f"Uppercase: {c.get('caps')}; " + f"Digits: {c.get('digits')}; " + f"Symbols: {c.get('special')}; " + f"Symbols Chars: {c.get('specialChars')} {bcolors.ENDC}") else: print(f"Password Complexity: {bcolors.OKGREEN}[not set]{bcolors.ENDC}") print(f"Is Rotation Disabled: {bcolors.OKGREEN}{rri.disabled}{bcolors.ENDC}") - # Get schedule information - rq = pam_pb2.PAMGenericUidsRequest() - schedules_proto = router_get_rotation_schedules(params, rq) if schedules_proto: - schedules = list(schedules_proto.schedules) - for s in schedules: + for s in schedules_proto.schedules: if s.recordUid == record_uid_bytes: if s.noSchedule is True: print(f"Schedule Type: {bcolors.OKBLUE}Manual Rotation{bcolors.ENDC}") @@ -2707,6 +2748,8 @@ def is_resource_ok(resource_id, 
params, configuration_uid): print(f"\nCommand to manually rotate: {bcolors.OKGREEN}pam action rotate -r {record_uid}{bcolors.ENDC}") else: + if format_type == 'json': + return json.dumps({'status': rri_status_name, 'ready_to_rotate': False}) print(f'{bcolors.WARNING}Rotation Status: Not ready to rotate ({rri_status_name}){bcolors.ENDC}') diff --git a/keepercommander/commands/discoveryrotation_v1.py b/keepercommander/commands/discoveryrotation_v1.py index 11b19b8b9..a754b39de 100644 --- a/keepercommander/commands/discoveryrotation_v1.py +++ b/keepercommander/commands/discoveryrotation_v1.py @@ -21,7 +21,7 @@ import requests from keeper_secrets_manager_core.utils import url_safe_str_to_bytes -from .base import Command, GroupCommand, user_choice, dump_report_data, report_output_parser, field_to_title, FolderMixin +from .base import Command, GroupCommand, user_choice, dump_report_data, report_output_parser, json_output_parser, field_to_title, FolderMixin from .discoveryrotation import PAMLegacyCommand from .folder import FolderMoveCommand from .ksm import KSMCommand @@ -1173,7 +1173,8 @@ def execute(self, params, **kwargs): class PAMRouterGetRotationInfo(Command): - parser = argparse.ArgumentParser(prog='dr-router-get-rotation-info-parser') + parser = argparse.ArgumentParser(prog='dr-router-get-rotation-info-parser', + parents=[json_output_parser]) parser.add_argument('--record-uid', '-r', required=True, dest='record_uid', action='store', help='Record UID to rotate') @@ -1183,23 +1184,61 @@ def get_parser(self): def execute(self, params, **kwargs): record_uid = kwargs.get('record_uid') + format_type = kwargs.get('format', 'table') record_uid_bytes = url_safe_str_to_bytes(record_uid) rri = record_rotation_get(params, record_uid_bytes) rri_status_name = router_pb2.RouterRotationStatus.Name(rri.status) if rri_status_name == 'RRS_ONLINE': + configuration_uid = utils.base64_url_encode(rri.configurationUid) + gateway_name = rri.controllerName if rri.controllerName else '-' + 
gateway_uid = utils.base64_url_encode(rri.controllerUid) if rri.controllerUid else '-' + + admin_resource_uid = None + if rri.resourceUid: + admin_resource_uid = utils.base64_url_encode(rri.resourceUid) + + # Password complexity + pwd_complexity_raw = rri.pwdComplexity if rri.pwdComplexity else None + pwd_complexity_detail = None + if pwd_complexity_raw: + try: + record = params.record_cache.get(record_uid) + if record: + complexity = crypto.decrypt_aes_v2(utils.base64_url_decode(pwd_complexity_raw), + record['record_key_unencrypted']) + pwd_complexity_detail = json.loads(complexity.decode()) + except Exception: + pwd_complexity_detail = None + + if format_type == 'json': + result = { + 'status': rri_status_name, + 'ready_to_rotate': True, + 'pam_config_uid': configuration_uid, + 'node_id': rri.nodeId, + 'gateway_name': gateway_name, + 'gateway_uid': gateway_uid, + 'admin_resource_uid': admin_resource_uid, + 'password_complexity': pwd_complexity_raw, + 'password_complexity_detail': pwd_complexity_detail, + 'disabled': rri.disabled, + 'script_name': rri.scriptName if rri.scriptName else None, + } + return json.dumps(result, indent=2) + + # --- table output (original behaviour preserved) --- print(f'Rotation Status: {bcolors.OKBLUE}Ready to rotate ({rri_status_name}){bcolors.ENDC}') configuration_uid = utils.base64_url_encode(rri.configurationUid) print(f'PAM Config UID: {bcolors.OKBLUE}{configuration_uid}{bcolors.ENDC}') print(f'Node ID: {bcolors.OKBLUE}{rri.nodeId}{bcolors.ENDC}') - print(f"Gateway Name where the rotation will be performed: {bcolors.OKBLUE}{(rri.controllerName if rri.controllerName else '-')}{bcolors.ENDC}") - print(f"Gateway Uid: {bcolors.OKBLUE}{(utils.base64_url_encode(rri.controllerUid) if rri.controllerUid else '-') } {bcolors.ENDC}") - if rri.resourceUid: - resource_id = utils.base64_url_encode(rri.resourceUid) + print(f"Gateway Name where the rotation will be performed: {bcolors.OKBLUE}{gateway_name}{bcolors.ENDC}") + print(f"Gateway Uid: 
{bcolors.OKBLUE}{gateway_uid} {bcolors.ENDC}") + if admin_resource_uid: resource_ok = False - if resource_id in params.record_cache: + if admin_resource_uid in params.record_cache: configuration = vault.KeeperRecord.load(params, configuration_uid) if isinstance(configuration, vault.TypedRecord): field = configuration.get_typed_field('pamResources') @@ -1208,27 +1247,24 @@ def execute(self, params, **kwargs): if isinstance(rv, dict): resources = rv.get('resourceRef') if isinstance(resources, list): - resource_ok = resource_id in resources - print(f"Admin Resource Uid: {bcolors.OKBLUE if resource_ok else bcolors.FAIL}{resource_id}{bcolors.ENDC}") + resource_ok = admin_resource_uid in resources + print(f"Admin Resource Uid: {bcolors.OKBLUE if resource_ok else bcolors.FAIL}{admin_resource_uid}{bcolors.ENDC}") # print(f"Router Cookie: {bcolors.OKBLUE}{(rri.cookie if rri.cookie else '-')}{bcolors.ENDC}") # print(f"scriptName: {bcolors.OKGREEN}{rri.scriptName}{bcolors.ENDC}") - if rri.pwdComplexity: - print(f"Password Complexity: {bcolors.OKGREEN}{rri.pwdComplexity}{bcolors.ENDC}") - try: - record = params.record_cache.get(record_uid) - if record: - complexity = crypto.decrypt_aes_v2(utils.base64_url_decode(rri.pwdComplexity), record['record_key_unencrypted']) - c = json.loads(complexity.decode()) - print(f"Password Complexity Data: {bcolors.OKBLUE}Length: {c.get('length')}; Lowercase: {c.get('lowercase')}; Uppercase: {c.get('caps')}; Digits: {c.get('digits')}; Symbols: {c.get('special')} {bcolors.ENDC}") - except: - pass + if pwd_complexity_raw: + print(f"Password Complexity: {bcolors.OKGREEN}{pwd_complexity_raw}{bcolors.ENDC}") + if pwd_complexity_detail: + c = pwd_complexity_detail + print(f"Password Complexity Data: {bcolors.OKBLUE}Length: {c.get('length')}; Lowercase: {c.get('lowercase')}; Uppercase: {c.get('caps')}; Digits: {c.get('digits')}; Symbols: {c.get('special')} {bcolors.ENDC}") else: print(f"Password Complexity: {bcolors.OKGREEN}[not set]{bcolors.ENDC}") 
print(f"Is Rotation Disabled: {bcolors.OKGREEN}{rri.disabled}{bcolors.ENDC}") print(f"\nCommand to manually rotate: {bcolors.OKGREEN}pam action rotate -r {record_uid}{bcolors.ENDC}") else: + if format_type == 'json': + return json.dumps({'status': rri_status_name, 'ready_to_rotate': False}) print(f'{bcolors.WARNING}Rotation Status: Not ready to rotate ({rri_status_name}){bcolors.ENDC}') diff --git a/keepercommander/commands/folder.py b/keepercommander/commands/folder.py index f857b9f43..8774e2100 100644 --- a/keepercommander/commands/folder.py +++ b/keepercommander/commands/folder.py @@ -260,17 +260,23 @@ def folder_flags(f): if fmt in ('json', 'csv'): combined_table = [] - combined_headers = ['type', 'uid', 'name', 'details'] + combined_headers = ['type', 'uid', 'name', 'details', 'source'] if len(folders) > 0: for f in folders: - row = ['folder', f.uid, f.name, f'Flags: {folder_flags(f)}, Parent: {f.parent_uid or "/"}'] + # Check if folder is from Keeper Drive + is_keeper_drive = hasattr(params, 'keeper_drive_folders') and f.uid in params.keeper_drive_folders + source = 'KeeperDrive' if is_keeper_drive else 'Legacy' + row = ['folder', f.uid, f.name, f'Flags: {folder_flags(f)}, Parent: {f.parent_uid or "/"}', source] combined_table.append(row) if len(records) > 0: for record in records: + # Check if record is from Keeper Drive + is_keeper_drive = hasattr(params, 'keeper_drive_records') and record.record_uid in params.keeper_drive_records + source = 'KeeperDrive' if is_keeper_drive else 'Legacy' row = ['record', record.record_uid, record.title, - f'Type: {record.record_type}, Description: {vault_extensions.get_record_description(record)}'] + f'Type: {record.record_type}, Description: {vault_extensions.get_record_description(record)}', source] combined_table.append(row) combined_table.sort(key=lambda x: (x[0], (x[2] or '').lower())) @@ -279,12 +285,15 @@ def folder_flags(f): else: if len(folders) > 0: table = [] - headers = ['folder_uid', 'name', 'flags', 
'parent_uid'] + headers = ['folder_uid', 'name', 'flags', 'parent_uid', 'source'] colors = {} for f in folders: if f.color: colors[f.name] = f.color - row = [f.uid, f.name, folder_flags(f), f.parent_uid or '/'] + # Check if folder is from Keeper Drive + is_keeper_drive = hasattr(params, 'keeper_drive_folders') and f.uid in params.keeper_drive_folders + source = 'KeeperDrive' if is_keeper_drive else 'Legacy' + row = [f.uid, f.name, folder_flags(f), f.parent_uid or '/', source] table.append(row) table.sort(key=lambda x: (x[1] or '').lower()) # Only apply colorization if not JSON format @@ -297,9 +306,12 @@ def folder_flags(f): if len(records) > 0: table = [] - headers = ['record_uid', 'type', 'title', 'description'] + headers = ['record_uid', 'type', 'title', 'description', 'source'] for record in records: - row = [record.record_uid, record.record_type, record.title, vault_extensions.get_record_description(record)] + # Check if record is from Keeper Drive + is_keeper_drive = hasattr(params, 'keeper_drive_records') and record.record_uid in params.keeper_drive_records + source = 'KeeperDrive' if is_keeper_drive else 'Legacy' + row = [record.record_uid, record.record_type, record.title, vault_extensions.get_record_description(record), source] table.append(row) table.sort(key=lambda x: (x[2] or '').lower()) headers = base.fields_to_titles(headers) @@ -348,7 +360,7 @@ def get_parser(self): def execute(self, params, **kwargs): folder_name = kwargs['folder'] if 'folder' in kwargs else '' if folder_name: - if folder_name in params.folder_cache: + if folder_name in params.folder_cache or folder_name in params.keeper_drive_folders: params.current_folder = folder_name else: rs = try_resolve_path(params, folder_name) @@ -844,6 +856,16 @@ def execute(self, params, **kwargs): if src_folder.type == BaseFolderNode.RootFolderType: raise CommandError('mv', 'Root folder cannot be a source folder') + if src_folder.type == BaseFolderNode.KeeperDriveFolderType: + if dst_folder.type in 
{BaseFolderNode.SharedFolderType, BaseFolderNode.SharedFolderFolderType}: + raise CommandError('mv', 'Drive folders cannot be moved inside a Shared folder.') + raise CommandError('mv', 'Moving drive folders is currently not supported.') + + if dst_folder.type == BaseFolderNode.KeeperDriveFolderType: + if src_folder.type in {BaseFolderNode.SharedFolderType, BaseFolderNode.SharedFolderFolderType}: + raise CommandError('mv', 'Shared folders cannot be moved inside a drive folder.') + raise CommandError('mv', 'Folders cannot be moved inside a drive folder.') + dp = set() f = dst_folder while f is not None and f.uid is not None: @@ -885,6 +907,22 @@ def execute(self, params, **kwargs): FolderMoveCommand.prepare_transition_keys(params, src_folder, transition_keys, params.data_key) else: + if src_folder.type == BaseFolderNode.KeeperDriveFolderType: + raise CommandError('mv', 'Moving drive records is currently not supported.') + + if record_uid in getattr(params, 'keeper_drive_records', {}) \ + and dst_folder.type != BaseFolderNode.KeeperDriveFolderType: + raise CommandError( + 'mv', + 'KeeperDrive records cannot be linked or moved into legacy folders.' + ) + + if dst_folder.type == BaseFolderNode.KeeperDriveFolderType: + raise CommandError( + 'mv', + 'Legacy records cannot be linked or moved into a KeeperDrive folder.' 
+ ) + move = { 'uid': record_uid, 'type': 'record', @@ -1755,23 +1793,146 @@ def get_teams_info(teams): return result def tree_node(node): - node_uid = node.record_uid if isinstance(node, Record) else node.uid or '' - node_name = node.title if isinstance(node, Record) else node.name + node_uid = node.record_uid if isinstance(node, Record) else (node.uid if hasattr(node, 'uid') else '') + node_name = node.title if isinstance(node, Record) else (node.name if hasattr(node, 'name') else 'Unknown') + + # Check if it's a KeeperDrive item and get proper name + is_keeper_drive = False + if isinstance(node, Record): + is_keeper_drive = hasattr(params, 'keeper_drive_records') and node.record_uid in params.keeper_drive_records + elif hasattr(node, 'type') and node.type == 'keeper_drive_folder': + is_keeper_drive = True + elif isinstance(node, BaseFolderNode) and not isinstance(node, Record): + is_keeper_drive = hasattr(params, 'keeper_drive_folders') and node_uid in params.keeper_drive_folders + # Get folder name from keeper_drive_folders if available + if is_keeper_drive and node_uid in params.keeper_drive_folders: + kd_folder_name = params.keeper_drive_folders[node_uid].get('name', node_name) + if kd_folder_name: + node_name = kd_folder_name + node_name = f'{node_name} ({node_uid})' if verbose else node_name share_info = get_share_info(node) if isinstance(node, SharedFolderNode) and shares else '' - node_name = f'{Style.DIM}{node_name} [Record]{Style.NORMAL}' if isinstance(node, Record) \ - else f'{node_name}{Style.BRIGHT} [SHARED]{Style.NORMAL}{share_info}' if isinstance(node, SharedFolderNode) \ - else node_name - - dir_nodes = [] if isinstance(node, Record) \ - else [params.folder_cache.get(fuid) for fuid in node.subfolders] + + # Format node name based on type + if isinstance(node, Record): + kd_label = ' [KD Record]' if is_keeper_drive else ' [Record]' + node_name = f'{Style.DIM}{node_name}{kd_label}{Style.NORMAL}' + elif isinstance(node, SharedFolderNode): + node_name 
= f'{node_name}{Style.BRIGHT} [SHARED]{Style.NORMAL}{share_info}' + elif is_keeper_drive: + node_name = f'{node_name}{Style.BRIGHT} [KD Folder]{Style.NORMAL}' + + dir_nodes = [] + if not isinstance(node, Record): + # Get regular subfolders from folder_cache + if hasattr(node, 'subfolders'): + dir_nodes = [params.folder_cache.get(fuid) for fuid in node.subfolders if params.folder_cache.get(fuid)] + + # Check if this is root folder and add KeeperDrive root-level folders + is_root = (isinstance(node, BaseFolderNode) and (node.type == '/' or node_uid == '')) or \ + (hasattr(node, 'type') and node.type == 'keeper_drive_folder' and not node_uid) + + if is_root and hasattr(params, 'keeper_drive_folders') and params.keeper_drive_folders: + # Add all KeeperDrive folders that are at root level + for kd_uid, kd_folder in params.keeper_drive_folders.items(): + parent_uid = kd_folder.get('parent_uid') + # Check if this folder is at root: + # - parent_uid is None, empty string, 'root', or the special root UID + # - Also check if parent doesn't exist in keeper_drive_folders (orphan = root level) + is_root_folder = ( + parent_uid is None or + parent_uid == '' or + parent_uid == 'root' or + parent_uid == 'AAAAAAAAAAAAAAAAAPmtNA' or + (parent_uid and parent_uid not in params.keeper_drive_folders) + ) + if is_root_folder: + # Check if already in dir_nodes + already_added = any(hasattr(n, 'uid') and n.uid == kd_uid for n in dir_nodes if n) + if not already_added: + # Check if in folder_cache first + if kd_uid in params.folder_cache: + kd_node = params.folder_cache.get(kd_uid) + if kd_node: + dir_nodes.append(kd_node) + else: + # Create a temporary folder node for KeeperDrive folders not in folder_cache + temp_node = type('FolderNode', (), { + 'uid': kd_uid, + 'name': kd_folder.get('name', 'Unnamed'), + 'type': 'keeper_drive_folder', + 'subfolders': [] + })() + dir_nodes.append(temp_node) + + # Add KeeperDrive subfolders if this is a KeeperDrive folder + elif not isinstance(node, 
Record) and hasattr(params, 'keeper_drive_folders') and node_uid: + # Find child folders for this KeeperDrive folder + for child_uid, child_folder in params.keeper_drive_folders.items(): + parent_uid = child_folder.get('parent_uid', '') + if parent_uid == node_uid: + # Check if already in dir_nodes + already_added = any(hasattr(n, 'uid') and n.uid == child_uid for n in dir_nodes if n) + if not already_added: + if child_uid in params.folder_cache: + child_node = params.folder_cache.get(child_uid) + if child_node: + dir_nodes.append(child_node) + else: + # Create a temporary folder node + temp_node = type('FolderNode', (), { + 'uid': child_uid, + 'name': child_folder.get('name', 'Unnamed'), + 'type': 'keeper_drive_folder', + 'subfolders': [] + })() + dir_nodes.append(temp_node) + rec_nodes = [] if show_records and isinstance(node, BaseFolderNode): - node_uid = '' if node.type == '/' else node.uid - rec_uids = {rec for recs in get_contained_record_uids(params, node_uid).values() for rec in recs} + node_uid_for_recs = '' if node.type == '/' else node.uid + + # Get legacy records + rec_uids = {rec for recs in get_contained_record_uids(params, node_uid_for_recs).values() for rec in recs} records = [api.get_record(params, rec_uid) for rec_uid in rec_uids] records = [r for r in records if isinstance(r, Record)] rec_nodes.extend(records) + + # Add KeeperDrive records for this folder + if hasattr(params, 'keeper_drive_folder_records'): + # For root folder, collect KD records that are not inside any known KD sub-folder + if is_root: + kd_folders = getattr(params, 'keeper_drive_folders', {}) + shown_rec_uids = set(rec_uids) + # Records associated with container UIDs that are NOT real KD sub-folders + # (includes the KD root UID and any other non-folder containers) + for folder_uid, kd_rec_uids in params.keeper_drive_folder_records.items(): + if folder_uid not in kd_folders: + for rec_uid in kd_rec_uids: + if rec_uid not in shown_rec_uids: + rec = api.get_record(params, 
rec_uid) + if isinstance(rec, Record): + rec_nodes.append(rec) + shown_rec_uids.add(rec_uid) + # Also show KD records that have NO folder association at all + if hasattr(params, 'keeper_drive_records'): + all_filed = set() + for uids in params.keeper_drive_folder_records.values(): + all_filed.update(uids) + for rec_uid in params.keeper_drive_records: + if rec_uid not in all_filed and rec_uid not in shown_rec_uids: + rec = api.get_record(params, rec_uid) + if isinstance(rec, Record): + rec_nodes.append(rec) + shown_rec_uids.add(rec_uid) + # For specific folders + elif node_uid_for_recs in params.keeper_drive_folder_records: + kd_rec_uids = params.keeper_drive_folder_records[node_uid_for_recs] + for rec_uid in kd_rec_uids: + if rec_uid not in rec_uids: + rec = api.get_record(params, rec_uid) + if isinstance(rec, Record): + rec_nodes.append(rec) dir_nodes.sort(key=lambda f: f.name.lower() if f.name else '', reverse=False) rec_nodes.sort(key=lambda r: r.title.lower(), reverse=False) diff --git a/keepercommander/commands/keeper_drive/__init__.py b/keepercommander/commands/keeper_drive/__init__.py new file mode 100644 index 000000000..e9bce5fd4 --- /dev/null +++ b/keepercommander/commands/keeper_drive/__init__.py @@ -0,0 +1,98 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' 1: + from keepercommander.display import dump_report_data + rows = [[f, kd_folders[f].get('name', '')] for f in matches] + dump_report_data(rows, ['Folder UID', 'Name'], + title='Multiple folders match the name', row_number=True) + return None + + @staticmethod + def _resolve_as_record(params, uid): + kd_records = getattr(params, 'keeper_drive_records', {}) + if uid in kd_records: + return uid + lower = uid.lower() + kd_data = getattr(params, 'keeper_drive_record_data', {}) + matches = [] + for ruid in kd_records: + if ruid in kd_data and 'data_json' in kd_data[ruid]: + title = kd_data[ruid]['data_json'].get('title', '') + if title and lower in title.lower(): + matches.append((ruid, title)) + if 
len(matches) == 1: + return matches[0][0] + if len(matches) > 1: + from keepercommander.display import dump_report_data + dump_report_data( + sorted(matches, key=lambda x: x[1]), + ['Record UID', 'Title'], + title='Multiple records match the title', row_number=True) + return None + + # ── Record display ──────────────────────────────────────────────── + + def _record_detail(self, params, record_uid, verbose, unmask): + # SECURITY: this method MUST only emit field values through ``print`` + # (stdout). Never pass unmasked record content to ``logging.*`` — + # operators frequently configure file/syslog handlers that would + # persist secrets to disk. ``--unmask`` is user-controlled and should + # affect what the terminal sees, not what gets archived. + meta = load_record_metadata(params, record_uid) + + print('') + print('{0:>20s}: {1:<20s}'.format('UID', record_uid)) + print('{0:>20s}: {1:<20s}'.format('Type', meta['type'] or '')) + if meta['title']: + print('{0:>20s}: {1:<20s}'.format('Title', meta['title'])) + + login_val = self._extract_field_value(meta['fields'], 'login') + if login_val: + print('{0:>20s}: {1:<20s}'.format('Login', login_val)) + + password_val = self._extract_field_value(meta['fields'], 'password') + if password_val: + display_pw = password_val if unmask else '********' + print('{0:>20s}: {1:<20s}'.format('Password', display_pw)) + + url_val = self._extract_field_value(meta['fields'], 'url') + if url_val: + print('{0:>20s}: {1:<20s}'.format('URL', url_val)) + + shown_types = {'login', 'password', 'url'} + for f in meta['fields']: + ftype = f.get('type', '') + if ftype in shown_types: + continue + label = f.get('label') or ftype.replace('_', ' ').title() + values = f.get('value', []) + if not isinstance(values, list): + values = [values] + for val in values: + if not val and val != 0: + continue + if ftype in self._MASKED_TYPES: + dval = str(val) if unmask else '********' + elif isinstance(val, dict): + dval = ', '.join(f'{k}: {v}' for k, v in 
val.items() if v) + else: + dval = str(val) + print('{0:>20s}: {1:21s} {1}'.format('Notes:' if i == 0 else '', line.strip())) + + self._print_record_permissions(params, record_uid, verbose) + + @staticmethod + def _extract_field_value(fields, field_type): + """Extract the first non-empty value for a given field type.""" + for f in fields: + if f.get('type', '') == field_type: + values = f.get('value', []) + if not isinstance(values, list): + values = [values] + for val in values: + if val: + return str(val) if not isinstance(val, dict) else \ + ', '.join(f'{k}: {v}' for k, v in val.items() if v) + return '' + + def _record_json(self, params, record_uid, verbose, _unmask=False): + meta = load_record_metadata(params, record_uid) + ro = { + 'record_uid': record_uid, 'title': meta['title'], + 'type': meta['type'], 'version': meta['version'], + 'revision': meta['revision'], + } + if meta['folder_location']: + ro['folder'] = meta['folder_location'] + if meta['fields']: + ro['fields'] = meta['fields'] + if meta['notes']: + ro['notes'] = meta['notes'] + + try: + accesses = _kd.get_record_accesses_v3( + params, [record_uid]).get('record_accesses', []) + if accesses: + user_perms = [] + for a in accesses: + accessor = a.get('accessor_name') or a.get('access_type_uid', '') + role = get_access_role_label(a) + entry = { + 'username': accessor, + 'owner': a.get('owner', False), + 'shareable': a.get('can_approve_access', False) or a.get('can_update_access', False), + 'editable': a.get('can_edit', False), + 'role': role, + } + if verbose: + for flag, _ in RECORD_PERM_LABELS: + entry[flag] = a.get(flag, False) + user_perms.append(entry) + if user_perms: + ro['user_permissions'] = user_perms + except Exception as e: + logger.debug('Could not retrieve record access: %s', e) + + try: + from ... 
import api + share_admins = api.get_share_admins_for_record(params, record_uid) or [] + if share_admins: + ro['share_admins'] = share_admins + except Exception as e: + logger.debug('Could not retrieve share admins: %s', e) + + print(json.dumps(ro, indent=2)) + + @staticmethod + def _print_record_permissions(params, record_uid, verbose): + """Display record permissions in a format similar to the legacy get command.""" + try: + accesses = _kd.get_record_accesses_v3( + params, [record_uid]).get('record_accesses', []) + if not accesses: + return + + print('') + print('User Permissions:') + for a in accesses: + accessor = a.get('accessor_name') or a.get('access_type_uid', '') + is_owner = a.get('owner', False) + can_edit = a.get('can_edit', False) + can_share = a.get('can_approve_access', False) or a.get('can_update_access', False) + role = get_access_role_label(a) + + print('') + print(' User: ' + accessor) + if is_owner: + print(' Owner: Yes') + else: + # Owners always have full control over the record, so + # surfacing a role for them is redundant noise. Show + # the role only for non-owner accessors. + print(' Role: ' + role) + print(' Shareable: ' + ('Yes' if can_share else 'No')) + print(' Read-Only: ' + ('Yes' if not can_edit else 'No')) + + if verbose: + print(f' {"Permission":<20} Value') + print(f' {"-"*20} -----') + for flag, lbl in RECORD_PERM_LABELS: + print(f' {lbl:<20} {"Y" if a.get(flag) else "N"}') + + # Share admins are sourced from the enterprise sharing-admins API + # so the list matches the legacy ``get`` command (every user + # designated as a Share Admin for this record at the enterprise + # level), not just the users currently holding full-manager + # access. + share_admins = [] + try: + from ... 
import api + share_admins = api.get_share_admins_for_record(params, record_uid) or [] + except Exception as e: + logger.debug('Could not retrieve share admins: %s', e) + + if share_admins: + print('') + total = len(share_admins) + max_shown = 10 + if total <= max_shown: + print(f'Share Admins ({total}):') + for admin in share_admins: + print(f' {admin}') + else: + print(f'Share Admins ({total}, showing first {max_shown}):') + for admin in share_admins[:max_shown]: + print(f' {admin}') + print(f' ... and {total - max_shown} more') + except Exception as e: + logger.debug('Could not retrieve record access: %s', e) + + # ── Folder display ──────────────────────────────────────────────── + + @staticmethod + def _folder_permission_summary(accessor): + """Return the KeeperDrive role label for a folder accessor. + + Uses the server-supplied ``role`` (an ``AccessRoleType`` enum name) and + renders it as a canonical KeeperDrive role label (e.g. ``full-manager``, + ``shared-manager``, ``viewer``). Falls back to permission-flag based + inference for legacy access rows that omit ``role``. 
+ """ + if not isinstance(accessor, dict): + return '' + role_name = accessor.get('role') + if role_name: + return format_role_display(role_name) + perms = accessor.get('permissions') or {} + if not perms: + return '' + return get_access_role_label({ + 'can_change_ownership': perms.get('can_change_ownership', False), + 'can_delete': perms.get('can_delete', False), + 'can_update_access': perms.get('can_update_access', False), + 'can_approve_access': perms.get('can_approve_access', False), + 'can_edit': perms.get('can_edit_records', False), + 'can_view': perms.get('can_view_records', False), + 'can_list_access': perms.get('can_list_access', False), + 'can_view_title': perms.get('can_view_records', False), + }) + + @staticmethod + def _folder_detail(params, folder_uid, verbose): + fobj = getattr(params, 'keeper_drive_folders', {}).get(folder_uid, {}) + name = fobj.get('name', folder_uid) + + print('') + print('{0:>25s}: {1:<20s}'.format('KeeperDrive Folder UID', folder_uid)) + print('{0:>25s}: {1}'.format('Name', name)) + + KeeperDriveGetCommand._print_folder_permissions(params, folder_uid, verbose) + + @staticmethod + def _folder_json(params, folder_uid, verbose): + fobj = getattr(params, 'keeper_drive_folders', {}).get(folder_uid, {}) + name = fobj.get('name', folder_uid) + owner_username = fobj.get('owner_username') + owner_account_uid = fobj.get('owner_account_uid') + + fo = {'keeper_drive_folder_uid': folder_uid, 'name': name} + if owner_username: + fo['owner'] = owner_username + + try: + result = _kd.get_folder_access_v3(params, folder_uids=[folder_uid]) + for fr in result.get('results', []): + if not fr.get('success'): + continue + accessors = fr.get('accessors', []) + if not accessors: + continue + user_perms = [] + team_perms = [] + share_admins = [] + for a in accessors: + accessor = a.get('username') or a.get('accessor_uid', '') + at = a.get('access_type', '') + perms = a.get('permissions', {}) + is_owner = KeeperDriveGetCommand._is_folder_owner(a, 
owner_username, owner_account_uid) + role_label_display = ('owner' if is_owner + else KeeperDriveGetCommand._folder_permission_summary(a)) + entry = { + 'accessor': accessor, + 'access_type': at, + 'role': role_label_display, + 'inherited': a.get('inherited', False), + } + if verbose and perms: + entry['permission_flags'] = perms + if at == 'AT_TEAM': + team_perms.append(entry) + else: + user_perms.append(entry) + if a.get('role', '') == 'MANAGER': + share_admins.append(accessor) + if user_perms: + fo['user_permissions'] = user_perms + if team_perms: + fo['team_permissions'] = team_perms + if share_admins: + fo['share_admins'] = share_admins + except Exception as e: + logger.debug('Could not retrieve folder access: %s', e) + + print(json.dumps(fo, indent=2)) + + @staticmethod + def _is_folder_owner(accessor, owner_username, owner_account_uid): + """Return True if *accessor* matches the folder ownerInfo from sync-down.""" + if not accessor: + return False + if owner_username: + au = (accessor.get('username') or '').lower() + if au and au == owner_username.lower(): + return True + if owner_account_uid: + uid = accessor.get('accessor_uid') or '' + if uid and uid == owner_account_uid: + return True + return False + + @staticmethod + def _print_folder_permissions(params, folder_uid, verbose): + """Display folder permissions in a format similar to the legacy get command.""" + try: + fobj = getattr(params, 'keeper_drive_folders', {}).get(folder_uid, {}) or {} + owner_username = fobj.get('owner_username') + owner_account_uid = fobj.get('owner_account_uid') + + result = _kd.get_folder_access_v3(params, folder_uids=[folder_uid]) + for fr in result.get('results', []): + if not fr.get('success'): + err = fr.get('error', {}) + logging.warning(" Access error: %s — %s", + err.get('status'), err.get('message')) + continue + accessors = fr.get('accessors', []) + if not accessors: + continue + + users = [] + teams = [] + share_admins = [] + for a in accessors: + at = 
a.get('access_type', '') + if at == 'AT_TEAM': + teams.append(a) + else: + users.append(a) + if a.get('role', '') == 'MANAGER': + name = a.get('username') or a.get('accessor_uid', '') + share_admins.append((name, a)) + + if users: + print('') + print('{0:>25s}:'.format('User Permissions')) + for a in users: + label = a.get('username') or a.get('accessor_uid', '') + perms = a.get('permissions', {}) + if KeeperDriveGetCommand._is_folder_owner(a, owner_username, owner_account_uid): + role_str = 'owner' + else: + role_str = KeeperDriveGetCommand._folder_permission_summary(a) + print('{0:>25s}: {1}'.format(label, role_str)) + if verbose: + if a.get('date_created'): + print('{0:>25s} Created: {1}'.format('', format_timestamp(a['date_created']))) + if a.get('last_modified'): + print('{0:>25s} Modified: {1}'.format('', format_timestamp(a['last_modified']))) + if perms: + print('{0:>25s} {1:<26} {2}'.format('', 'Permission', 'Value')) + print('{0:>25s} {1:<26} {2}'.format('', '-' * 26, '-----')) + for flag, lbl in FOLDER_PERM_LABELS: + print('{0:>25s} {1:<26} {2}'.format( + '', lbl, 'Y' if perms.get(flag) else 'N')) + + if teams: + print('') + print('{0:>25s}:'.format('Team Permissions')) + for a in teams: + label = a.get('username') or a.get('accessor_uid', '') + perms = a.get('permissions', {}) + role_str = KeeperDriveGetCommand._folder_permission_summary(a) + print('{0:>25s}: {1}'.format(label, role_str)) + if verbose and perms: + print('{0:>25s} {1:<26} {2}'.format('', 'Permission', 'Value')) + print('{0:>25s} {1:<26} {2}'.format('', '-' * 26, '-----')) + for flag, lbl in FOLDER_PERM_LABELS: + print('{0:>25s} {1:<26} {2}'.format( + '', lbl, 'Y' if perms.get(flag) else 'N')) + + if share_admins: + print('') + print('{0:>25s}:'.format('Share Administrators')) + for admin_name, admin_accessor in share_admins: + if KeeperDriveGetCommand._is_folder_owner(admin_accessor, owner_username, owner_account_uid): + admin_role = 'owner' + else: + admin_role = 'full-manager' + 
print('{0:>25s}: {1}'.format(admin_name, admin_role)) + + print('') + except Exception as e: + logger.debug('Could not retrieve folder access: %s', e) diff --git a/keepercommander/commands/keeper_drive/folder_commands.py b/keepercommander/commands/keeper_drive/folder_commands.py new file mode 100644 index 000000000..d5fa807ea --- /dev/null +++ b/keepercommander/commands/keeper_drive/folder_commands.py @@ -0,0 +1,527 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' 100: + raise CommandError('kd-rmdir', 'Maximum 100 folders per invocation') + + if operation == 'delete-permanent' and not force and not dry_run: + print( + '\n *** WARNING ***\n' + ' --operation delete-permanent is IRREVERSIBLE.\n' + ' All sub-folders and records inside will be permanently destroyed.\n') + + with command_error_handler('kd-rmdir'): + self._preview_and_confirm(params, removals, operation, force, dry_run, quiet) + + def _preview_and_confirm(self, params, removals, operation, force, dry_run, quiet): + result = _kd.remove_folder_v3(params, removals, dry_run=True) + any_error = False + error_lines = [] + summary_lines = [] + + for pr in result['preview_results']: + name = self._folder_name(params, pr['folder_uid']) + if pr.get('error'): + any_error = True + err = pr['error'] + error_lines.append( + f" • {name} [{pr['folder_uid']}]: {err.get('code', '')} — {err.get('message', '')}" + ) + else: + summary_lines.extend( + self._impact_summary(pr['folder_uid'], name, operation, pr.get('impact'), quiet) + ) + + if summary_lines: + for line in summary_lines: + print(line) + + if any_error: + print(f"\n{'[Dry-run] ' if dry_run else ''}The following folder(s) cannot be removed:") + for line in error_lines: + print(line) + if not dry_run: + print('\nAborting — fix the errors above before retrying.') + return + + if dry_run: + print('\n[Dry-run] No folders were deleted.') + return + + if not force: + from ..base import user_choice + prompt = ('Do you want to permanently delete the folder(s) and all 
their contents?' + if operation == 'delete-permanent' + else 'Do you want to proceed with the folder deletion?') + if user_choice(prompt, 'yn', default='n').lower() != 'y': + return + + confirm_result = _kd.remove_folder_v3(params, removals, dry_run=False) + if confirm_result['confirmed']: + params.sync_data = True + else: + logging.warning('Folder removal was not confirmed by the server.') + + @staticmethod + def _folder_name(params, folder_uid): + kd = getattr(params, 'keeper_drive_folders', {}) + f = kd.get(folder_uid) or getattr(params, 'subfolder_cache', {}).get(folder_uid, {}) + return f.get('name') or folder_uid + + @staticmethod + def _impact_summary(folder_uid, name, operation, impact, quiet): + action = 'permanently deleted' if operation == 'delete-permanent' else 'moved to trash' + lines = [f"\nThe following folder will be {action}:"] + lines.append(f" {name} [{folder_uid}]") + if impact and not quiet: + parts = [] + folders = impact.get('folders_count', 0) + records = impact.get('records_count', 0) + users = impact.get('affected_users_count', 0) + teams = impact.get('affected_teams_count', 0) + if folders: + parts.append(f"{folders} sub-folder(s)") + if records: + parts.append(f"{records} record(s)") + if users: + parts.append(f"{users} user(s)") + if teams: + parts.append(f"{teams} team(s)") + if parts: + lines.append(f" This will affect: {', '.join(parts)}") + for w in impact.get('warnings', []): + lines.append(f" Warning: {w}") + return lines diff --git a/keepercommander/commands/keeper_drive/helpers.py b/keepercommander/commands/keeper_drive/helpers.py new file mode 100644 index 000000000..6dfbe49ca --- /dev/null +++ b/keepercommander/commands/keeper_drive/helpers.py @@ -0,0 +1,624 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' = 500: + try: + api.load_available_teams(params) + teams_map.update({t.get('team_uid'): t.get('team_name') + for t in (params.available_team_cache or [])}) + except Exception: + pass + + matches = [uid for uid, name in 
def find_folder_location(params, record_uid):
    """Return the display name of the first folder containing *record_uid*.

    Walks ``params.keeper_drive_folder_records`` in cache order; the root
    folder is reported as ``'root'`` and folders missing from the folder
    cache fall back to their UID.  Returns '' when the record is not
    linked anywhere.
    """
    membership = getattr(params, 'keeper_drive_folder_records', {})
    folders = getattr(params, 'keeper_drive_folders', {})
    for folder_uid, record_set in membership.items():
        if record_uid not in record_set:
            continue
        if folder_uid == ROOT_FOLDER_UID:
            return 'root'
        folder = folders.get(folder_uid)
        return folder.get('name', folder_uid) if folder else folder_uid
    return ''


def collect_records_in_folder(params, folder_uid, recursive=False):
    """Walk Keeper Drive membership tables to collect record UIDs in *folder_uid*.

    KeeperDrive does not store ``record_uids`` / ``children`` on folder
    objects; record membership lives in ``params.keeper_drive_folder_records``
    and the folder hierarchy in
    ``params.keeper_drive_folders[*]['parent_uid']``.  This helper walks
    both, optionally recursing into sub-folders (depth-first, pre-order,
    cache order among siblings).

    Returns an ordered list of unique record UIDs (preserves first-seen
    order).
    """
    kd_folders = getattr(params, 'keeper_drive_folders', {})
    kd_folder_records = getattr(params, 'keeper_drive_folder_records', {})

    # Build a parent -> [child folder UIDs] index once instead of rescanning
    # every folder for each visited node (the previous walk was O(F * V)).
    children = {}
    if recursive:
        for child_uid, child_obj in kd_folders.items():
            children.setdefault(child_obj.get('parent_uid'), []).append(child_uid)

    seen = set()
    record_uids = []
    visited = set()

    def _walk(fuid):
        if fuid in visited:
            return
        visited.add(fuid)
        for rec_uid in kd_folder_records.get(fuid, set()) or ():
            if rec_uid not in seen:
                seen.add(rec_uid)
                record_uids.append(rec_uid)
        for child_uid in children.get(fuid, ()):
            _walk(child_uid)

    _walk(folder_uid)
    return record_uids
def infer_role(access):
    """Derive a display role name from permission flags (most permissive wins).

    Follows the official permission matrix::

        full-manager > content-share-manager > shared-manager >
        content-manager > viewer > contributor > requestor > navigator
    """
    def flag(name):
        return bool(access.get(name))

    if flag('can_change_ownership') or flag('can_delete'):
        return 'full-manager'
    if flag('can_update_access'):
        return 'content-share-manager' if flag('can_approve_access') else 'shared-manager'
    if flag('can_edit'):
        return 'content-manager'
    if flag('can_view'):
        return 'viewer' if flag('can_list_access') else 'contributor'
    if flag('can_view_title'):
        return 'requestor'
    return 'navigator'
# Map backend AccessRoleType enum names to KeeperDrive display labels.
# Source of truth: folder_pb2.AccessRoleType (NAVIGATOR=0 ... MANAGER=6).
_ACCESS_ROLE_DISPLAY_LABELS = {
    'NAVIGATOR': 'contributor',
    'REQUESTOR': 'contributor',
    'VIEWER': 'viewer',
    'SHARED_MANAGER': 'shared-manager',
    'CONTENT_MANAGER': 'content-manager',
    'CONTENT_SHARE_MANAGER': 'content-share-manager',
    'MANAGER': 'full-manager',
    'UNRESOLVED': 'unresolved',
}


def format_role_display(role):
    """Convert an ``AccessRoleType`` to a KeeperDrive display role label.

    Accepts either the proto enum name (``'SHARED_MANAGER'``) or its integer
    value, and returns the canonical hyphenated lowercase label used across
    KeeperDrive (``'shared-manager'``, ``'full-manager'``, ``'viewer'`` …).
    Unknown roles fall back to a best-effort lowercase hyphenated form;
    anything that is neither str nor int is stringified.
    """
    if role is None or role == '':
        return ''
    if isinstance(role, int):
        from ...proto import folder_pb2
        try:
            role = folder_pb2.AccessRoleType.Name(role)
        except Exception:
            return str(role)
    if not isinstance(role, str):
        return str(role)
    normalized = role.upper().replace('-', '_')
    fallback = role.lower().replace('_', '-')
    return _ACCESS_ROLE_DISPLAY_LABELS.get(normalized, fallback)
+ """ + role_int = access.get('access_role_type') + if role_int is not None: + return format_role_display(role_int) + return infer_role(access) + + +# ═══════════════════════════════════════════════════════════════════════════ +# Formatting helpers +# ═══════════════════════════════════════════════════════════════════════════ + +def format_timestamp(ms): + """Format a millisecond epoch timestamp as ``'YYYY-MM-DD HH:MM:SS'``.""" + if ms: + return datetime.datetime.fromtimestamp(ms / 1000).strftime('%Y-%m-%d %H:%M:%S') + return '' + + +# ═══════════════════════════════════════════════════════════════════════════ +# Permission checks +# ═══════════════════════════════════════════════════════════════════════════ + +def check_folder_edit_permission(params, folder_uid, cmd_name): + """Raise if the current user cannot edit (rename/recolor) the folder.""" + _check_folder_permission(params, folder_uid, 'can_update_setting', + 'You do not have permission to edit this folder.', cmd_name) + + +def check_folder_share_permission(params, folder_uid, cmd_name): + """Raise if the current user cannot share the folder.""" + _check_folder_permission(params, folder_uid, 'can_update_access', + 'You do not have permission to share this folder.', cmd_name) + + +def check_folder_delete_permission(params, folder_uid, cmd_name): + """Raise if the current user cannot delete the folder.""" + _check_folder_permission(params, folder_uid, 'can_delete', + 'You do not have permission to delete this folder.', cmd_name) + + +def check_record_edit_permission(params, record_uid, cmd_name): + """Raise if the current user cannot edit the record.""" + _check_record_permission(params, record_uid, 'can_edit', + 'You do not have edit permissions on this record.', cmd_name) + + +def check_record_share_permission(params, record_uid, cmd_name): + """Raise if the current user cannot share the record.""" + _check_record_permission(params, record_uid, 'can_update_access', + 'You do not have permission to share 
this record.', cmd_name) + + +def check_record_delete_permission(params, record_uid, cmd_name): + """Raise if the current user cannot delete the record.""" + _check_record_permission(params, record_uid, 'can_delete', + 'You do not have permission to delete this record.', cmd_name) + + +def _current_user_account_uid(params): + """Return the base64url-encoded account UID for the current session, or ''.""" + from ... import utils + raw = getattr(params, 'account_uid_bytes', None) + if not raw: + return '' + try: + return utils.base64_url_encode(raw) + except Exception: + return '' + + +def _is_current_user_access(access, params, current_account_uid): + """Return True if *access* belongs to the currently logged-in user. + + Matches by ``username`` first (the populated case after a successful + user-cache resolution) then falls back to ``access_type_uid`` / + ``access_uid`` against the current account UID for sync windows where + the username has not yet been filled in from ``params.user_cache``. + """ + username = access.get('username') + if username and username == params.user: + return True + if not current_account_uid: + return False + accessor_uid = access.get('access_type_uid') or access.get('access_uid') + return bool(accessor_uid) and accessor_uid == current_account_uid + + +def _check_folder_permission(params, folder_uid, permission_key, error_message, cmd_name): + """Enforce a folder permission for the current user. + + Behaviour: + * If the cache has no access entries for *folder_uid* at all, skip the + check (the server is the source of truth and will reject if needed). + This avoids false-positives during a partial / first sync. + * If access entries exist but **none** matches the current user, deny + (treat the user as having no row, not as having implicit access). + * If the matching entry is OWNER, allow. + * Otherwise, allow only when ``permissions[permission_key]`` is truthy. 
+ """ + from ...proto import folder_pb2 + accesses = getattr(params, 'keeper_drive_folder_accesses', {}).get(folder_uid, []) + if not accesses: + return + + current_account_uid = _current_user_account_uid(params) + for fa in accesses: + if not _is_current_user_access(fa, params, current_account_uid): + continue + if fa.get('access_type') == int(folder_pb2.AT_OWNER): + return + perms = fa.get('permissions', {}) or {} + if perms.get(permission_key): + return + raise CommandError(cmd_name, error_message) + + # Access list is non-empty but the current user is not in it. + raise CommandError(cmd_name, error_message) + + +def _check_record_permission(params, record_uid, permission_key, error_message, cmd_name): + """Enforce a record permission for the current user. + + Same fail-closed semantics as :func:`_check_folder_permission`. + """ + accesses = getattr(params, 'keeper_drive_record_accesses', {}).get(record_uid, []) + if not accesses: + return + + current_account_uid = _current_user_account_uid(params) + for ra in accesses: + if not _is_current_user_access(ra, params, current_account_uid): + continue + if ra.get('owner'): + return + if ra.get(permission_key): + return + raise CommandError(cmd_name, error_message) + + raise CommandError(cmd_name, error_message) + + +# ═══════════════════════════════════════════════════════════════════════════ +# Record metadata loading +# ═══════════════════════════════════════════════════════════════════════════ + +def load_record_metadata(params, record_uid): + """Load record metadata from cache, falling back to the v3 details API. + + Returns a dict with keys: + ``title``, ``type``, ``fields``, ``notes``, + ``revision``, ``version``, ``folder_location`` + """ + from ... 
import keeper_drive as _kd + + title = record_uid + rec_type = '' + fields = [] + notes = '' + revision = 0 + version = 0 + + kd_record_data = getattr(params, 'keeper_drive_record_data', {}) + if record_uid in kd_record_data: + data_obj = kd_record_data[record_uid] + if 'data_json' in data_obj: + dj = data_obj['data_json'] + title = dj.get('title', record_uid) + rec_type = dj.get('type', '') + fields = dj.get('fields', []) + notes = dj.get('notes', '') or '' + + kd_records = getattr(params, 'keeper_drive_records', {}) + if record_uid in kd_records: + rec_obj = kd_records[record_uid] + revision = rec_obj.get('revision', 0) + version = rec_obj.get('version', 0) + + if title == record_uid: + try: + det = _kd.get_record_details_v3(params, [record_uid]) + if det['data']: + d = det['data'][0] + title = d.get('title', record_uid) + rec_type = d.get('type', '') + revision = d.get('revision', 0) + version = d.get('version', 0) + except Exception: + pass + + return { + 'title': title, + 'type': rec_type, + 'fields': fields, + 'notes': notes, + 'revision': revision, + 'version': version, + 'folder_location': find_folder_location(params, record_uid), + } diff --git a/keepercommander/commands/keeper_drive/parsers.py b/keepercommander/commands/keeper_drive/parsers.py new file mode 100644 index 000000000..a444a2c6b --- /dev/null +++ b/keepercommander/commands/keeper_drive/parsers.py @@ -0,0 +1,357 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' [(mi)nutes|(h)ours|(d)ays|(mo)nths|(y)ears]', type=str, + help='share expiration: never or period (e.g. 
30d, 6mo, 1y)') + + +keeper_drive_record_permission_parser = _make_parser( + 'kd-record-permission', 'Modify the permissions of a record') +keeper_drive_record_permission_parser.add_argument( + '--dry-run', dest='dry_run', action='store_true', + help='Display the permissions changes without committing them') +keeper_drive_record_permission_parser.add_argument( + '-f', '--force', dest='force', action='store_true', + help='Apply permission changes without any confirmation') +keeper_drive_record_permission_parser.add_argument( + '-R', '--recursive', dest='recursive', action='store_true', + help='Apply permission changes to all sub-folders') +keeper_drive_record_permission_parser.add_argument( + '-a', '--action', dest='action', choices=['grant', 'revoke'], required=True, + help='The action being taken') +keeper_drive_record_permission_parser.add_argument( + '-r', '--role', dest='role', + choices=[ + 'viewer', 'shared-manager', + 'content-manager', 'content-share-manager', 'full-manager', + ], + help='Permission role to grant, or filter for revoke') +keeper_drive_record_permission_parser.add_argument( + 'folder', nargs='?', type=str, help='folder path or folder UID') + + +keeper_drive_transfer_record_parser = _make_parser( + 'kd-transfer-record', 'Transfer record ownership to another user') +keeper_drive_transfer_record_parser.add_argument( + 'record_uids', nargs='+', type=str, help='Record UID(s) to transfer') +keeper_drive_transfer_record_parser.add_argument( + 'new_owner_email', type=str, help='Email address of the new owner') + + +# ══════════════════════════════════════════════════════════════════════════ +# Detail / access parsers +# ══════════════════════════════════════════════════════════════════════════ + +keeper_drive_get_record_details_parser = _make_parser( + 'kd-get-record-details', + 'Get record metadata (title, color, etc.) 
using v3 API') +keeper_drive_get_record_details_parser.add_argument( + 'record_uids', nargs='+', type=str, help='Record UIDs to get details for') +keeper_drive_get_record_details_parser.add_argument( + '--format', dest='format', choices=['table', 'json'], default='table', + help='Output format (default: table)') + + +# ══════════════════════════════════════════════════════════════════════════ +# Shortcut parsers +# ══════════════════════════════════════════════════════════════════════════ + +kd_shortcut_list_parser = _make_parser( + 'kd-shortcut list', + 'List KeeperDrive records that appear in more than one folder.') +kd_shortcut_list_parser.add_argument( + 'target', nargs='?', type=str, + help='Optional record UID/title or folder path/UID to filter results') +kd_shortcut_list_parser.add_argument( + '--format', dest='format', choices=['table', 'csv', 'json'], default='table', + help='Output format (default: table)') +kd_shortcut_list_parser.add_argument( + '--output', dest='output', type=str, + help='Path to output file (ignored for table format)') + + +kd_shortcut_keep_parser = _make_parser( + 'kd-shortcut keep', + 'Keep a record only in one KeeperDrive folder, removing it from all others.') +kd_shortcut_keep_parser.add_argument( + 'target', nargs='?', type=str, help='Record UID or title') +kd_shortcut_keep_parser.add_argument( + 'folder', nargs='?', type=str, + help='Folder path or UID to keep the record in (defaults to current folder)') +kd_shortcut_keep_parser.add_argument( + '-f', '--force', dest='force', action='store_true', + help='Do not prompt before removing') + + +# ══════════════════════════════════════════════════════════════════════════ +# Remove parsers +# ══════════════════════════════════════════════════════════════════════════ + +kd_rm_parser = _make_parser( + 'kd-rm', + 'Remove a KeeperDrive record. 
Supports owner-trash, folder-trash, or unlink.') +kd_rm_parser.add_argument( + 'records', nargs='+', metavar='RECORD', + help='Record UID(s) or title(s) to remove (max 500 per invocation)') + +kd_rm_parser.add_argument( + '--folder', dest='folder_uid', metavar='FOLDER', + help='Folder UID or name that provides context for the operation') +kd_rm_parser.add_argument( + '--operation', '-o', dest='operation', + choices=['owner-trash', 'folder-trash', 'unlink'], default='owner-trash', + help='Removal operation (default: owner-trash)') +_kd_rm_confirm = kd_rm_parser.add_mutually_exclusive_group() +_kd_rm_confirm.add_argument( + '--force', '-f', action='store_true', + help='Skip the confirmation prompt and execute immediately after preview.') +_kd_rm_confirm.add_argument( + '--dry-run', dest='dry_run', action='store_true', + help='Run the preview step only; do not delete anything.') + + +# ══════════════════════════════════════════════════════════════════════════ +# Get parser +# ══════════════════════════════════════════════════════════════════════════ + +kd_get_parser = _make_parser( + 'kd-get', + 'Get the details of a KeeperDrive record or folder by UID or title') +kd_get_parser.add_argument( + 'uid', type=str, help='Record UID, folder UID, or title to look up') +kd_get_parser.add_argument( + '--format', dest='format', choices=['detail', 'json'], default='detail', + help='Output format: detail (default) or json') +kd_get_parser.add_argument( + '--verbose', '-v', dest='verbose', action='store_true', default=False, + help='Show full permission breakdown for each accessor') +kd_get_parser.add_argument( + '--unmask', dest='unmask', action='store_true', default=False, + help='Reveal masked field values (passwords, secrets)') diff --git a/keepercommander/commands/keeper_drive/record_commands.py b/keepercommander/commands/keeper_drive/record_commands.py new file mode 100644 index 000000000..d77231010 --- /dev/null +++ b/keepercommander/commands/keeper_drive/record_commands.py 
class KeeperDriveShortcutListCommand(Command):
    """List KeeperDrive records that appear in more than one folder."""

    def get_parser(self):
        return kd_shortcut_list_parser

    def execute(self, params, **kwargs):
        # Map of record_uid -> set of folder UIDs it appears in (shortcuts only).
        shortcut_map = KeeperDriveShortcutCommand.get_record_shortcuts(params)
        target = kwargs.get('target')

        drive_records = getattr(params, 'keeper_drive_records', {})
        drive_folders = getattr(params, 'keeper_drive_folders', {})

        if target:
            selected = self._resolve_target(
                params, target, shortcut_map, drive_records, drive_folders)
        else:
            selected = set(shortcut_map.keys())

        if not selected:
            logging.info('No KeeperDrive shortcut records found')
            return

        fmt = kwargs.get('format') or 'table'
        rows = []
        for record_uid in sorted(selected):
            title = drive_records.get(record_uid, {}).get('title', record_uid)
            locations = []
            for folder_uid in sorted(shortcut_map[record_uid]):
                folder_name = drive_folders.get(folder_uid, {}).get('name', folder_uid)
                if fmt == 'json':
                    locations.append({'folder_uid': folder_uid, 'name': folder_name})
                else:
                    locations.append(f'{folder_name} ({folder_uid})')
            rows.append([record_uid, title, locations])

        if fmt == 'json':
            headers = ['record_uid', 'record_title', 'folders']
        else:
            headers = ['Record UID', 'Record Title', 'Folders']
        from ..base import dump_report_data
        return dump_report_data(rows, headers, fmt=fmt, filename=kwargs.get('output'))

    @staticmethod
    def _resolve_target(params, target, records, kd_records, kd_folders):
        """Resolve *target* (record UID, title, or folder path) to a set of record UIDs."""
        # Exact record-UID match takes priority.
        if target in kd_records:
            if target not in records:
                raise CommandError('kd-shortcut list', f'Record UID {target} does not have shortcuts')
            return {target}

        # Next: case-insensitive title match.
        wanted = target.casefold()
        for uid, rec in kd_records.items():
            if rec.get('title', '').casefold() == wanted:
                if uid not in records:
                    raise CommandError('kd-shortcut list', f'Record "{target}" does not have shortcuts')
                return {uid}

        # Finally: treat the target as a folder identifier and select every
        # shortcut record that appears in that folder.
        resolved_folder = _kd.resolve_folder_identifier(params, target)
        if resolved_folder:
            return {uid for uid, folder_set in records.items() if resolved_folder in folder_set}

        raise CommandError('kd-shortcut list',
                           f'Target "{target}" is not a known record UID, title, or folder path')
class KeeperDriveShortcutKeepCommand(Command):
    """Keep a KeeperDrive record in exactly one folder, removing it from all others."""

    def get_parser(self):
        return kd_shortcut_keep_parser

    def execute(self, params, **kwargs):
        target = kwargs.get('target')
        if not target:
            self.get_parser().print_help()
            return

        force = kwargs.get('force', False)
        drive_records = getattr(params, 'keeper_drive_records', {})
        drive_folders = getattr(params, 'keeper_drive_folders', {})

        record_uid = self._resolve_record(target, drive_records)
        keep_folder_uid = self._resolve_keep_folder(params, kwargs.get('folder'), drive_folders)

        shortcut_map = KeeperDriveShortcutCommand.get_record_shortcuts(params)
        if record_uid not in shortcut_map:
            raise CommandError('kd-shortcut keep',
                               f'Record "{target}" does not appear in multiple folders')
        if keep_folder_uid not in shortcut_map[record_uid]:
            fname = drive_folders.get(keep_folder_uid, {}).get('name', keep_folder_uid)
            raise CommandError('kd-shortcut keep', f'Record "{target}" is not in folder "{fname}"')

        folders_to_remove = [fuid for fuid in shortcut_map[record_uid] if fuid != keep_folder_uid]
        if not folders_to_remove:
            logging.info('Nothing to do — record is already in only one folder.')
            return

        if not force:
            # Show the full removal plan before asking for confirmation.
            preview = [f' Will remove record "{target}" ({record_uid}) from:']
            for fuid in folders_to_remove:
                preview.append(f' - {drive_folders.get(fuid, {}).get("name", fuid)} ({fuid})')
            keep_name = drive_folders.get(keep_folder_uid, {}).get('name', keep_folder_uid)
            preview.append(f' Keeping in: {keep_name} ({keep_folder_uid})')
            print('\n'.join(preview))
            from ..base import user_choice
            answer = user_choice('Do you want to proceed with deletion?', 'yn', default='n')
            if answer.lower() != 'y':
                return

        # Remove the record from every folder except the one being kept,
        # collecting per-folder failures rather than stopping at the first.
        failures = []
        for fuid in folders_to_remove:
            try:
                outcome = _kd.remove_record_from_folder_v3(params, fuid, record_uid)
                if not outcome.get('success'):
                    failures.append(f'{fuid}: {outcome.get("message", "unknown error")}')
            except Exception as exc:
                failures.append(f'{fuid}: {exc}')

        if failures:
            raise CommandError('kd-shortcut keep', 'Some removals failed:\n' + '\n'.join(failures))

        params.sync_data = True
        keep_name = drive_folders.get(keep_folder_uid, {}).get('name', keep_folder_uid)
        logging.info('Record "%s" kept in "%s" and removed from %d other folder(s).',
                     target, keep_name, len(folders_to_remove))

    @staticmethod
    def _resolve_record(target, kd_records):
        """Return the record UID for *target* (a UID or a case-insensitive title)."""
        if target in kd_records:
            return target
        wanted = target.casefold()
        for uid, rec in kd_records.items():
            if rec.get('title', '').casefold() == wanted:
                return uid
        raise CommandError('kd-shortcut keep', f'Record "{target}" not found in KeeperDrive')

    @staticmethod
    def _resolve_keep_folder(params, folder_arg, kd_folders):
        """Resolve the folder to keep: an explicit --folder, else the current folder."""
        if folder_arg:
            uid = _kd.resolve_folder_identifier(params, folder_arg)
            if not uid:
                raise CommandError('kd-shortcut keep', f'Folder "{folder_arg}" not found')
            ensure_keeper_drive_folder(params, uid, 'kd-shortcut keep',
                                       identifier=folder_arg)
            return uid
        current = getattr(params, 'current_folder', None)
        if current and current in kd_folders:
            return current
        raise CommandError('kd-shortcut keep',
                           'No folder specified and current folder is not a KeeperDrive folder.')
class KeeperDriveRemoveRecordCommand(Command):
    """Remove (delete/unlink) one or more KeeperDrive records."""

    def get_parser(self):
        return kd_rm_parser

    def execute(self, params, **kwargs):
        record_args = kwargs.get('records') or []
        folder_arg = kwargs.get('folder_uid')
        operation = kwargs.get('operation', 'owner-trash')
        force = kwargs.get('force', False)
        dry_run = kwargs.get('dry_run', False)

        if not record_args:
            raise CommandError('kd-rm', 'At least one record UID or title is required')
        if operation == 'unlink' and not folder_arg:
            raise CommandError('kd-rm', '--folder is required when --operation is "unlink"')

        folder_uid = None
        if folder_arg:
            folder_uid = _kd.resolve_folder_identifier(params, folder_arg)
            if not folder_uid:
                raise CommandError('kd-rm', f"Folder '{folder_arg}' not found")
            ensure_keeper_drive_folder(params, folder_uid, 'kd-rm',
                                       identifier=folder_arg)

        removals = self._build_removals(params, record_args, folder_uid, operation)
        if len(removals) > 500:
            raise CommandError('kd-rm', 'Maximum 500 records per invocation')

        with command_error_handler('kd-rm'):
            self._preview_and_confirm(params, removals, operation, force, dry_run)

    def _build_removals(self, params, record_args, folder_uid, operation):
        """Resolve each identifier to a removal descriptor, validating access up front."""
        removals = []
        for identifier in record_args:
            record_uid = _kd.resolve_kd_record_uid(params, identifier)
            if not record_uid:
                raise CommandError('kd-rm', f"Record '{identifier}' not found")
            ensure_keeper_drive_record(params, record_uid, 'kd-rm',
                                       identifier=identifier)
            check_record_delete_permission(params, record_uid, 'kd-rm')
            ctx_folder = folder_uid
            if not ctx_folder:
                # No explicit --folder: derive context from the record's own folders.
                folders = _kd.find_kd_folders_for_record(params, record_uid)
                if not folders and operation != 'owner-trash':
                    raise CommandError('kd-rm',
                                       f"No folder context for record '{identifier}'. "
                                       f"Use --folder or --operation owner-trash.")
                ctx_folder = folders[0] if folders else None
            removals.append({
                'record_uid': record_uid,
                'folder_uid': ctx_folder,
                'operation_type': operation,
            })
        return removals

    def _preview_and_confirm(self, params, removals, operation, force, dry_run):
        """Run the server-side preview, show it, then confirm the removal if allowed."""
        preview = _kd.remove_record_v3(params, removals, dry_run=True)
        any_error = False
        summary_lines = []

        for pr in preview['preview_results']:
            title = self._record_title(params, pr['record_uid'])
            err = pr.get('error')
            if err:
                any_error = True
                summary_lines.append(
                    f" {title} [{pr['record_uid']}]: "
                    f"{err.get('code', '')} — {err.get('message', '')}"
                )
            else:
                summary_lines.extend(
                    self._impact_summary(pr['record_uid'], title, operation, pr.get('impact'))
                )

        for line in summary_lines:
            print(line)

        if any_error:
            print('\nOne or more records could not be previewed. Aborting.')
            return
        if dry_run:
            print('\n[Dry-run] No records were deleted.')
            return
        if not force:
            from ..base import user_choice
            if user_choice('Do you want to proceed with deletion?', 'yn', default='n').lower() != 'y':
                return

        # Second call with dry_run=False actually performs the removal.
        confirmed = _kd.remove_record_v3(params, removals, dry_run=False)
        if confirmed['confirmed']:
            params.sync_data = True
        else:
            logging.warning('Record removal was not confirmed by the server.')

    @staticmethod
    def _record_title(params, record_uid):
        """Best-effort display title for *record_uid* (falls back to the UID itself)."""
        drive_records = getattr(params, 'keeper_drive_records', {})
        return drive_records.get(record_uid, {}).get('title') or record_uid

    @staticmethod
    def _impact_summary(record_uid, title, operation, impact):
        """Build the human-readable preview lines for one record removal."""
        lines = [f"\nThe following record will be {operation}:"]
        lines.append(f" {title} [{record_uid}]")
        if impact:
            counts = [
                (impact.get('folders_count', 0), 'folder(s)'),
                (impact.get('records_count', 0), 'record(s)'),
                (impact.get('affected_users_count', 0), 'user(s)'),
                (impact.get('affected_teams_count', 0), 'team(s)'),
            ]
            parts = [f"{count} {label}" for count, label in counts if count]
            if parts:
                lines.append(f" This will affect: {', '.join(parts)}")
            for ri in impact.get('record_info', []):
                if ri.get('locations_count', 0) > 1:
                    lines.append(
                        f" Note: record exists in {ri['locations_count']} folder locations"
                    )
            for w in impact.get('warnings', []):
                lines.append(f" Warning: {w}")
        return lines
parts.append(f"{records} record(s)") + if users: + parts.append(f"{users} user(s)") + if teams: + parts.append(f"{teams} team(s)") + if parts: + lines.append(f" This will affect: {', '.join(parts)}") + for ri in impact.get('record_info', []): + if ri.get('locations_count', 0) > 1: + lines.append( + f" Note: record exists in {ri['locations_count']} folder locations" + ) + for w in impact.get('warnings', []): + lines.append(f" Warning: {w}") + return lines diff --git a/keepercommander/commands/keeper_drive/sharing_commands.py b/keepercommander/commands/keeper_drive/sharing_commands.py new file mode 100644 index 000000000..bc48691c8 --- /dev/null +++ b/keepercommander/commands/keeper_drive/sharing_commands.py @@ -0,0 +1,625 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' 1: + raise CommandError('kd-share-record', 'Ownership can only be transferred to a single account') + if action == 'grant' and not role: + raise CommandError('kd-share-record', 'Role is required for grant action') + + if kwargs.get('contacts_only'): + emails = [self._resolve_contact(params, e, force) for e in emails] + + expiration = parse_expiration( + kwargs.get('expire_at'), kwargs.get('expire_in'), 'kd-share-record') + access_role_type = _kd.resolve_role_name(role) if role else None + record_uids = self._resolve_record_uids(params, record_arg, recursive) + + for uid in record_uids: + check_record_share_permission(params, uid, 'kd-share-record') + + if dry_run: + self._print_dry_run(action, record_uids, emails, role, expiration) + return + + with command_error_handler('kd-share-record'): + for email in emails: + for record_uid in record_uids: + result, effective_action = self._dispatch( + params, action, record_uid, email, access_role_type, expiration) + self._log_results(result, effective_action, email) + + # Strategy dispatch — returns (result, effective_action) + @staticmethod + def _dispatch(params, action, record_uid, email, access_role_type, expiration): + if action == 'owner': + return 
(_kd.transfer_record_ownership_v3( + params=params, record_uid=record_uid, new_owner_email=email), 'owner') + + if action == 'grant': + if KeeperDriveShareRecordCommand._is_already_shared( + params, record_uid, email): + logging.debug( + "Record '%s' is already shared with user '%s'; switching to update.", + record_uid, email) + return (_kd.update_record_share_v3( + params=params, record_uid=record_uid, recipient_email=email, + access_role_type=access_role_type, + expiration_timestamp=expiration), 'update') + return (_kd.share_record_v3( + params=params, record_uid=record_uid, recipient_email=email, + access_role_type=access_role_type, + expiration_timestamp=expiration), 'grant') + + return (_kd.unshare_record_v3( + params=params, record_uid=record_uid, recipient_email=email), 'revoke') + + @staticmethod + def _is_already_shared(params, record_uid, email): + """Return True if *email* already has a *direct* non-owner share on *record_uid*. + + Inherited permissions (delivered via a parent shared folder) are + intentionally ignored: the v3 share endpoint cannot ``update`` an + inherited row — attempting to do so is rejected with ``trying to + update or revoke non existing permissions``. Returning False here + causes the caller to dispatch ``share_record_v3`` (a fresh direct + grant) which correctly overrides the inherited folder permission. 
+ """ + try: + access_result = _kd.get_record_accesses_v3(params, [record_uid]) + for a in access_result.get('record_accesses', []): + if a.get('record_uid') != record_uid or a.get('owner', False): + continue + if a.get('access_type') and a.get('access_type') != 'AT_USER': + continue + if a.get('inherited'): + continue + if a.get('accessor_name', '').casefold() == email.casefold(): + return True + return False + except Exception as exc: + logging.debug("Could not fetch record accesses for '%s': %s", record_uid, exc) + return False + + @staticmethod + def _log_results(result, action, email): + verbs = {'grant': 'granted to', 'update': 'changed for', 'revoke': 'revoked from'} + for res in result['results']: + uid = res['record_uid'] + if action == 'owner': + if res['success']: + logging.info("Record '%s' ownership transferred to '%s'", uid, email) + logging.warning("You will no longer have access to this record!") + else: + logging.error("Failed to transfer ownership of '%s' to '%s': %s", + uid, email, res.get('message', 'unknown error')) + elif res.get('pending'): + logging.warning("Share invitation has been sent to '%s'", email) + logging.warning('Please repeat this command when invitation is accepted.') + elif res['success']: + logging.info('Record "%s" access permissions has been %s \'%s\'', + uid, verbs.get(action, action), email) + else: + logging.info('Failed to %s record "%s" access for \'%s\': %s', + action, uid, email, res['message']) + + @staticmethod + def _resolve_contact(params, email, force): + from keepercommander import api + from keepercommander.commands.base import user_choice, dump_report_data + + known_users = api.get_share_objects(params).get('users', {}) + if email.casefold() in [u.casefold() for u in known_users]: + return email + + get_user = lambda addr: next(iter(addr.split('@')), '').casefold() + matches = [c for c in known_users if get_user(email) == get_user(c)] + if len(matches) > 1: + raise CommandError('kd-share-record', 'More than 1 
matching usernames found. Aborting') + match = next(iter(matches), None) + if match: + dump_report_data([[email, match]], ['Requested', 'Known Contact']) + if force or user_choice('\tReplace with known matching contact?', 'yn', default='n') == 'y': + return match + raise CommandError('kd-share-record', + f'Recipient {email!r} is not a known contact') + + @staticmethod + def _resolve_record_uids(params, record_arg, recursive): + kd_folders = getattr(params, 'keeper_drive_folders', {}) + kd_records = getattr(params, 'keeper_drive_records', {}) + + # Fast path: if the identifier is a known record UID, don't attempt + # to resolve it as a folder path (folder resolution can traverse the + # folder tree and raise on malformed/missing nodes). + if record_arg in kd_records: + return [record_arg] + + try: + folder_uid = _kd.resolve_folder_identifier(params, record_arg) + except Exception: + folder_uid = None + if folder_uid: + # Reject legacy folders up-front with a friendly message rather + # than letting them slip through as a "no records found" error. 
class KeeperDriveRecordPermissionCommand(Command):
    """Bulk-update sharing permissions on records within a KeeperDrive folder.

    Template Method pipeline: resolve folder → collect records → compute
    changes → display plan → execute batched updates.
    """

    # Role names accepted by --role; mapped to protobuf values at run time.
    _ROLE_NAMES = [
        'viewer', 'shared-manager',
        'content-manager', 'content-share-manager', 'full-manager',
    ]

    def get_parser(self):
        return keeper_drive_record_permission_parser

    def execute(self, params, **kwargs):
        from keepercommander.commands.base import dump_report_data, user_choice
        from keepercommander.display import bcolors

        folder_name = kwargs.get('folder') or ''
        action = kwargs.get('action')
        role = kwargs.get('role')
        recursive = kwargs.get('recursive', False)
        dry_run = kwargs.get('dry_run', False)
        force = kwargs.get('force', False)

        if action == 'grant' and not role:
            raise CommandError('kd-record-permission', 'Role is required for grant action')

        kd_folders = getattr(params, 'keeper_drive_folders', {})
        kd_folder_records = getattr(params, 'keeper_drive_folder_records', {})
        kd_record_data = getattr(params, 'keeper_drive_record_data', {})

        role_map_pb = {name: _kd.resolve_role_name(name) for name in self._ROLE_NAMES}

        # Step 1: Resolve
        folder_uid, display_name = self._resolve_folder(kd_folders, folder_name, params)

        if not force:
            role_label = '"' + role + '"' if role else 'all'
            logging.info('\nRequest to %s %s permission(s) in "%s" folder %s',
                         'GRANT' if action == 'grant' else 'REVOKE',
                         role_label, display_name,
                         'recursively' if recursive else 'only')

        # Step 2: Collect
        record_uids = self._collect_record_uids(kd_folders, kd_folder_records, folder_uid, recursive)
        if not record_uids:
            raise CommandError('kd-record-permission', 'No records found in the specified folder')

        try:
            accesses_result = _kd.get_record_accesses_v3(params, list(record_uids))
        except Exception as e:
            raise CommandError('kd-record-permission', f'Failed to fetch record accesses: {e}')

        # Step 3: Compute
        updates, creates, revokes, skipped = self._compute_changes(
            accesses_result, record_uids, params.user, action, role, role_map_pb)
        if not updates and not creates and not revokes:
            if skipped:
                logging.warning('No permission changes can be made. '
                                'See skipped entries below (insufficient permissions).')
                # dump_report_data / bcolors are already imported at the top
                # of execute — no need to re-import them here.
                self._print_plan([], [], [], skipped, kd_record_data, dump_report_data, bcolors)
            else:
                logging.info('No permission changes are needed.')
            return

        # Step 4: Display
        if dry_run or not force:
            self._print_plan(updates, creates, revokes, skipped, kd_record_data, dump_report_data, bcolors)
        if dry_run:
            return

        if not force:
            print('\n\n' + bcolors.WARNING + bcolors.BOLD + 'ALERT!!!' + bcolors.ENDC)
            if user_choice('Do you want to proceed with these permission changes?', 'yn', 'n').lower() != 'y':
                return

        # Step 5: Execute
        self._execute_changes(params, updates, creates, revokes)
        params.sync_data = True

    @staticmethod
    def _resolve_folder(kd_folders, folder_name, params=None):
        """Resolve *folder_name* to ``(folder_uid, display_name)``.

        An empty name means the vault root and returns ``(None, 'root')``.
        Resolution order: path/identifier via _kd, then exact UID, then a
        case-insensitive name scan.
        """
        if not folder_name:
            return None, 'root'
        if params is not None:
            resolved = _kd.resolve_folder_identifier(params, folder_name)
            if resolved and resolved in kd_folders:
                return resolved, kd_folders[resolved].get('name', resolved)
            if resolved:
                # Resolution succeeded against legacy caches; reject with a
                # friendly cross-type message instead of "not found".
                ensure_keeper_drive_folder(params, resolved, 'kd-record-permission',
                                           identifier=folder_name)
        if folder_name in kd_folders:
            return folder_name, kd_folders[folder_name].get('name', folder_name)
        lower = folder_name.lower()
        for fuid, fobj in kd_folders.items():
            if fobj.get('name', '').lower() == lower:
                return fuid, fobj.get('name', fuid)
        raise CommandError('kd-record-permission', f'Folder "{folder_name}" not found')

    @staticmethod
    def _collect_record_uids(kd_folders, kd_folder_records, folder_uid, recursive):
        """Gather record UIDs in *folder_uid* (or at root), optionally recursing."""
        record_uids = set()

        def walk(fuid, visited=None):
            # Cycle-safe descent through the parent_uid tree.
            if visited is None:
                visited = set()
            if fuid in visited:
                return
            visited.add(fuid)
            record_uids.update(kd_folder_records.get(fuid, set()))
            if recursive:
                for child_uid, child_obj in kd_folders.items():
                    if child_obj.get('parent_uid') == fuid and child_uid not in visited:
                        walk(child_uid, visited)

        if folder_uid:
            walk(folder_uid)
        else:
            # Root scope: records whose containing folder is not a known
            # KeeperDrive folder live at the root.
            for fuid, recs in kd_folder_records.items():
                if fuid not in kd_folders:
                    record_uids.update(recs)
            if recursive:
                for fuid in list(kd_folders):
                    walk(fuid)
        return record_uids

    @staticmethod
    def _compute_changes(accesses_result, record_uids, current_user, action, role, role_map_pb):
        """Classify every non-owner share into updates, creates, revokes, or skipped.

        Buckets:
        - ``updates`` — direct shares whose role differs from *role*; sent
          via ``updateSharingPermissions``.
        - ``creates`` — recipients who currently only have an *inherited*
          (folder-level) permission. ``vault/records/v3/share`` cannot
          ``update`` an inherited row (server returns ``trying to update or
          revoke non existing permissions``); a fresh
          ``createSharingPermissions`` adds a direct override.
        - ``revokes`` — direct shares to remove. Inherited shares cannot be
          revoked at the record level and are routed to *skipped*.
        - ``skipped`` — added when the record is in ``forbidden_records``,
          when the current user's own access entry lacks
          ``can_update_access``, or when the action is ``revoke`` and the
          existing share is inherited (must be removed from the parent
          shared folder instead).
        """
        updates, creates, revokes, skipped = [], [], [], []

        # Pre-flight: record UIDs the server refused to return access info for.
        forbidden = set(accesses_result.get('forbidden_records', []))

        # Index current-user's own access flags per record_uid for fast lookup.
        owner_flags = {}  # record_uid -> can_update_access bool
        for access in accesses_result.get('record_accesses', []):
            if access.get('accessor_name', '') == current_user:
                owner_flags[access.get('record_uid')] = access.get('can_update_access', False)

        for rec_uid in record_uids:
            if rec_uid in forbidden:
                skipped.append({
                    'record_uid': rec_uid, 'email': '', 'cur_role': '',
                    'reason': 'No access — record is forbidden',
                })

        for access in accesses_result.get('record_accesses', []):
            rec_uid = access.get('record_uid')
            if not rec_uid or rec_uid not in record_uids or access.get('owner'):
                continue
            email = access.get('accessor_name', '')
            if not email or email == current_user:
                continue

            cur_role = infer_role(access)
            is_inherited = bool(access.get('inherited'))

            # Pre-flight: does the current user have permission to modify this share?
            can_update = owner_flags.get(rec_uid, False)
            if not can_update:
                skipped.append({
                    'record_uid': rec_uid, 'email': email, 'cur_role': cur_role,
                    'reason': 'Insufficient permission (can_update_access is false)',
                })
                continue

            if action == 'grant':
                if cur_role != role:
                    entry = {
                        'record_uid': rec_uid, 'email': email,
                        'cur_role': cur_role, 'new_role': role,
                        'access_role_type': role_map_pb.get(role),
                    }
                    if is_inherited:
                        creates.append(entry)
                    else:
                        updates.append(entry)
            else:
                if not role or cur_role == role:
                    if is_inherited:
                        skipped.append({
                            'record_uid': rec_uid, 'email': email, 'cur_role': cur_role,
                            'reason': 'Inherited from a shared folder — '
                                      'revoke at the parent shared folder',
                        })
                    else:
                        revokes.append({'record_uid': rec_uid, 'email': email, 'cur_role': cur_role})

        return updates, creates, revokes, skipped

    @staticmethod
    def _print_plan(updates, creates, revokes, skipped, kd_record_data, dump_report_data, bcolors):
        """Render the SKIP / GRANT / REVOKE tables describing the planned changes."""
        def title_for(rec_uid):
            obj = kd_record_data.get(rec_uid, {})
            dj = obj.get('data_json', {}) if isinstance(obj, dict) else {}
            return (dj.get('title', '')[:32]) if isinstance(dj, dict) else ''

        if skipped:
            table = [[s['record_uid'], title_for(s['record_uid']),
                      s['email'] or '—', s['cur_role'] if s['cur_role'] else '—',
                      s['reason']] for s in skipped]
            title = (bcolors.FAIL + ' SKIP ' + bcolors.ENDC +
                     'Record permission(s). Not permitted')
            dump_report_data(table,
                             ['Record UID', 'Title', 'Email', 'Current Role', 'Reason'],
                             title=title, row_number=True, group_by=0)
            logging.info('')
            logging.info('')

        # Display GRANTs as a single table for the user — direct updates and
        # inherited-overrides are both presented as "current → new" rows even
        # though the underlying API call differs (update vs. create).
        grant_rows = []
        for u in updates:
            grant_rows.append([u['record_uid'], title_for(u['record_uid']), u['email'],
                               u['cur_role'],
                               bcolors.BOLD + ' ' + u['new_role'] + bcolors.ENDC])
        for c in creates:
            grant_rows.append([c['record_uid'], title_for(c['record_uid']), c['email'],
                               c['cur_role'] + ' (inherited)',
                               bcolors.BOLD + ' ' + c['new_role'] + bcolors.ENDC])
        if grant_rows:
            title = (bcolors.OKGREEN + ' GRANT' + bcolors.ENDC +
                     ' Record permission(s)')
            dump_report_data(grant_rows,
                             ['Record UID', 'Title', 'Email', 'Current Role', 'New Role'],
                             title=title, row_number=True, group_by=0)
            logging.info('')
            logging.info('')

        if revokes:
            table = []
            for r in revokes:
                table.append([r['record_uid'], title_for(r['record_uid']), r['email'],
                              bcolors.BOLD + ' ' + r['cur_role'] + bcolors.ENDC])
            title = (bcolors.FAIL + ' REVOKE' + bcolors.ENDC +
                     ' Record share(s)')
            dump_report_data(table,
                             ['Record UID', 'Title', 'Email', 'Current Role'],
                             title=title, row_number=True, group_by=0)
            logging.info('')
            logging.info('')

    @staticmethod
    def _execute_changes(params, updates, creates, revokes):
        """Apply permission changes in batched REST calls (up to 200 per request).

        ``updates`` use ``updateSharingPermissions`` (modify a direct share);
        ``creates`` use ``createSharingPermissions`` (add a new direct share
        that overrides a folder-inherited permission). Both are reported to
        the user under a single "Failed to GRANT" error table.
        """
        from keepercommander.commands.base import dump_report_data
        from keepercommander.display import bcolors

        grant_failures = []

        def collect_grant_outcomes(outcomes, success_fmt):
            # Shared bookkeeping for the update and create batches: log
            # successes, accumulate skips/errors into the failures table.
            for item, result in outcomes:
                record_uid = item['record_uid']
                email = item['email']
                if result.get('skipped'):
                    grant_failures.append([record_uid, email, 'skipped',
                                           result.get('message', 'could not build permission')])
                elif result.get('success'):
                    logging.info(success_fmt, record_uid, email,
                                 item['cur_role'], item['new_role'])
                else:
                    grant_failures.append([record_uid, email, 'error',
                                           result.get('message', 'Unknown error')])

        if updates:
            collect_grant_outcomes(_kd.batch_update_record_shares_v3(params, updates),
                                   "Updated '%s' for %s: %s -> %s")

        if creates:
            collect_grant_outcomes(_kd.batch_create_record_shares_v3(params, creates),
                                   "Granted '%s' to %s: %s (inherited) -> %s")

        if grant_failures:
            headers = ['Record UID', 'Email', 'Error Code', 'Message']
            title = (bcolors.WARNING + 'Failed to GRANT' + bcolors.ENDC +
                     ' Record permission(s)')
            dump_report_data(grant_failures, headers, title=title, row_number=True)
            logging.info('')
            logging.info('')

        if revokes:
            table = []
            outcomes = _kd.batch_unshare_records_v3(params, revokes)
            for item, result in outcomes:
                record_uid = item['record_uid']
                email = item['email']
                if result.get('skipped'):
                    table.append([record_uid, email, 'skipped',
                                  result.get('message', 'could not build permission')])
                elif result.get('success'):
                    logging.info("Revoked '%s' from %s (%s)",
                                 record_uid, email,
                                 item['cur_role'])
                else:
                    table.append([record_uid, email, 'error',
                                  result.get('message', 'Unknown error')])

            if table:
                headers = ['Record UID', 'Email', 'Error Code', 'Message']
                title = (bcolors.WARNING + 'Failed to REVOKE' + bcolors.ENDC +
                         ' Record share(s)')
                dump_report_data(table, headers, title=title, row_number=True)
                logging.info('')
                logging.info('')
class KeeperDriveTransferRecordCommand(Command):
    """Transfer record ownership to another user."""

    def get_parser(self):
        return keeper_drive_transfer_record_parser

    def execute(self, params, **kwargs):
        identifiers = kwargs.get('record_uids') or []
        new_owner_email = kwargs.get('new_owner_email')

        if not identifiers or not new_owner_email:
            raise CommandError('kd-transfer-record', 'Record UID(s) and new owner email are required')

        with command_error_handler('kd-transfer-record'):
            for identifier in identifiers:
                self._transfer_one(params, identifier, new_owner_email)

    @staticmethod
    def _transfer_one(params, identifier, new_owner_email):
        """Resolve *identifier*, transfer ownership, and log each per-record outcome."""
        record_uid = _kd.resolve_kd_record_uid(params, identifier)
        if not record_uid:
            raise CommandError('kd-transfer-record',
                               f"Record '{identifier}' not found")
        ensure_keeper_drive_record(params, record_uid, 'kd-transfer-record',
                                   identifier=identifier)
        result = _kd.transfer_record_ownership_v3(
            params=params, record_uid=record_uid, new_owner_email=new_owner_email)
        check_result(result, 'kd-transfer-record')
        for res in result['results']:
            if res['success']:
                logging.info("Record '%s' ownership transferred to %s",
                             res['record_uid'], new_owner_email)
                logging.warning("You will no longer have access to this record!")
            else:
                logging.error("Failed to transfer: %s", res['message'])
Application:{bcolors.ENDC} {bcolors.OKGREEN}secrets-manager share remove --app {bcolors.OKBLUE}[APP NAME OR UID] {bcolors.OKGREEN}--secret {bcolors.OKBLUE}[RECORD OR SHARED FOLDER UID]{bcolors.ENDC} + {bcolors.BOLD}Add Token to Application:{bcolors.ENDC} + {bcolors.OKGREEN}secrets-manager token add {bcolors.OKBLUE}[APP NAME OR UID]{bcolors.ENDC} + Options: + --count [NUM] : Number of tokens to generate (Default: 1) + --unlock-ip : Does not lock IP address to first requesting device + --first-access-expires-in-min [MIN] : First time access expiration (Default 60, Max 1440) + --access-expire-in-min [MIN] : Client access expiration (Default: no expiration) + --name [CLIENT NAME] : Name of the client + --config-init [json, b64 or k8s] : Initialize configuration string from a one-time token + --return-tokens : Return generated tokens as a comma-separated string + Adds one or more one-time access tokens to an existing KSM application. + Equivalent to: secrets-manager client add --app [APP NAME OR UID] + ----- Note: If the UID you are using contains a dash (-) in the beginning, the value should be wrapped in quotes and prepended with an equal sign. 
For example: @@ -114,7 +127,7 @@ add_help=False) ksm_parser.add_argument('command', type=str, action='store', nargs="*", help='One of: "app list", "app get", "app create", "app update", "app remove", "app share", ' + - '"app unshare", "client add", "client remove", "share add", "share update" or "share remove"') + '"app unshare", "client add", "client remove", "share add", "share update", "share remove" or "token add"') ksm_parser.add_argument('--secret', '-s', type=str, action='append', required=False, help='Record UID') ksm_parser.add_argument('--app', '-a', type=str, action='store', required=False, @@ -427,6 +440,32 @@ def execute(self, params, **kwargs): return + elif ksm_obj in ('token', 'tokens') and ksm_action in ('add', 'create'): + if len(ksm_command) < 3: + print( + f'{bcolors.WARNING}App UID or name is required.{bcolors.ENDC}\n' + f'\tEx: {bcolors.OKGREEN}secrets-manager token add {bcolors.OKBLUE}MyApp{bcolors.ENDC}' + ) + return + app_name_or_uid = ksm_command[2] + count = kwargs.get('count', 1) + unlock_ip = kwargs.get('unlockIp', False) + first_access_expire_on = kwargs.get('firstAccessExpiresIn') + access_expire_in_min = kwargs.get('accessExpireInMin') + client_name = kwargs.get('name') + config_init = kwargs.get('config_init') + is_return_tokens = kwargs.get('returnTokens', False) + tokens_and_device = KSMCommand.add_client( + params, app_name_or_uid, count, unlock_ip, + first_access_expire_on, access_expire_in_min, + client_name=client_name, config_init=config_init, + client_type=enterprise_pb2.GENERAL, + ) + if is_return_tokens and tokens_and_device: + tokens_only = [x.get('oneTimeToken', '') for x in tokens_and_device if x.get('oneTimeToken')] + return ', '.join(tokens_only) if tokens_only else None + return + print(f"{bcolors.WARNING}Unknown combination of KSM commands. 
" + f"Type 'secrets-manager' for more details'{bcolors.ENDC}") diff --git a/keepercommander/commands/pam_import/KCM_IMPORT.md b/keepercommander/commands/pam_import/KCM_IMPORT.md new file mode 100644 index 000000000..546b527d7 --- /dev/null +++ b/keepercommander/commands/pam_import/KCM_IMPORT.md @@ -0,0 +1,205 @@ +# KCM Database Import — Quick Start Guide + +Migrate connections from a KCM (Keeper Connection Manager) / Apache Guacamole database directly into Keeper PAM. The `pam project kcm-import` command connects to the KCM database, extracts connections, users, and groups, maps 150+ Guacamole parameters to Keeper record fields, and imports everything into your vault. + +## Prerequisites + +| Requirement | Details | +|-------------|---------| +| **Keeper Commander** | Installed and logged in (`keeper shell`) | +| **KCM instance** | Running KCM with MySQL or PostgreSQL backend | +| **Database access** | Credentials for the KCM/Guacamole database | +| **Python DB driver** | `pip3 install pymysql` (MySQL) or `pip3 install psycopg2-binary` (PostgreSQL) | +| **Gateway** | An existing Keeper gateway, or the command will create one | +| **Docker** *(optional)* | Only needed if using `--docker-detect` for auto-discovery | + +## Quick Start — Docker Auto-Detect (Simplest) + +If Commander is running on the same host as KCM's Docker stack: + +```bash +# 1. Preview what would be imported (no vault changes) +My Vault> pam project kcm-import --docker-detect --dry-run + +# 2. Run the actual import +My Vault> pam project kcm-import --docker-detect --name "KCM Migration" +``` + +That's it. The command will: +1. Discover the KCM database container automatically +2. Detect the database type (MySQL or PostgreSQL) +3. Resolve the container's IP address +4. Extract credentials from the container's environment +5. 
Connect, extract all connections/users/groups, and import them + +## Quick Start — Manual Database Connection + +When the database is on a remote host or you need explicit control: + +```bash +# Store your DB password in a Keeper vault record first, then: +My Vault> pam project kcm-import \ + --db-host 10.0.0.5 \ + --db-type postgresql \ + --db-password-record "KCM DB Password" \ + --db-ssl \ + --name "Production KCM" +``` + +> **Security note:** Database passwords are never accepted as CLI arguments. +> Use `--db-password-record` to reference a vault record, or the command will prompt interactively. + +## Common Workflows + +### 1. Explore Before You Import + +List all connection groups in the KCM database to understand what's there: + +```bash +My Vault> pam project kcm-import --docker-detect --list-groups +``` + +Output shows each group with its resource and user counts, helping you decide what to import. + +### 2. Import Specific Groups Only + +Use `--groups` with wildcard patterns to import a subset: + +```bash +# Import only Production and Staging groups +My Vault> pam project kcm-import --docker-detect \ + --groups "Production*,Staging*" \ + --name "Prod Migration" +``` + +Or exclude groups you don't want: + +```bash +# Import everything except test and incomplete groups +My Vault> pam project kcm-import --docker-detect \ + --exclude-groups "Test*,Incomplete*,Sandbox*" +``` + +Patterns support `*` and `?` wildcards and match against group name, full path, or any path segment. + +### 3. Dry Run + JSON Review + +For maximum control, preview the import and save the extracted data: + +```bash +# Save JSON for review without modifying the vault +My Vault> pam project kcm-import --docker-detect \ + --dry-run \ + --output ~/kcm-review.json + +# Include credentials in the JSON (redacted by default) +My Vault> pam project kcm-import --docker-detect \ + --output ~/kcm-full.json \ + --include-credentials +``` + +### 4. 
Extend an Existing PAM Configuration + +Add KCM connections to an existing PAM project instead of creating a new one: + +```bash +My Vault> pam project kcm-import --docker-detect \ + --config "Existing PAM Config" \ + --groups "NewDepartment*" +``` + +### 5. Non-Interactive / Batch Mode + +For scripting or automation, skip all prompts: + +```bash +My Vault> pam project kcm-import --docker-detect \ + --name "Automated Import" \ + --gateway "My Gateway" \ + --yes +``` + +### 6. Get a Size Estimate + +Check how many records would be created without connecting to the vault: + +```bash +My Vault> pam project kcm-import --docker-detect --estimate +``` + +## Folder Modes + +The `--folder-mode` flag controls how KCM connection groups map to Keeper shared folders: + +| Mode | Behavior | +|------|----------| +| `ksm` *(default)* | Preserves group nesting, but groups with a KSM config become root-level shared folders | +| `exact` | Preserves the exact KCM group hierarchy as nested folders | +| `flat` | Every group becomes a root-level shared folder (no nesting) | + +```bash +# Use exact hierarchy +My Vault> pam project kcm-import --docker-detect --folder-mode exact +``` + +## What Gets Imported + +| KCM Object | Keeper Record Type | +|------------|-------------------| +| SSH connections | `pamMachine` | +| RDP connections | `pamMachine` | +| VNC connections | `pamMachine` | +| Telnet connections | `pamMachine` | +| HTTP/HTTPS connections | `pamRemoteBrowser` | +| MySQL connections | `pamDatabase` | +| PostgreSQL connections | `pamDatabase` | +| SQL Server connections | `pamDatabase` | +| Oracle connections | `pamDatabase` | +| Kubernetes connections | `pamMachine` | +| LDAP connections | `pamMachine` | +| Connection users | `pamUser` | + +150+ Guacamole parameters are mapped, including: hostname, port, credentials, SSH keys, RDP display settings, VNC encodings, database schemas, TOTP/MFA, jump hosts, recording paths, and more. 
+ +## Import Report + +After a successful import, the command: + +1. **Prints a summary** to the console with pass/fail/skip counts per record type +2. **Creates a vault record** in the project folder containing: + - Copyable custom fields: gateway deploy command, Gateway Token, Config UID, Gateway UID, KSM App UID + - `KCM-Import-Report.md` file attachment with the full report + - Per-record breakdown with reasons for any failures or skips + - Throttle statistics and the redacted CLI command for reproducibility + +## Cleaning Up an Import + +To reverse an import and remove all created records, folders, gateway, and KSM app: + +```bash +# Preview what would be deleted +My Vault> pam project kcm-cleanup --name "KCM Migration" --dry-run + +# Delete everything from the import +My Vault> pam project kcm-cleanup --name "KCM Migration" --yes + +# Or reference by PAM config UID +My Vault> pam project kcm-cleanup --config VxANFEPLi8E9gdtlDmfBvw --yes +``` + +## Troubleshooting + +| Problem | Solution | +|---------|----------| +| `MySQL driver not found` | `pip3 install pymysql` | +| `PostgreSQL driver not found` | `pip3 install psycopg2-binary` | +| `Refusing to connect without SSL/TLS` | Add `--db-ssl` for encrypted connections, or `--allow-cleartext` if SSL is unavailable (not recommended) | +| `No Docker containers found` | Ensure Docker is running and the KCM database container is up. Use `--docker-container NAME` if auto-discovery fails | +| `KCM schema not found` | The database exists but doesn't have Guacamole tables. Verify `--db-name` points to the correct database (default: `guacamole_db`) | +| `No connections match the group filter` | Run `--list-groups` to see available groups and adjust your `--groups` pattern | +| HTTP 403 throttling | The adaptive throttler handles this automatically. For manual tuning, use `--batch-size` and `--batch-delay` | +| Import is slow | Reduce `--batch-size` if hitting throttles, or increase it if the server handles load well. 
`--no-auto-throttle` disables adaptive tuning | + +## Full Flag Reference + +See the [PAM Import README](README.md) for the complete list of all flags and their defaults, JSON format details, and PAM configuration options. diff --git a/keepercommander/commands/pam_import/README.md b/keepercommander/commands/pam_import/README.md index 165ca8977..e5baae283 100644 --- a/keepercommander/commands/pam_import/README.md +++ b/keepercommander/commands/pam_import/README.md @@ -23,6 +23,91 @@ Adding new PAM resources and users to an existing PAM configuration from an impo - If the command reports errors, run it again with **`--dry-run`** for more detailed error messages. +Import directly from a KCM/Guacamole database. Connects to the KCM database, extracts connections/users/groups, maps 150+ parameters, and feeds the result into the existing import engine. +`pam project kcm-import --db-host=HOST [OPTIONS]` + +**Database (one of `--db-host` or `--docker-detect` required):** +- `--db-host HOST` → KCM database hostname. +- `--docker-detect` → Auto-detect credentials from Docker container. Discovers the KCM database container automatically, detects database type (mysql/postgresql), and resolves the container IP. +- `--docker-container NAME` → Specify Docker container name _(auto-discovered if omitted)_. +- `--db-port PORT` → Database port _(default: 3306 mysql, 5432 postgresql)_. +- `--db-name NAME` → Database name _(default: guacamole\_db)_. +- `--db-type {mysql,postgresql}` → Database type _(auto-detected with `--docker-detect`)_. +- `--db-user USER` → Database username _(default: guacamole\_user)_. +- `--db-password-record UID` → Keeper record UID or title containing DB password. If omitted, searches vault for candidates or prompts interactively. +- `--db-ssl` → Require SSL/TLS for database connection. +- `--allow-cleartext` → Allow unencrypted connection to remote database _(not recommended; required when connecting to a remote host without `--db-ssl`)_. 
+ +**Import:** +- `--name`, `-n` → Project name _(default: KCM-Import-TIMESTAMP)_. +- `--config`, `-c` → Existing PAM config UID or name to extend (skip project creation). +- `--folder-mode {ksm,exact,flat}` → Connection group mapping _(default: ksm)_. +- `--output`, `-o` → Save JSON to file for review before importing. +- `--gateway`, `-g` → Existing gateway UID or name _(interactive picker if omitted)_. +- `--max-instances N` → Set gateway pool size _(0 = skip, requires new gateway)_. + +**Group Filtering:** +- `--list-groups` → List available KCM connection groups with resource/user counts, then exit. +- `--groups "Pattern1,Pattern2"` → Import only connections in matching groups. Supports fnmatch wildcards (`*`, `?`). Matches group name, full path, or any path segment. +- `--exclude-groups "Pattern1,Pattern2"` → Exclude connections in matching groups. Same wildcard support. + +**Flags:** +- `--dry-run`, `-d` → Preview without vault changes (credentials redacted). +- `--skip-users` → Import connections only. +- `--include-disabled` → Include disabled KCM connections. +- `--include-credentials` → Include passwords in `--output` JSON _(redacted by default)_. +- `--yes`, `-y` → Skip confirmation prompt. +- `--estimate` → Show migration size estimate without importing. + +**Throttling:** +- `--auto-throttle` / `--no-auto-throttle` → Enable/disable adaptive throttling with probe _(default: on)_. +- `--batch-size N` → Override records per batch. +- `--batch-delay N` → Override seconds between batches. 
+ +**Examples:** +```bash +# Full auto-detect from Docker (discovers container, db type, IP, credentials) +pam project kcm-import --docker-detect --dry-run + +# List available connection groups before importing +pam project kcm-import --docker-detect --list-groups + +# Import only specific connection groups +pam project kcm-import --docker-detect --groups "Production*,Staging*" --name "Prod Migration" + +# Exclude groups from import +pam project kcm-import --db-host 10.0.0.5 --exclude-groups "Incomplete*,Test*" + +# Import using password from vault record +pam project kcm-import --db-host db.example.com --db-password-record RECORD_UID --name "Prod KCM" + +# Extend existing PAM config from PostgreSQL +pam project kcm-import --db-host pg.example.com --db-type postgresql --config "Existing Config" + +# Auto-detect from Docker and save JSON for review +pam project kcm-import --docker-detect --output /tmp/kcm-review.json + +# Specify Docker container and save with credentials +pam project kcm-import --docker-detect --docker-container kcm-db-1 --output /tmp/full.json --include-credentials +``` + +**Security:** DB passwords are never accepted as CLI arguments. Use `--db-password-record` (vault) or respond to the interactive prompt. Dry-run output redacts all credentials. + +**Interactive Features:** When running interactively (no `--yes`): +- **Group picker** — shows connection groups with counts, lets you select by number +- **Gateway picker** — shows online gateways or create new +- **Password search** — searches vault for records matching "guacamole"/"kcm" +- **Import confirmation** — shows summary before proceeding + +**Import Report:** After import, a structured report is printed to console and saved as a vault record at the project's top-level folder (alongside Resources/Users). 
The record includes: +- **Copyable custom fields**: `Deploy Gateway (copy & paste)` with full docker command, Gateway Token, Config UID, Gateway UID, KSM App UID, folder names +- `KCM-Import-Report.md` file attachment with the full report +- Per-record pass/fail breakdown by type (including nested users) +- Failed/skipped records with reasons +- Throttle statistics +- Redacted CLI command for reproducibility + + ### JSON format details Text UI (TUI) elements (a.k.a. JSON Keys) match their Web UI counterparts so you can create the correponding record type in your web vault to help you visualize all options and possible values. diff --git a/keepercommander/commands/pam_import/commands.py b/keepercommander/commands/pam_import/commands.py index 63ac5f78a..bec4017e0 100644 --- a/keepercommander/commands/pam_import/commands.py +++ b/keepercommander/commands/pam_import/commands.py @@ -10,11 +10,16 @@ # from .edit import PAMProjectImportCommand +from .export import PAMProjectExportCommand from .extend import PAMProjectExtendCommand +from .kcm_import import PAMProjectKCMImportCommand, PAMProjectKCMCleanupCommand from ..base import GroupCommand class PAMProjectCommand(GroupCommand): def __init__(self): super(PAMProjectCommand, self).__init__() self.register_command("import", PAMProjectImportCommand(), "Import PAM Project", "i") + self.register_command("export", PAMProjectExportCommand(), "Export PAM project to JSON for re-import", "x") self.register_command("extend", PAMProjectExtendCommand(), "Extend PAM Project by importing additional data", "e") + self.register_command("kcm-import", PAMProjectKCMImportCommand(), "Import from KCM/Guacamole database", "k") + self.register_command("kcm-cleanup", PAMProjectKCMCleanupCommand(), "Remove a KCM-imported project", "K") diff --git a/keepercommander/commands/pam_import/edit.py b/keepercommander/commands/pam_import/edit.py index f14d952fa..0b5d35686 100644 --- a/keepercommander/commands/pam_import/edit.py +++ 
b/keepercommander/commands/pam_import/edit.py @@ -1476,6 +1476,27 @@ def process_data(self, params, project): if not(isinstance(usr.uid, str) and RecordV3.is_valid_ref_uid(usr.uid)): usr.uid = utils.generate_uid() + # Detect and reject duplicate UIDs to prevent graph ambiguity + _all_assigned_uids: list[str] = [] + for _obj in chain(resources, users): + _all_assigned_uids.append(_obj.uid) + if hasattr(_obj, 'users') and isinstance(_obj.users, list): + for _usr in _obj.users: + _all_assigned_uids.append(_usr.uid) + _seen_uids: set[str] = set() + _duplicate_uids: list[str] = [] + for _uid in _all_assigned_uids: + if _uid in _seen_uids: + _duplicate_uids.append(_uid) + _seen_uids.add(_uid) + if _duplicate_uids: + print( + f"{bcolors.FAIL}pam project import: duplicate uid values detected in import JSON: " + f"{', '.join(sorted(set(_duplicate_uids)))}. " + f"Each resource and user must have a unique uid. Import aborted.{bcolors.ENDC}" + ) + return + # resolve linked object UIDs (machines and users) # pam_settings.connection.administrative_credentials must reference # one of its own users[] -> userRecords["admin_user_record_UID"] diff --git a/keepercommander/commands/pam_import/export.py b/keepercommander/commands/pam_import/export.py new file mode 100644 index 000000000..1effb4ee3 --- /dev/null +++ b/keepercommander/commands/pam_import/export.py @@ -0,0 +1,319 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' environment name used by pam project import +_RECORD_TYPE_TO_ENV = { + "pamNetworkConfiguration": "local", + "pamAwsConfiguration": "aws", + "pamAzureConfiguration": "azure", + "pamDomainConfiguration": "domain", + "pamGcpConfiguration": "gcp", + "pamOciConfiguration": "oci", +} + +# Maps DAG allowedSettings keys -> JSON keys used in PROJECT_IMPORT_JSON_TEMPLATE +_DAG_KEY_TO_JSON = { + "connections": "connections", + "portForwards": "tunneling", + "rotation": "rotation", + "remoteBrowserIsolation": "remote_browser_isolation", + "sessionRecording": 
"graphical_session_recording", + "typescriptRecording": "text_session_recording", + "aiEnabled": "ai_threat_detection", + "aiSessionTerminate": "ai_terminate_session_on_detection", +} + + +class PAMProjectExportCommand(Command): + """Export a PAM project to a JSON document that can be re-imported via pam project import.""" + + parser = argparse.ArgumentParser(prog="pam project export") + parser.add_argument( + "--project-uid", "-p", + required=True, dest="project_uid", action="store", + help="PAM configuration record UID to export.", + ) + parser.add_argument( + "--output", "-o", + required=False, dest="output", action="store", + help="File path to write JSON output (default: print to stdout).", + ) + + def get_parser(self): + return PAMProjectExportCommand.parser + + # ------------------------------------------------------------------ + # Public execute + # ------------------------------------------------------------------ + + def execute(self, params, **kwargs): + project_uid = (kwargs.get("project_uid") or "").strip() + output_file = (kwargs.get("output") or "").strip() + + if not project_uid: + logging.warning(f"{bcolors.FAIL}--project-uid is required{bcolors.ENDC}") + return + + # 1. Load PAM configuration record (v6) + config_record = vault.KeeperRecord.load(params, project_uid) + if not config_record: + logging.warning( + f"{bcolors.FAIL}PAM configuration '{project_uid}' not found in vault{bcolors.ENDC}" + ) + return + if config_record.version != 6: + logging.warning( + f"{bcolors.FAIL}Record '{project_uid}' (version {config_record.version}) " + f"is not a PAM configuration — version 6 required{bcolors.ENDC}" + ) + return + if not isinstance(config_record, vault.TypedRecord): + logging.warning( + f"{bcolors.FAIL}Record '{project_uid}' is not a TypedRecord{bcolors.ENDC}" + ) + return + + # 2. Determine environment + environment = _RECORD_TYPE_TO_ENV.get(config_record.record_type, "local") + + # 3. 
Get resource UIDs from pamResources.resourceRef + facade = PamConfigurationRecordFacade() + facade.record = config_record + resource_uids = list(facade.resource_ref or []) + + # 4. Try to read connection/rotation/tunneling settings from DAG (best-effort) + allowed_settings = self._get_allowed_settings(params, project_uid) + + # 5. Walk resources and gather users + resources_list, top_level_users = self._build_resources_and_users(params, resource_uids) + + # 6. Assemble result dict + result = { + "tool_version": "commander-export-1.0", + "project": config_record.title, + "shared_folder_users": {}, + "shared_folder_resources": {}, + "pam_configuration": { + "environment": environment, + "title": config_record.title, + "connections": allowed_settings.get("connections", "on"), + "rotation": allowed_settings.get("rotation", "on"), + "tunneling": allowed_settings.get("tunneling", "on"), + "remote_browser_isolation": allowed_settings.get("remote_browser_isolation", "on"), + "graphical_session_recording": allowed_settings.get("graphical_session_recording", "off"), + "text_session_recording": allowed_settings.get("text_session_recording", "off"), + "ai_threat_detection": allowed_settings.get("ai_threat_detection", "off"), + "ai_terminate_session_on_detection": allowed_settings.get("ai_terminate_session_on_detection", "off"), + }, + "pam_data": { + "resources": resources_list, + "users": top_level_users, + }, + } + + output_json = json.dumps(result, indent=2, sort_keys=True) + + if output_file: + with open(output_file, "w", encoding="utf-8") as fh: + fh.write(output_json) + print(f"{bcolors.OKGREEN}PAM project exported to: {output_file}{bcolors.ENDC}") + return + + return output_json + + # ------------------------------------------------------------------ + # Helpers + # ------------------------------------------------------------------ + + def _get_allowed_settings(self, params, config_uid): + """Return on/off dict for tunneling config, falling back to safe defaults.""" + 
defaults = { + "connections": "on", + "rotation": "on", + "tunneling": "on", + "remote_browser_isolation": "on", + "graphical_session_recording": "off", + "text_session_recording": "off", + "ai_threat_detection": "off", + "ai_terminate_session_on_detection": "off", + } + try: + from ..tunnel.port_forward.tunnel_helpers import get_keeper_tokens + from ..tunnel.port_forward.TunnelGraph import TunnelDAG, get_vertex_content + + encrypted_session_token, encrypted_transmission_key, transmission_key = get_keeper_tokens(params) + tmp_dag = TunnelDAG( + params, encrypted_session_token, encrypted_transmission_key, + config_uid, is_config=True, transmission_key=transmission_key, + ) + tmp_dag.linking_dag.load() + vertex = tmp_dag.linking_dag.get_vertex(config_uid) + content = get_vertex_content(vertex) if vertex else None + dag_allowed = (content or {}).get("allowedSettings") or {} + for dag_key, json_key in _DAG_KEY_TO_JSON.items(): + if dag_key in dag_allowed: + defaults[json_key] = "on" if dag_allowed[dag_key] else "off" + except Exception as exc: + logging.debug("PAMProjectExportCommand: could not load DAG allowed settings: %s", exc) + return defaults + + def _build_resources_and_users(self, params, resource_uids): + """Walk resource UIDs and collect resources + deduplicated top-level users. + + Two linking strategies are supported: + + 1. Standard: ``pam_settings.connection.userRecords[]`` and + top-level ``adminRef`` / ``adminCredentialRef`` carry user UIDs. + 2. Title-based (e.g. KCM imports — see PR #1942): the resource + record references users by **title** in + ``pam_settings.connection.{launch,administrative}_credentials`` + (e.g. ``"KCM User - prod-db"``) without a userRecords list. We + resolve those by scanning the project's vault for pamUser / + login records with matching titles. 
+ """ + resources_list = [] + top_level_users = [] + seen_user_uids = set() + + # Pre-build a lookup of (record_type, title.lower()) -> uid for fallback resolution + title_to_uid = self._build_user_title_index(params) + + for res_uid in resource_uids: + res_record = vault.KeeperRecord.load(params, res_uid) + if not res_record or not isinstance(res_record, vault.TypedRecord): + logging.debug("Export: skipping resource UID %s (not found or not TypedRecord)", res_uid) + continue + if res_record.record_type not in PAM_RESOURCES_RECORD_TYPES: + logging.debug( + "Export: skipping record %s with type '%s' (not a PAM resource type)", + res_uid, res_record.record_type, + ) + continue + + # Extract raw pamSettings payload (keep as-is for round-trip fidelity) + pam_settings_dict = {} + pam_settings_field = res_record.get_typed_field("pamSettings") + if ( + pam_settings_field + and isinstance(pam_settings_field.value, list) + and pam_settings_field.value + and isinstance(pam_settings_field.value[0], dict) + ): + pam_settings_dict = dict(pam_settings_field.value[0]) + + # Gather user UIDs referenced by this resource + resource_user_entries = [] + user_uids_for_resource = self._extract_user_uids(pam_settings_dict, title_to_uid) + + for usr_uid in user_uids_for_resource: + user_obj = self._load_user_obj(params, usr_uid) + if user_obj is None: + continue + resource_user_entries.append({"uid": usr_uid, "type": user_obj["type"], "title": user_obj["title"], "login": user_obj["login"]}) + if usr_uid not in seen_user_uids: + seen_user_uids.add(usr_uid) + top_level_users.append(user_obj) + + resources_list.append({ + "uid": res_uid, + "type": res_record.record_type, + "title": res_record.title, + "pam_settings": pam_settings_dict, + "users": resource_user_entries, + }) + + return resources_list, top_level_users + + def _build_user_title_index(self, params): + """Index every pamUser / login record by lowercased title for title-based linking.""" + index = {} + record_cache = 
getattr(params, "record_cache", {}) or {} + for uid in record_cache: + try: + rec = vault.KeeperRecord.load(params, uid) + except Exception: + continue + if not rec or not isinstance(rec, vault.TypedRecord): + continue + if rec.record_type not in ("pamUser", "login"): + continue + if rec.title: + index.setdefault(rec.title.strip().lower(), uid) + return index + + def _extract_user_uids(self, pam_settings_dict, title_to_uid=None): + """Return all user record UIDs referenced inside a pamSettings dict. + + Falls back to title-based resolution against ``title_to_uid`` when + the record stores a title (e.g. KCM-imported records, PR #1942) + instead of a UID in launch_credentials / administrative_credentials. + """ + user_uids = [] + title_to_uid = title_to_uid or {} + conn = pam_settings_dict.get("connection") or {} + if isinstance(conn, dict): + for uid in (conn.get("userRecords") or []): + if uid and uid not in user_uids: + user_uids.append(uid) + # KCM-style title references (PR #1942 schema) + for key in ("launch_credentials", "administrative_credentials"): + ref = conn.get(key) + if not isinstance(ref, str) or not ref: + continue + # If it already looks like a UID, accept as-is + if len(ref) == 22 and "/" not in ref and " " not in ref: + if ref not in user_uids: + user_uids.append(ref) + continue + # Otherwise treat as a title and resolve against the index + resolved = title_to_uid.get(ref.strip().lower()) + if resolved and resolved not in user_uids: + user_uids.append(resolved) + # Some record types also reference admin via adminRef / adminCredentialRef at top level + for key in ("adminRef", "adminCredentialRef"): + uid = pam_settings_dict.get(key) + if uid and uid not in user_uids: + user_uids.append(uid) + return user_uids + + def _load_user_obj(self, params, usr_uid): + """Load a pamUser/login record and return a plain dict, or None on failure.""" + usr_record = vault.KeeperRecord.load(params, usr_uid) + if not usr_record or not isinstance(usr_record, 
vault.TypedRecord): + logging.debug("Export: user UID %s not found or not TypedRecord", usr_uid) + return None + login_field = usr_record.get_typed_field("login") + login = "" + if login_field: + raw = login_field.get_default_value() + login = str(raw) if raw is not None else "" + return { + "uid": usr_uid, + "type": usr_record.record_type, + "title": usr_record.title, + "login": login, + } diff --git a/keepercommander/commands/pam_import/kcm_import.py b/keepercommander/commands/pam_import/kcm_import.py new file mode 100644 index 000000000..5bc4e5f6e --- /dev/null +++ b/keepercommander/commands/pam_import/kcm_import.py @@ -0,0 +1,4340 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' [(attr_name, attr_value)] + try: + self.cursor.execute(SQL_ATTRIBUTES) + for arow in self.cursor.fetchall(): + arow = dict(arow) + cid = arow['connection_id'] + attr_map.setdefault(cid, []).append( + (arow['attribute_name'], arow['attribute_value'])) + except Exception as e: + logging.debug('Attribute query skipped (table may not exist): %s', e) + + return connection_rows, attr_map + + def close(self): + try: + if self.cursor: + self.cursor.close() + except Exception: + pass + try: + if self.conn: + self.conn.close() + except Exception: + pass + + +def _set_nested(d, dotted_path, value): + """Set a value in a nested dict using a dotted key path.""" + keys = dotted_path.split('.') + for key in keys[:-1]: + child = d.get(key) + if not isinstance(child, dict): + child = {} + d[key] = child + d = child + d[keys[-1]] = value + + +class KCMParameterMapper: + """Applies kcm_mappings.json transformations to raw KCM connection data.""" + + def __init__(self): + mappings_path = os.path.join(os.path.dirname(__file__), 'kcm_mappings.json') + with open(mappings_path, 'r') as f: + self.mappings = json.load(f) + + def transform(self, connection_rows, include_disabled=False, attr_map=None): + # type: (List[Dict], bool, Dict) -> Tuple[List[Dict], List[Dict]] + """Group rows by connection_id, apply 
mappings, return (resources, users). + + Parameters and attributes are fetched with separate queries to avoid + an N*M cartesian product. ``attr_map`` is {connection_id: [(name, value)]}. + """ + connections = {} # type: Dict[int, Dict] + users = {} # type: Dict[int, Dict] + disabled_ids = set() # type: set + if attr_map is None: + attr_map = {} + + # Pre-scan for disabled connections (max_connections == 0) + if not include_disabled: + for row in connection_rows: + if row.get('max_connections') == 0: + disabled_ids.add(row['connection_id']) + + for row in connection_rows: + cid = row['connection_id'] + if cid in disabled_ids: + continue + name = row['name'] + protocol = row['protocol'] + + if cid not in connections: + record_type = PROTOCOL_TYPE_MAP.get(protocol, 'pamMachine') + conn_protocol = 'postgresql' if protocol == 'postgres' else protocol + connections[cid] = { + 'title': f'KCM Resource - {name}', + 'type': record_type, + 'host': '', + 'pam_settings': { + 'options': { + 'rotation': 'off', + 'connections': 'on', + 'tunneling': 'off', + 'graphical_session_recording': 'off' + }, + 'connection': { + 'protocol': conn_protocol, + 'launch_credentials': f'KCM User - {name}' + } + }, + '_group_id': row.get('connection_group_id'), + } + + if cid not in users: + users[cid] = { + 'title': f'KCM User - {name}', + 'type': 'pamUser', + 'password': '', + '_group_id': row.get('connection_group_id'), + } + + param_name = row.get('parameter_name') + param_value = row.get('parameter_value') if row.get('parameter_value') is not None else '' + + if param_name: + self._apply_mapping(cid, param_name, param_value, + connections, users) + + # Apply attributes (fetched separately — no cartesian product) + for cid, attrs in attr_map.items(): + if cid in disabled_ids or cid not in connections: + continue + for attr_name, attr_value in attrs: + if attr_name: + self._apply_mapping(cid, attr_name, + attr_value if attr_value is not None else '', + connections, users) + + # Fill in default 
ports for connections where KCM had no explicit port. + # Guacamole applies these defaults implicitly; Keeper needs them explicit. + for conn in connections.values(): + conn_settings = conn['pam_settings']['connection'] + if 'port' not in conn_settings: + default_port = PROTOCOL_DEFAULT_PORTS.get(conn_settings['protocol']) + if default_port: + conn_settings['port'] = default_port + + return list(connections.values()), list(users.values()) + + def _apply_mapping(self, cid, arg, value, connections, users): + resource = connections[cid] + user = users[cid] + + # Special cases first + if arg == 'hostname': + resource['host'] = value + return + if arg == 'port': + try: + resource['pam_settings']['connection']['port'] = str(int(value)) + except (ValueError, TypeError): + resource['pam_settings']['connection']['port'] = value + return + if arg.startswith('totp-') and value: + self._handle_totp(user, arg, value) + return + # Legacy Guacamole autofill selectors → autofill_targets + if arg in ('username-field', 'password-field') and value: + self._append_legacy_autofill(resource, arg, value) + return + # KCM autofill-configuration is a JSON/YAML array of page + # objects — convert to Keeper's newline key=value format + if arg == 'autofill-configuration' and value: + self._convert_kcm_autofill(resource, value) + return + # Params with no RBI equivalent — append to notes + if arg == 'profile-storage-directory' and value: + existing = resource.get('notes', '') or '' + resource['notes'] = ( + f'{existing}\nKCM profile-storage-directory: {value}'.strip()) + return + + # User mappings + if value and arg in self.mappings['users']: + mapping = self.mappings['users'][arg] + self._apply_single_mapping(mapping, value, user) + return + + # Resource mappings + if arg in self.mappings['resources']: + mapping = self.mappings['resources'][arg] + self._apply_single_mapping(mapping, value, resource) + + def _apply_single_mapping(self, mapping, value, target): + if mapping == 'ignore': + 
return + if mapping == 'log': + logging.debug('KCM parameter not mapped (action=log)') + return + if mapping is None: + return + if '=' in mapping: + mapping, value = mapping.split('=', 1) + _set_nested(target, mapping, value) + + def _handle_totp(self, user, arg, value): + if '_totp_parts' not in user: + user['_totp_parts'] = {} + user['_totp_parts'][arg] = value + + @staticmethod + def finalize_totp(users): + """Convert collected TOTP parts into otpauth:// URLs.""" + for user in users: + parts = user.pop('_totp_parts', None) + if not parts: + continue + alg = parts.get('totp-algorithm', '') + digits = parts.get('totp-digits', '') + period = parts.get('totp-period', '') + secret = parts.get('totp-secret', '') + # Base32 alphabet: A-Z, 2-7 (case-insensitive, strip padding) + stripped_secret = ''.join( + c for c in secret.upper() if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567') + if not stripped_secret: + continue + user['otp'] = ( + f'otpauth://totp/{TOTP_ACCOUNT}' + f'?secret={stripped_secret}&issuer=&algorithm={alg}' + f'&digits={digits}&period={period}' + ) + + @staticmethod + def _convert_kcm_autofill(resource, raw_value): + """Clean KCM autofill-configuration JSON for Keeper RBI. + + Keeper RBI uses the same JSON array format as KCM/Guacamole: + [{"page": "*.example.com", "username-field": "#user", + "password-field": "#pass", "submit": "button"}] + + The KCM database often stores this with excessive whitespace + and literal \\n characters from PostgreSQL extraction. + Parse and re-serialize as compact JSON. + """ + conn = resource.get('pam_settings', {}).get('connection', {}) + + # The KCM/PostgreSQL extraction chain may double-escape the JSON: + # real newlines → literal \n, \" → \\" + # Un-escape one level before parsing. 
+ cleaned = (raw_value + .replace('\\\\"', '\\"') # \\" → \" (double-escaped quotes) + .replace('\\n', '\n') # \n → real newline + .replace('\\t', '\t')) # \t → real tab + + # Try parsing as JSON to validate and compact it + try: + parsed = json.loads(cleaned) + if isinstance(parsed, list): + conn['autofill_targets'] = json.dumps(parsed) + return + except (json.JSONDecodeError, TypeError): + pass + + # Fallback: try the raw value as-is (real newlines already) + try: + parsed = json.loads(raw_value) + if isinstance(parsed, list): + conn['autofill_targets'] = json.dumps(parsed) + return + except (json.JSONDecodeError, TypeError): + pass + + # Not valid JSON — store cleaned/stripped version + conn['autofill_targets'] = cleaned.strip() + + @staticmethod + def _append_legacy_autofill(resource, arg, value): + """Convert legacy username-field/password-field to autofill JSON. + + Old Guacamole used simple CSS selectors (e.g. 'u', 'passwd') as + username-field/password-field params. Convert these to the JSON + array format used by both KCM and Keeper RBI. 
+ """ + conn = resource.get('pam_settings', {}).get('connection', {}) + existing = conn.get('autofill_targets', '') + + # Parse existing JSON array or start fresh + try: + steps = json.loads(existing) if existing else [] + if not isinstance(steps, list): + steps = [] + except (json.JSONDecodeError, TypeError): + steps = [] + + # Merge into existing step or create new one + if steps: + # Add to the last step (same page) + steps[-1][arg] = value + else: + steps.append({arg: value}) + + conn['autofill_targets'] = json.dumps(steps) + + @staticmethod + def map_protocol_to_type(protocol): + return PROTOCOL_TYPE_MAP.get(protocol, 'pamMachine') + + +class KCMGroupResolver: + """Builds folder hierarchy from KCM connection groups.""" + + def __init__(self, groups, mode='ksm'): + self.groups = {g['connection_group_id']: g for g in groups} + self.mode = mode + self.paths = {} # type: Dict[int, str] + self._resolve_all() + + def _resolve_all(self): + for gid in self.groups: + if self.mode == 'flat': + raw = self.groups[gid]['connection_group_name'] + self.paths[gid] = raw.replace('/', '_').replace('\\', '_').replace('..', '_') + else: + self._resolve_path(gid) + + def _resolve_path(self, group_id, _seen=None): + if group_id is None: + return 'ROOT' + if group_id in self.paths: + return self.paths[group_id] + if _seen is None: + _seen = set() + if group_id in _seen: + return 'ROOT' + _seen.add(group_id) + group = self.groups.get(group_id) + if not group: + return 'ROOT' + # Sanitize group name: strip path separators to prevent traversal + raw_name = group.get('connection_group_name') or f'group_{group_id}' + safe_name = raw_name.replace('/', '_').replace('\\', '_').replace('..', '_') + if self.mode == 'ksm' and group.get('ksm_config'): + self.paths[group_id] = safe_name + return safe_name + parent_path = self._resolve_path(group.get('parent_id'), _seen) + full_path = f"{parent_path}/{safe_name}" + self.paths[group_id] = full_path + return full_path + + def resolve_path(self, 
group_id): + if group_id is None: + return 'ROOT' + return self.paths.get(group_id, 'ROOT') + + def get_shared_folders(self): + folders = set() + for path in self.paths.values(): + root = path.split('/')[0] + folders.add(root) + return sorted(folders) + + +class AdaptiveThrottler: + """Probe-based adaptive batch throttler for Keeper API imports. + + Sends small probe batches before the real import to measure server + response times, then computes optimal batch parameters. During import, + continuously monitors batch timing and adjusts if throttles are detected + or headroom is available. + + The API rate limit is global per device token (~50 calls before HTTP 403). + Each record type has a known API call cost: + - Resource + nested user: ~20 calls (measured avg 19.2) + - External user (login): ~8 calls (measured avg 8.0) + + Batch sizes are bounded by: budget / calls_per_record. + Delays are per-type: proportional to calls_per_batch so the rate window + can absorb each batch before the next one starts. + + Adaptation is type-specific: + On throttle: only the offending type's batch_size halved, delay doubled + On recovery: 3 clean batches → type's batch_size += 1, delay *= 0.85 + """ + + # API call costs per record type (measured via instrumentation) + CALLS_PER_RESOURCE = 20 # PAM resource + nested user (measured avg 19.2) + CALLS_PER_USER = 8 # External login record (measured avg 8.0) + SECS_PER_CALL = 0.6 # ~100 calls/min = 1 call per 0.6s + + # Probe thresholds + PROBE_RTT_THRESHOLD = 30.0 # seconds — RTT above this stops probing + PROBE_COOLDOWN_MULTIPLIER = 5 # cooldown = max(10, base_rtt * this) + + # Adaptation parameters + CLEAN_BATCHES_TO_RECOVER = 3 # consecutive clean batches before speeding up + MIN_DELAY = 3.0 # never go below 3s delay + MAX_DELAY = 60.0 # never exceed 60s delay + MIN_BATCH_SIZE = 1 + MAX_BATCH_SIZE = 10 + + # Throttle detection: if batch takes longer than + # base_rtt * batch_size * THROTTLE_RATIO, consider it throttled. 
+ # Ratio > 3x the expected time means the server injected backoff. + THROTTLE_RATIO = 3.0 + # Minimum absolute headroom (seconds) to avoid false positives on + # small batches where even a short network hiccup looks like 3x. + THROTTLE_HEADROOM_SECS = 30.0 + + def __init__(self, enabled=True): + self.enabled = enabled + self.probe_rtts = [] # round-trip times from probe batches + self.base_rtt = None # median probe RTT + + # Active batch parameters (set after probe or from defaults) + self.res_batch_size = 2 + self.usr_batch_size = 8 + self.res_delay = 15.0 + self.usr_delay = 15.0 + + # Runtime state + self.throttle_count = 0 + self.consecutive_clean = 0 + self.total_batches = 0 + + # Optimal values (computed from probe, used as floor for recovery) + self._optimal_res_batch = 2 + self._optimal_usr_batch = 8 + self._optimal_res_delay = 15.0 + self._optimal_usr_delay = 15.0 + + def run_probe(self, params, config_uid, pam_json, extend_cmd_factory): + """Run probe batches to measure server response characteristics. + + Sends 3 single-record probe batches with decreasing delays (10s, 5s, 2s) + to determine the server's baseline RTT and throttle sensitivity. 
+ + Args: + params: Keeper session params + config_uid: PAM config UID for extend calls + pam_json: Full PAM JSON (used as template for probe batches) + extend_cmd_factory: Callable returning a PAMProjectExtendCommand instance + + Returns: + dict with probe results: base_rtt, probed_window, recommended params + """ + if not self.enabled: + return {'skipped': True, 'reason': 'auto-throttle disabled'} + + all_resources = pam_json['pam_data'].get('resources', []) + all_users = pam_json['pam_data'].get('users', []) + + # Pick a small probe record (prefer users — cheaper at ~8 API calls) + probe_items = all_users[:1] if all_users else all_resources[:1] + if not probe_items: + return {'skipped': True, 'reason': 'no records to probe with'} + is_user_probe = bool(all_users) + + probe_delays = [8.0, 4.0, 1.0] # decreasing delays between probes + probe_was_throttled = False + + logging.warning(f'{bcolors.OKBLUE}[Probe]{bcolors.ENDC} Measuring server response (3 probe batches)...') + + for i, probe_delay in enumerate(probe_delays): + # Build minimal batch JSON (pam_data only to avoid extend "extra data" warning) + shared_folders = pam_json['pam_data'].get('shared_folders', []) + if is_user_probe: + batch_json = {'pam_data': { + 'shared_folders': shared_folders, + 'resources': [], 'users': probe_items, + }} + else: + batch_json = {'pam_data': { + 'shared_folders': shared_folders, + 'resources': probe_items, 'users': [], + }} + + tmp_fd, tmp_path = tempfile.mkstemp(suffix='.json') + try: + with os.fdopen(tmp_fd, 'w') as tmp: + json.dump(batch_json, tmp, indent=2) + + batch_start = time.time() + cmd = extend_cmd_factory() + cmd.execute(params, config=config_uid, + file_name=tmp_path, dry_run=False) + rtt = time.time() - batch_start + self.probe_rtts.append(rtt) + + logging.warning('[Probe %d/3] RTT=%.1fs (delay before next: %.0fs)', + i + 1, rtt, probe_delay if i < 2 else 0) + + # Check for throttle signature: RTT > 30s suggests server + # injected a backoff (rest_api.py sleeps 
30-120s on 403) + if rtt > self.PROBE_RTT_THRESHOLD: + probe_was_throttled = True + logging.warning('[Probe] Throttle detected at probe %d ' + '(RTT=%.1fs > 30s threshold)', i + 1, rtt) + break # Don't stress the server further + + except Exception as e: + err_msg = str(e).lower() + if 'throttle' in err_msg or 'rate limit' in err_msg or 'too many request' in err_msg: + rtt = time.time() - batch_start + self.probe_rtts.append(rtt) + probe_was_throttled = True + logging.warning( + '[Probe %d/3] Throttled (%.1fs) — stopping probes', + i + 1, rtt) + break + raise + finally: + if os.path.exists(tmp_path): + os.unlink(tmp_path) + + if i < len(probe_delays) - 1: + time.sleep(probe_delay) + + if not self.probe_rtts: + return {'skipped': True, 'reason': 'all probes failed'} + + # Compute baseline RTT (median, excluding throttled values) + clean_rtts = [r for r in self.probe_rtts if r < self.PROBE_RTT_THRESHOLD] + if clean_rtts: + sorted_rtts = sorted(clean_rtts) + mid = len(sorted_rtts) // 2 + self.base_rtt = sorted_rtts[mid] + else: + # All probes were slow — server is heavily throttled + self.base_rtt = min(self.probe_rtts) + + # Compute optimal parameters from probe data + self._compute_optimal_params(probe_was_throttled) + + result = { + 'base_rtt': self.base_rtt, + 'probe_rtts': self.probe_rtts, + 'throttle_detected': probe_was_throttled, + 'optimal_res_batch': self._optimal_res_batch, + 'optimal_usr_batch': self._optimal_usr_batch, + 'optimal_res_delay': self._optimal_res_delay, + 'optimal_usr_delay': self._optimal_usr_delay, + } + + logging.warning( + '[Probe] Results: base_rtt=%.1fs, throttle=%s → ' + 'res: batch=%d delay=%.0fs, usr: batch=%d delay=%.0fs', + self.base_rtt, probe_was_throttled, + self._optimal_res_batch, self._optimal_res_delay, + self._optimal_usr_batch, self._optimal_usr_delay) + + # Apply optimal params + self.res_batch_size = self._optimal_res_batch + self.usr_batch_size = self._optimal_usr_batch + self.res_delay = self._optimal_res_delay + 
self.usr_delay = self._optimal_usr_delay + + # Let the rate window clear after probe before real import starts. + # The probe's API calls are still in the server's sliding window. + cooldown = (max(10, int(self.base_rtt * self.PROBE_COOLDOWN_MULTIPLIER)) + if not probe_was_throttled else 30) + logging.warning('[Probe] Cooldown %ds (clearing rate window)...', cooldown) + time.sleep(cooldown) + + return result + + def _compute_optimal_params(self, probe_throttled): + """Compute optimal batch parameters from probe results. + + The API rate limit is global (~50 calls before HTTP 403 on EU). + Batch sizes are bounded by: budget / calls_per_record. + Delays are per-type: calls_per_batch * SECS_PER_CALL, so each + batch's API calls can be absorbed by the server's rate window + before the next batch starts. + """ + # API call budget: stay under 70% of throttle window per batch + budget = 50 * 0.7 # ~35 calls safe per batch + + if probe_throttled: + # Server is already rate-limiting — very conservative + self._optimal_res_batch = 1 + self._optimal_usr_batch = 2 + self._optimal_res_delay = max(15.0, self.base_rtt * 3) + self._optimal_usr_delay = max(15.0, self.base_rtt * 3) + else: + # Batch size = budget / calls_per_record, capped at MAX_BATCH + self._optimal_res_batch = max( + self.MIN_BATCH_SIZE, + min(self.MAX_BATCH_SIZE, + int(budget / self.CALLS_PER_RESOURCE))) + self._optimal_usr_batch = max( + self.MIN_BATCH_SIZE, + min(self.MAX_BATCH_SIZE, + int(budget / self.CALLS_PER_USER))) + # Delay per type: proportional to API calls in the batch. + # This gives heavier batches more time for the rate window + # to absorb and prevents oscillating throttle/recovery. 
+ res_calls = self._optimal_res_batch * self.CALLS_PER_RESOURCE + usr_calls = self._optimal_usr_batch * self.CALLS_PER_USER + self._optimal_res_delay = max( + self.MIN_DELAY, res_calls * self.SECS_PER_CALL) + self._optimal_usr_delay = max( + self.MIN_DELAY, usr_calls * self.SECS_PER_CALL) + + def record_batch(self, batch_elapsed, num_records, is_resource=True): + """Record a completed batch and adapt parameters if needed. + + Throttle detection is purely timing-based: if the batch took more + than THROTTLE_RATIO × the expected time (based on probe RTT), + the server likely injected a backoff. + + Args: + batch_elapsed: Wall-clock time for the batch (seconds) + num_records: Number of records in the batch + is_resource: True for resource batches, False for user batches + + Returns: + dict with adaptation info (for logging) + """ + self.total_batches += 1 + if not self.enabled: + return {'adapted': False} + + # Expected time scales linearly with records. Use base_rtt as + # per-record baseline. Resources are ~5x heavier than users. + if self.base_rtt and self.base_rtt > 0: + weight = 5.0 if is_resource else 1.0 + expected = num_records * self.base_rtt * weight + 5.0 + else: + expected = num_records * (15.0 if is_resource else 3.0) + 10.0 + + # Throttle = batch took much longer than expected + threshold = max(expected * self.THROTTLE_RATIO, + expected + self.THROTTLE_HEADROOM_SECS) + throttled = batch_elapsed > threshold + + if throttled: + return self._adapt_down(batch_elapsed, expected, is_resource) + else: + return self._adapt_up(is_resource) + + def _adapt_down(self, batch_elapsed, expected_time, is_resource): + """Throttle detected — reduce the offending type's batch size and + increase its delay. 
The other type is left untouched.""" + self.throttle_count += 1 + self.consecutive_clean = 0 + + if is_resource: + old_batch = self.res_batch_size + old_delay = self.res_delay + self.res_batch_size = max(self.MIN_BATCH_SIZE, + self.res_batch_size // 2) + self.res_delay = min(self.MAX_DELAY, self.res_delay * 2) + logging.warning( + ' [Throttle #%d] Resource batch took %.0fs (expected ~%.0fs). ' + 'Adjusting: res_batch %d→%d, res_delay %.0fs→%.0fs', + self.throttle_count, batch_elapsed, expected_time, + old_batch, self.res_batch_size, + old_delay, self.res_delay) + else: + old_batch = self.usr_batch_size + old_delay = self.usr_delay + self.usr_batch_size = max(self.MIN_BATCH_SIZE, + self.usr_batch_size // 2) + self.usr_delay = min(self.MAX_DELAY, self.usr_delay * 2) + logging.warning( + ' [Throttle #%d] User batch took %.0fs (expected ~%.0fs). ' + 'Adjusting: usr_batch %d→%d, usr_delay %.0fs→%.0fs', + self.throttle_count, batch_elapsed, expected_time, + old_batch, self.usr_batch_size, + old_delay, self.usr_delay) + + return { + 'adapted': True, 'direction': 'down', + 'res_batch': self.res_batch_size, + 'usr_batch': self.usr_batch_size, + 'res_delay': self.res_delay, + 'usr_delay': self.usr_delay, + } + + def _adapt_up(self, is_resource): + """Clean batch — potentially increase the current type's throughput.""" + self.consecutive_clean += 1 + + if self.consecutive_clean < self.CLEAN_BATCHES_TO_RECOVER: + return {'adapted': False} + + changed = False + + if is_resource: + old_batch = self.res_batch_size + old_delay = self.res_delay + if self.res_batch_size < self._optimal_res_batch: + self.res_batch_size = min(self._optimal_res_batch, + self.res_batch_size + 1) + changed = True + if self.res_delay > self._optimal_res_delay: + self.res_delay = max(self._optimal_res_delay, + self.res_delay * 0.85) + changed = True + if changed: + self.consecutive_clean = 0 + logging.warning( + ' [Recovery] %d clean batches → res_batch %d→%d, ' + 'res_delay %.0fs→%.0fs', + 
self.CLEAN_BATCHES_TO_RECOVER, + old_batch, self.res_batch_size, + old_delay, self.res_delay) + else: + old_batch = self.usr_batch_size + old_delay = self.usr_delay + if self.usr_batch_size < self._optimal_usr_batch: + self.usr_batch_size = min(self._optimal_usr_batch, + self.usr_batch_size + 1) + changed = True + if self.usr_delay > self._optimal_usr_delay: + self.usr_delay = max(self._optimal_usr_delay, + self.usr_delay * 0.85) + changed = True + if changed: + self.consecutive_clean = 0 + logging.warning( + ' [Recovery] %d clean batches → usr_batch %d→%d, ' + 'usr_delay %.0fs→%.0fs', + self.CLEAN_BATCHES_TO_RECOVER, + old_batch, self.usr_batch_size, + old_delay, self.usr_delay) + + return {'adapted': changed, 'direction': 'up' if changed else 'none'} + + def get_summary(self): + """Return summary dict for post-import stats.""" + return { + 'probe_rtts': self.probe_rtts, + 'base_rtt': self.base_rtt, + 'throttle_count': self.throttle_count, + 'total_batches': self.total_batches, + 'final_res_batch': self.res_batch_size, + 'final_usr_batch': self.usr_batch_size, + 'final_res_delay': self.res_delay, + 'final_usr_delay': self.usr_delay, + } + + +class PAMProjectKCMImportCommand(Command): + _PRIVATE_NETS = ( + ipaddress.ip_network('10.0.0.0/8'), + ipaddress.ip_network('172.16.0.0/12'), + ipaddress.ip_network('192.168.0.0/16'), + ) + + parser = argparse.ArgumentParser( + prog='pam project kcm-import', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=''' +Examples: + # Auto-detect everything from Docker (simplest) + pam project kcm-import --docker-detect --name "My KCM Migration" + + # List available connection groups before importing + pam project kcm-import --docker-detect --list-groups + + # Import only specific groups + pam project kcm-import --docker-detect --groups "Production*,Staging*" --name "Prod" + + # Exclude groups from import + pam project kcm-import --docker-detect --exclude-groups "Incomplete*,Test*" + + # Dry-run preview (no vault 
changes) + pam project kcm-import --docker-detect --dry-run --output /tmp/review.json + + # Manual database connection + pam project kcm-import --db-host 10.0.0.5 --db-type postgresql --name "Manual" + + # Use password from vault record + pam project kcm-import --db-host db.local --db-password-record "KCM DB Creds" + + # Extend an existing PAM project + pam project kcm-import --docker-detect --config "Existing Config" --groups "NewGroup*" + + # Non-interactive batch mode + pam project kcm-import --docker-detect --name "Auto Import" --yes + ''') + + # Database options + parser.add_argument('--db-host', dest='db_host', action='store', + help='KCM database hostname') + parser.add_argument('--docker-detect', dest='docker_detect', action='store_true', + default=False, + help='Auto-detect credentials from Docker container') + parser.add_argument('--docker-container', dest='docker_container', + action='store', default=None, + help='Docker container name (auto-discovered if not set)') + parser.add_argument('--db-port', dest='db_port', type=int, action='store', + help='Database port (default: 3306 mysql, 5432 postgresql)') + parser.add_argument('--db-name', dest='db_name', action='store', + default=None, help='Database name (default: guacamole_db)') + parser.add_argument('--db-type', dest='db_type', action='store', + choices=['mysql', 'postgresql'], default=None, + help='Database type (auto-detected with --docker-detect)') + parser.add_argument('--db-user', dest='db_user', action='store', + default=None, help='Database username (default: guacamole_user)') + parser.add_argument('--db-password-record', dest='db_password_record', + action='store', + help='Keeper record UID or title containing DB password') + parser.add_argument('--db-ssl', dest='db_ssl', action='store_true', + default=False, + help='Require SSL/TLS for database connection') + parser.add_argument('--allow-cleartext', dest='allow_cleartext', + action='store_true', default=False, + help='Allow unencrypted 
connection to remote database (not recommended)') + + # Import options + parser.add_argument('--name', '-n', dest='project_name', action='store', + help='Project name') + parser.add_argument('--config', '-c', dest='config', action='store', + help='Existing PAM config UID or name (extend mode)') + parser.add_argument('--folder-mode', dest='folder_mode', action='store', + choices=['ksm', 'exact', 'flat'], default='ksm', + help='Connection group mapping mode') + parser.add_argument('--output', '-o', dest='output', action='store', + help='Save JSON to file instead of importing') + parser.add_argument('--include-credentials', dest='include_credentials', + action='store_true', default=False, + help='Include real passwords in --output file (default: redacted)') + + # Gateway options + parser.add_argument('--gateway', '-g', dest='gateway', action='store', + help='Existing gateway UID or name (interactive picker if omitted)') + parser.add_argument('--max-instances', dest='max_instances', type=int, + default=0, + help='Set gateway pool size (0 = skip, requires new gateway)') + + # Flags + parser.add_argument('--dry-run', '-d', dest='dry_run', action='store_true', + default=False, help='Preview without vault changes') + parser.add_argument('--skip-users', dest='skip_users', action='store_true', + default=False, help='Import connections only, skip users') + parser.add_argument('--include-disabled', dest='include_disabled', + action='store_true', default=False, + help='Include disabled KCM connections') + parser.add_argument('--estimate', dest='estimate', action='store_true', + default=False, + help='Scan database and show migration estimate without importing') + parser.add_argument('--yes', '-y', dest='auto_confirm', action='store_true', + default=False, + help='Skip interactive confirmation prompt') + parser.add_argument('--batch-size', dest='batch_size', type=int, + default=None, + help='Resources per batch (auto-scaled if not set)') + parser.add_argument('--batch-delay', 
dest='batch_delay', type=float, + default=None, + help='Seconds to wait between batches (default: 10)') + parser.add_argument('--auto-throttle', dest='auto_throttle', + action='store_true', default=True, + help='Enable adaptive throttling with probe (default: on)') + parser.add_argument('--no-auto-throttle', dest='auto_throttle', + action='store_false', + help='Disable adaptive throttling, use fixed batch params') + + # Group filtering + parser.add_argument('--groups', dest='include_groups', action='store', + default=None, + help='Import only these connection groups (comma-separated, ' + 'supports wildcards: "Lab*,Production")') + parser.add_argument('--exclude-groups', dest='exclude_groups', action='store', + default=None, + help='Exclude these connection groups (comma-separated, ' + 'supports wildcards: "Incomplete*,Test*")') + parser.add_argument('--list-groups', dest='list_groups', action='store_true', + default=False, + help='List available connection groups and exit') + + def get_parser(self): + return PAMProjectKCMImportCommand.parser + + def execute(self, params, **kwargs): + db_host = kwargs.get('db_host') or '' + docker_detect = kwargs.get('docker_detect', False) + + if not db_host and not docker_detect: + raise CommandError('kcm-import', + 'Either --db-host or --docker-detect is required') + + folder_mode = kwargs.get('folder_mode', 'ksm') + output_file = kwargs.get('output') or '' + dry_run = kwargs.get('dry_run', False) + skip_users = kwargs.get('skip_users', False) + config_uid = kwargs.get('config') or '' + project_name = kwargs.get('project_name') or '' + include_disabled = kwargs.get('include_disabled', False) + + # Read CLI values (None when not explicitly provided) + db_port = kwargs.get('db_port') + db_name = kwargs.get('db_name') + db_user = kwargs.get('db_user') + + # Validate container name if provided + container_arg = kwargs.get('docker_container') + if container_arg and not re.match(r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$', container_arg): + raise 
CommandError('kcm-import', + f'Invalid container name "{container_arg}". ' + f'Must match Docker naming rules: [a-zA-Z0-9][a-zA-Z0-9_.-]*') + + # Validate batch parameters + batch_size = kwargs.get('batch_size') + if batch_size is not None and batch_size < 1: + raise CommandError('kcm-import', '--batch-size must be >= 1') + batch_delay = kwargs.get('batch_delay') + if batch_delay is not None and batch_delay < 0: + raise CommandError('kcm-import', '--batch-delay must be >= 0') + + # Resolve DB credentials — CLI flags override docker-detected values + if docker_detect: + # Auto-discover container if not specified + container_name = kwargs.get('docker_container') + if container_name is None: + container_name = self._discover_docker_container() + logging.warning(f'{bcolors.OKGREEN}Auto-discovered Docker container:{bcolors.ENDC} %s', + container_name) + else: + logging.warning('Using specified Docker container: %s', + container_name) + # Auto-detect db_type if not specified + db_type_explicit = kwargs.get('db_type') + if db_type_explicit is None: + db_type = self._detect_db_type_from_docker(container_name) + logging.warning(f'{bcolors.OKGREEN}Auto-detected database type:{bcolors.ENDC} %s', db_type) + else: + db_type = db_type_explicit + conn_info, db_password = \ + self._detect_docker_credentials(db_type, container_name) + det_host, det_port, det_name, det_user = conn_info + db_host = db_host or det_host + db_port = db_port or det_port + db_name = db_name or det_name + db_user = db_user or det_user + else: + db_type = kwargs.get('db_type') or 'mysql' + db_password = self._resolve_db_password(params, kwargs) + + # Apply defaults for anything not set by CLI or docker-detect + db_port = db_port or (3306 if db_type == 'mysql' else 5432) + db_name = db_name or 'guacamole_db' + db_user = db_user or 'guacamole_user' + + # Connection target for log messages (not a credential) + log_target = f'{db_host}:{db_port}' + + # Connect and extract + db_ssl = kwargs.get('db_ssl', False) 
+ allow_cleartext = kwargs.get('allow_cleartext', False) + if not db_ssl and not self._is_local_host(db_host): + if not allow_cleartext: + raise CommandError('kcm-import', + f'Refusing to connect to remote host {db_host} without SSL/TLS. ' + f'Credentials and data would transit in cleartext. ' + f'Use --db-ssl to encrypt, or --allow-cleartext to override.') + logging.warning( + 'WARNING: Connecting to remote database %s without SSL/TLS. ' + 'Credentials and extracted data will transit in cleartext.', log_target) + logging.info('Connecting to KCM database at %s...', log_target) + connector = KCMDatabaseConnector( + db_type, db_host, db_port, db_user, db_password, db_name, ssl=db_ssl + ) + try: + connector.connect() + connector.validate_schema() + + logging.info('Extracting connection groups...') + groups = connector.extract_groups() + + logging.info('Extracting connections and parameters...') + connection_rows, attr_map = connector.extract_connections() + except CommandError: + raise + except Exception as e: + logging.debug('Database error: %s: %s', type(e).__name__, e) + raise CommandError('kcm-import', + f'Database connection failed: {type(e).__name__}. 
' + f'Use --debug for details.') + finally: + connector.close() + # Clear credentials from memory (best effort — Python strings are immutable) + connector.password = None + db_password = None # noqa: F841 + + logging.info('Extracted %d group(s), %d connection row(s)', + len(groups), len(connection_rows)) + + # Build group hierarchy + resolver = KCMGroupResolver(groups, mode=folder_mode) + + # Transform parameters + mapper = KCMParameterMapper() + resources, users = mapper.transform(connection_rows, + include_disabled=include_disabled, + attr_map=attr_map) + + # --list-groups: show available groups and exit + if kwargs.get('list_groups'): + self._print_group_list(groups, resolver, resources, users) + return + + # Interactive group picker (when no --groups flag in interactive mode) + include_groups = kwargs.get('include_groups') or '' + exclude_groups = kwargs.get('exclude_groups') or '' + auto_confirm = kwargs.get('auto_confirm', False) + if (not include_groups and not exclude_groups + and not auto_confirm + and not kwargs.get('dry_run') + and not kwargs.get('output') + and not kwargs.get('estimate') + and not getattr(params, 'batch_mode', False)): + selected = self._interactive_group_picker( + groups, resolver, resources, users) + if selected is not None: + # User selected specific groups — apply as include filter + include_groups = selected + + # Group filtering: --groups and --exclude-groups + if include_groups or exclude_groups: + resources, users = self._filter_by_groups( + resources, users, groups, resolver, + include_groups, exclude_groups) + num_resources = len(resources) + num_users = len(users) + if num_resources == 0 and num_users == 0: + raise CommandError('kcm-import', + 'No connections match the group filter. ' + 'Use --list-groups to see available groups.') + + # Resolve KSM dynamic tokens (${KEEPER_*}) AFTER group filtering + # so we only process connections the user actually selected. 
+ ksm_warnings, ksm_resolved, ksm_unresolved = self._resolve_ksm_tokens( + params, resources, users) + + # Estimation mode: scan and report without importing + if kwargs.get('estimate'): + total_connections = len({r['connection_id'] for r in connection_rows}) + self._print_estimate(groups, resources, users, skip_users, + include_disabled, total_connections) + return + + # Ensure project_name is set before folder_path assignment + if not project_name: + ts = datetime.datetime.now().strftime('%Y%m%d-%H%M%S') + project_name = f'KCM-Import-{ts}' + + # Assign folder paths under project-named shared folders + res_root = f'{project_name} - Resources' + usr_root = f'{project_name} - Users' + for item in resources: + group_id = item.pop('_group_id', None) + kcm_path = resolver.resolve_path(group_id) + if kcm_path == 'ROOT': + item['folder_path'] = res_root + elif kcm_path.startswith('ROOT/'): + item['folder_path'] = f'{res_root}/{kcm_path[5:]}' + else: + item['folder_path'] = f'{res_root}/{kcm_path}' + + for item in users: + group_id = item.pop('_group_id', None) + kcm_path = resolver.resolve_path(group_id) + if kcm_path == 'ROOT': + item['folder_path'] = usr_root + elif kcm_path.startswith('ROOT/'): + item['folder_path'] = f'{usr_root}/{kcm_path[5:]}' + else: + item['folder_path'] = f'{usr_root}/{kcm_path}' + + # Finalize TOTP + KCMParameterMapper.finalize_totp(users) + + # Clean SFTP settings — SFTP is a connection setting on the record, + # NOT a separate resource. Strip fields that don't belong per protocol. 
+ for resource in resources: + conn = resource.get('pam_settings', {}).get('connection', {}) + sftp = conn.get('sftp') + if not sftp: + continue + protocol = conn.get('protocol', '') + + if protocol in ('ssh', 'telnet'): + # SSH/Telnet: only enable_sftp + sftp_root_directory + cleaned = {} + if sftp.get('enable_sftp'): + cleaned['enable_sftp'] = sftp['enable_sftp'] + if sftp.get('sftp_root_directory'): + cleaned['sftp_root_directory'] = sftp['sftp_root_directory'] + if cleaned: + conn['sftp'] = cleaned + else: + conn.pop('sftp', None) + + elif protocol in ('rdp', 'vnc'): + # RDP/VNC: keep SFTPConnectionSettings fields as-is + # (enable_sftp, sftp_root_directory, sftp_upload_directory, + # host, port, login, password, private_key, etc.) + # These are connection settings, not separate records. + pass + + # Flag records with incomplete data from KCM source. + # Move them to a special subfolder with notes explaining the issues. + self._flag_incomplete_records(resources, users, res_root, usr_root) + + # Move KSM-affected records to dedicated subfolders with notes + self._flag_ksm_records(resources, users, ksm_resolved, ksm_unresolved, + res_root, usr_root) + + # Nest users inside their parent resources for proper extend.py linking. + # extend.py expects users in resource['users'] to create DAG links. + # Exception: pamRemoteBrowser (RBI) — PamRemoteBrowserObject has no + # 'users' attribute; RBI users must stay top-level and are linked via + # autofill_credentials in rbi_settings, not launch_credentials. 
+ if not skip_users: + user_index = {} + for user in users: + title = user.get('title', '') + if title: + user_index.setdefault(title, []).append(user) + + nested_ids = set() # track id() of nested user dicts + for resource in resources: + launch_cred = (resource.get('pam_settings', {}) + .get('connection', {}) + .get('launch_credentials', '')) + if not launch_cred or launch_cred not in user_index: + continue + + # RBI: keep users top-level, set autofill_credentials + if resource.get('type') == 'pamRemoteBrowser': + rbi_conn = (resource.get('pam_settings', {}) + .get('connection', {})) + rbi_conn['autofill_credentials'] = launch_cred + # RBI users are Login records, not pamUser + for u in user_index.get(launch_cred, []): + u['type'] = 'login' + continue + candidates = [u for u in user_index[launch_cred] + if id(u) not in nested_ids] + if len(candidates) == 1: + resource['users'] = [candidates[0]] + nested_ids.add(id(candidates[0])) + elif len(candidates) > 1: + # Duplicate titles across groups — match by folder path + res_fp = resource.get('folder_path', '') + res_suffix = (res_fp.split(' - Resources', 1)[-1] + if ' - Resources' in res_fp else res_fp) + for u in candidates: + u_fp = u.get('folder_path', '') + u_suffix = (u_fp.split(' - Users', 1)[-1] + if ' - Users' in u_fp else u_fp) + if res_suffix == u_suffix: + resource['users'] = [u] + nested_ids.add(id(u)) + break + + # Top-level users: only those not nested into a resource + users = [u for u in users if id(u) not in nested_ids] + + # Build shared folder list + sf_list = [res_root, usr_root] + + # Build PAM JSON + pam_json = { + 'pam_data': { + 'shared_folders': sf_list, + 'resources': resources, + 'users': users if not skip_users else [], + } + } + + if not config_uid: + pam_json['project'] = project_name + + num_resources = len(resources) + nested_user_count = sum(len(r.get('users', [])) for r in resources) + num_users = (nested_user_count + len(users)) if not skip_users else 0 + + # Output or import + if 
output_file: + include_creds = kwargs.get('include_credentials', False) + out_data = pam_json if include_creds else self._redact_for_display(pam_json) + fd = os.open(output_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600) + with os.fdopen(fd, 'w') as f: + json.dump(out_data, f, indent=2) + redact_note = '' if include_creds else ' (credentials redacted)' + logging.warning('JSON written to %s (%d resources, %d users)%s', + output_file, num_resources, num_users, redact_note) + if not include_creds: + logging.info('Use --include-credentials to include passwords in output') + return + + if dry_run: + redacted = self._redact_for_display(pam_json) + print(json.dumps(redacted, indent=2)) + self._print_import_summary( + project_name, config_uid, num_resources, num_users, + resources, users, skip_users) + logging.warning('Dry run: %d resources, %d users (no vault changes)', + num_resources, num_users) + return + + # Validate data before import + warnings = self._validate_import_data(resources, users, skip_users) + warnings.extend(ksm_warnings) + for w in warnings: + logging.warning(f'{bcolors.WARNING}Validation:{bcolors.ENDC} %s', w) + + # Pre-import summary + confirmation + auto_confirm = kwargs.get('auto_confirm', False) + if not auto_confirm: + self._print_import_summary( + project_name, config_uid, num_resources, num_users, + resources, users, skip_users) + answer = input('\n Proceed with import? 
[y/N]: ').strip().lower() + if answer not in ('y', 'yes'): + raise CommandError('kcm-import', 'Import cancelled by user.') + + # Gateway selection (only for new project imports, not extend mode) + is_new_project = not config_uid + gateway_arg = kwargs.get('gateway') or '' + resolved_config = None + if not config_uid: + if gateway_arg: + resolved_config = self._resolve_gateway(params, gateway_arg) + elif not auto_confirm and not getattr(params, 'batch_mode', False): + # Interactive: show gateway picker + resolved_config = self._resolve_gateway(params, '') + if resolved_config: + # Gateway is already bound to an existing project + actual_res, actual_usr = self._discover_shared_folder_names( + params, resolved_config) + existing_project = actual_res.rsplit(' - Resources', 1)[0] if actual_res else '(unknown)' + gw_label = gateway_arg or 'Selected gateway' + if auto_confirm or getattr(params, 'batch_mode', False): + # --yes or batch mode: default to new project + logging.warning('Gateway "%s" belongs to project "%s" ' + '— creating new project (auto-confirm)', + gw_label, existing_project) + else: + print(f'\n{gw_label} belongs to project "{existing_project}".') + print(f' [1] Create a NEW project with its own folders and gateway (recommended)') + print(f' [2] Add records into "{existing_project}" existing folders') + print(f' [3] Cancel import') + choice = input('\n Select [1]: ').strip() + if choice == '2': + config_uid = resolved_config + is_new_project = False + elif choice == '3': + raise CommandError('kcm-import', 'Import cancelled by user.') + # Default (1 or empty): create new project + + # Phase 1: Create project skeleton if no existing config + gateway_token = '' + if not config_uid: + config_uid, gateway_token = self._create_project_skeleton( + params, project_name, pam_json) + is_new_project = True + + # Discover actual shared folder names and rewrite paths if needed + actual_res, actual_usr = self._discover_shared_folder_names( + params, config_uid) + if 
actual_res and actual_usr: + self._rewrite_folder_paths( + pam_json, actual_res, actual_usr, project_name) + pam_json['pam_data']['shared_folders'] = [actual_res, actual_usr] + + # Phase 2: Populate records via batched extend calls. + # Phase 2a: External users (login records) in small batches + # Phase 2b: Resources (with nested users) — users=[] since external + # users already exist and are found by title match. + + from .extend import PAMProjectExtendCommand + from ... import api + + all_resources = pam_json['pam_data']['resources'] + all_users = pam_json['pam_data']['users'] + + # Set up adaptive throttler + auto_throttle = kwargs.get('auto_throttle', True) + override_size = kwargs.get('batch_size') + override_delay = kwargs.get('batch_delay') + # Disable adaptive throttle if user set manual batch params + if override_size is not None or override_delay is not None: + auto_throttle = False + + throttler = AdaptiveThrottler(enabled=auto_throttle) + + if auto_throttle: + # Probe phase: measure server response before importing + probe_result = throttler.run_probe( + params, config_uid, pam_json, + extend_cmd_factory=PAMProjectExtendCommand) + if probe_result.get('skipped'): + logging.info('[Probe] Skipped: %s — using static params', + probe_result.get('reason', 'unknown')) + # Fall back to static params + res_batch, usr_batch, delay = self._compute_batch_params( + len(all_resources), len(all_users), None, None) + throttler.res_batch_size = res_batch + throttler.usr_batch_size = usr_batch + throttler.res_delay = delay + throttler.usr_delay = delay + else: + # Static batch params (manual overrides or auto-throttle off) + res_batch, usr_batch, delay = self._compute_batch_params( + len(all_resources), len(all_users), + override_size, override_delay) + throttler.res_batch_size = res_batch + throttler.usr_batch_size = usr_batch + throttler.res_delay = delay + throttler.usr_delay = delay + + pre_count = len(params.record_cache) + tmp_path = None + import_start = 
time.time() + + # Per-record tracking: list of dicts with keys: + # name, type, phase ('user'|'resource'), status ('ok'|'skipped'|'error'), reason + import_results = [] + + # Log import plan (batch counts use current throttler params) + res_batch = throttler.res_batch_size + usr_batch = throttler.usr_batch_size + res_batches_n = ((len(all_resources) + res_batch - 1) // res_batch + if all_resources else 0) + usr_batches_n = ((len(all_users) + usr_batch - 1) // usr_batch + if all_users else 0) + throttle_mode = 'adaptive' if auto_throttle else 'fixed' + logging.warning( + '[Phase 2] Import plan (%s): %d users in %d batches ' + '(size %d, %.0fs delay) + %d resources in %d batches ' + '(size %d, %.0fs delay)', + throttle_mode, + len(all_users), usr_batches_n, usr_batch, throttler.usr_delay, + len(all_resources), res_batches_n, res_batch, throttler.res_delay) + + try: + # Phase 2a: External users (RBI login records) + if all_users: + logging.warning(f'{bcolors.OKBLUE}[Phase 2a]{bcolors.ENDC} Importing %d external users...', + len(all_users)) + ui = 0 # user index pointer + ub = 0 # batch counter + while ui < len(all_users): + batch_size = throttler.usr_batch_size + batch_users = all_users[ui:ui + batch_size] + + batch_json = {'pam_data': { + 'shared_folders': pam_json['pam_data'].get('shared_folders', []), + 'resources': [], + 'users': batch_users, + }} + + if tmp_path and os.path.exists(tmp_path): + os.unlink(tmp_path) + tmp_fd, tmp_path = tempfile.mkstemp(suffix='.json') + with os.fdopen(tmp_fd, 'w') as tmp: + json.dump(batch_json, tmp, indent=2) + + elapsed = time.time() - import_start + logging.warning( + '[Phase 2a] Users batch %d: users %d-%d of %d ' + '[%.0fs elapsed, batch_size=%d, delay=%.0fs]', + ub + 1, ui + 1, + min(ui + batch_size, len(all_users)), + len(all_users), elapsed, + batch_size, throttler.usr_delay) + + batch_start = time.time() + retries = 0 + while True: + try: + self._run_extend_batch( + params, config_uid, tmp_path, + batch_users, 'user', 
import_results) + break + except Exception as e: + err_msg = str(e).lower() + if retries < 3 and ('throttle' in err_msg or 'rate limit' in err_msg or 'too many request' in err_msg): + retries += 1 + wait = 60 * retries + logging.warning( + f' {bcolors.FAIL}[Throttle]{bcolors.ENDC} %s — waiting %ds before ' + 'retry %d/3', e, wait, retries) + time.sleep(wait) + throttler.record_batch( + wait, len(batch_users), + is_resource=False) + else: + # Mark batch items as errors + for item in batch_users: + import_results.append({ + 'name': item.get('title', item.get('login', '')), + 'type': item.get('type', 'unknown'), + 'phase': 'user', + 'status': 'error', + 'reason': str(e)[:100], + }) + raise + batch_elapsed = time.time() - batch_start + + throttler.record_batch(batch_elapsed, len(batch_users), + is_resource=False) + + ui += len(batch_users) + ub += 1 + if ui < len(all_users): + time.sleep(throttler.usr_delay) + + logging.warning( + f'{bcolors.OKGREEN}[Phase 2a] Complete{bcolors.ENDC}: %d users imported in %d batches ' + '[%.0fs]', + len(all_users), ub, time.time() - import_start) + + # Phase 2b: Resources (nested users travel with parent resource) + if all_resources: + logging.warning( + '[Phase 2b] Importing %d resources...', + len(all_resources)) + phase2b_start = time.time() + ri = 0 # resource index pointer + rb = 0 # batch counter + while ri < len(all_resources): + batch_size = throttler.res_batch_size + batch_resources = all_resources[ri:ri + batch_size] + + batch_json = {'pam_data': { + 'shared_folders': pam_json['pam_data'].get('shared_folders', []), + 'resources': batch_resources, + 'users': [], + }} + + if tmp_path and os.path.exists(tmp_path): + os.unlink(tmp_path) + tmp_fd, tmp_path = tempfile.mkstemp(suffix='.json') + with os.fdopen(tmp_fd, 'w') as tmp: + json.dump(batch_json, tmp, indent=2) + + elapsed = time.time() - import_start + # Estimate remaining time based on average batch time + if rb > 0: + avg_batch = (time.time() - phase2b_start) / rb + 
remaining_items = len(all_resources) - ri + remaining_batches = (remaining_items + batch_size - 1) // batch_size + remaining = avg_batch * remaining_batches + eta_str = f', ~{remaining:.0f}s remaining' + else: + eta_str = '' + + logging.warning( + '[Phase 2b] Resources batch %d: resources %d-%d ' + 'of %d [%.0fs elapsed%s, batch_size=%d, delay=%.0fs]', + rb + 1, ri + 1, + min(ri + batch_size, len(all_resources)), + len(all_resources), elapsed, eta_str, + batch_size, throttler.res_delay) + + batch_start = time.time() + retries = 0 + while True: + try: + self._run_extend_batch( + params, config_uid, tmp_path, + batch_resources, 'resource', import_results) + break + except Exception as e: + err_msg = str(e).lower() + if retries < 3 and ('throttle' in err_msg or 'rate limit' in err_msg or 'too many request' in err_msg): + retries += 1 + wait = 60 * retries + logging.warning( + ' [Throttle] %s — waiting %ds before ' + 'retry %d/3', e, wait, retries) + time.sleep(wait) + throttler.record_batch( + wait, len(batch_resources), + is_resource=True) + else: + for item in batch_resources: + import_results.append({ + 'name': item.get('title', ''), + 'type': item.get('type', 'unknown'), + 'phase': 'resource', + 'status': 'error', + 'reason': str(e)[:100], + }) + raise + batch_elapsed = time.time() - batch_start + + throttler.record_batch(batch_elapsed, len(batch_resources), + is_resource=True) + + ri += len(batch_resources) + rb += 1 + if ri < len(all_resources): + time.sleep(throttler.res_delay) + + logging.warning( + f'{bcolors.OKGREEN}[Phase 2b] Complete{bcolors.ENDC}: %d resources imported in %d batches ' + '[%.0fs]', + len(all_resources), rb, time.time() - import_start) + + api.sync_down(params) + post_count = len(params.record_cache) + created = post_count - pre_count + + if created == 0: + raise CommandError('kcm-import', + 'Extend phase created 0 records. 
' + 'Check errors above (duplicate titles, bad paths, etc.)') + + # Post-import summary + total_time = time.time() - import_start + expected = num_resources + num_users + summary = throttler.get_summary() + + # Set max instances for gateway pooling (new gateways only) + max_instances = kwargs.get('max_instances', 0) + if max_instances > 0 and is_new_project: + self._set_gateway_pool_size(params, project_name, max_instances) + + # Gather project assets for report + assets = self._get_project_assets(params, config_uid) + if gateway_token: + assets['gateway_token'] = gateway_token + assets['config_uid'] = config_uid + + # Build and print structured report + report_text = self._build_import_report( + project_name=project_name, + config_uid=config_uid, + is_new_project=is_new_project, + assets=assets, + num_resources=num_resources, + num_users=num_users, + created=created, + expected=expected, + total_time=total_time, + throttler_summary=summary, + warnings=warnings, + kwargs=kwargs, + import_results=import_results, + ) + + # Print report to console + try: + print(report_text) + except (BrokenPipeError, OSError): + pass + + # Save report as vault record (always attempt, even if console broken) + try: + res_sf_name = assets.get('res_sf_name', '') + self._create_summary_record( + params, res_sf_name, report_text, + project_name=project_name, assets=assets, + kwargs=kwargs, + import_results=import_results, + warnings=warnings) + except Exception as e: + logging.warning('Could not create summary record: %s', e) + logging.debug('Summary record traceback:', exc_info=True) + finally: + if tmp_path and os.path.exists(tmp_path): + os.unlink(tmp_path) + + @staticmethod + def _run_extend_batch(params, config_uid, tmp_path, batch_items, + phase, import_results): + """Run a single extend batch and capture per-record results. + + Parses stdout from the extend command to determine which records + were created, skipped (existing), or failed (missing fields). 
+ + Args: + batch_items: list of dicts from the import JSON (resources or users) + phase: 'user' or 'resource' + import_results: list to append result dicts to + """ + from .extend import PAMProjectExtendCommand + + pre_cache = set(params.record_cache.keys()) + + stdout_trap = io.StringIO() + with _STDOUT_LOCK: + original_stdout = sys.stdout + sys.stdout = stdout_trap + try: + cmd = PAMProjectExtendCommand() + cmd.execute(params, + config=config_uid, + file_name=tmp_path, + dry_run=False) + finally: + sys.stdout = original_stdout + batch_output = stdout_trap.getvalue() + + # Echo captured output (safe against broken pipes) + if batch_output: + try: + print(batch_output, end='') + except (BrokenPipeError, OSError): + pass + + post_cache = set(params.record_cache.keys()) + new_uids = post_cache - pre_cache + + # Parse output for per-record info + # Common patterns from extend.py output: + # "PAM User is missing required field `password`" + # "N existing records (skipped), M new records to be created" + missing_fields = [] + existing_count = 0 + new_count = 0 + for line in batch_output.splitlines(): + stripped = line.strip() + if 'missing required field' in stripped.lower(): + missing_fields.append(stripped) + if 'existing records' in stripped and 'skipped' in stripped: + m = re.search(r'(\d+)\s+existing records?\s*\(skipped\)', stripped) + if m: + existing_count += int(m.group(1)) + if 'new records to be created' in stripped: + m = re.search(r'(\d+)\s+new records? 
to be created', stripped) + if m: + new_count += int(m.group(1)) + + # Build per-item results (includes nested users inside resources) + all_items = [] + for item in batch_items: + all_items.append(item) + # Resources can have nested users that also become records + for nested_user in item.get('users', []): + all_items.append(nested_user) + + for item in all_items: + title = item.get('title', item.get('login', '')) + rtype = item.get('type', 'unknown') + # Check if this item had a missing-field warning + item_failed = False + fail_reason = '' + for mf in missing_fields: + # Missing field warnings don't name the record, so we + # attribute to items missing the mentioned field + if 'password' in mf.lower(): + pw = item.get('password', '') + if not pw: + item_failed = True + fail_reason = mf + break + if item_failed: + import_results.append({ + 'name': title, 'type': rtype, 'phase': phase, + 'status': 'skipped', 'reason': fail_reason, + }) + else: + import_results.append({ + 'name': title, 'type': rtype, 'phase': phase, + 'status': 'ok', 'reason': '', + }) + + @staticmethod + def _compute_batch_params(num_resources, num_users, + override_size=None, override_delay=None): + """Compute batch sizes and delay to avoid API throttling. + + API call costs per record (measured via instrumentation): + External user: ~8 calls → batch of 8 = ~64 calls + Resource+user: ~20 calls → batch of 2 = ~40 calls (safe) + + Keeper EU throttle window: ~50 requests triggers HTTP 403 + 60s + backoff. Conservative batching with 15s delays avoids throttles. + + Returns (resource_batch_size, user_batch_size, delay_seconds). 
+ """ + total = num_resources + num_users + if total <= 50: + res_batch, usr_batch, delay = 2, 8, 12.0 + elif total <= 500: + res_batch, usr_batch, delay = 2, 8, 15.0 + elif total <= 5000: + res_batch, usr_batch, delay = 1, 6, 15.0 + else: + res_batch, usr_batch, delay = 1, 5, 15.0 + + # Allow CLI overrides + if override_size is not None and override_size > 0: + res_batch = override_size + if override_delay is not None and override_delay >= 0: + delay = float(override_delay) + + return res_batch, usr_batch, delay + + @staticmethod + def _set_gateway_pool_size(params, project_name, max_instances): + """Set max instances for the newly created gateway.""" + from ..pam import gateway_helper + gateways = gateway_helper.get_all_gateways(params) + gw_name = f'{project_name} Gateway' + match = next((g for g in gateways if g.controllerName == gw_name), None) + if match: + try: + from ...proto import pam_pb2 + from ... import api + rq = pam_pb2.PAMSetMaxInstanceCountRequest() + rq.controllerUid = match.controllerUid + rq.maxInstanceCount = max_instances + api.communicate_rest(params, rq, 'pam/set_controller_max_instance_count') + logging.warning('Gateway pool size set to %d instances.', max_instances) + except Exception as e: + logging.warning('Could not set pool size: %s', type(e).__name__) + else: + logging.warning('Could not find gateway "%s" to set pool size.', gw_name) + + @staticmethod + def _print_group_list(groups, resolver, resources, users): + """Print available connection groups with resource counts.""" + # Build group_id → name mapping + group_names = {} + for g in groups: + gid = g['connection_group_id'] + group_names[gid] = g.get('connection_group_name', f'group_{gid}') + + # Count resources per group + group_res_counts = {} + group_usr_counts = {} + for r in resources: + gid = r.get('_group_id') + group_res_counts[gid] = group_res_counts.get(gid, 0) + 1 + for u in users: + gid = u.get('_group_id') + group_usr_counts[gid] = group_usr_counts.get(gid, 0) + 1 + + 
# Also count items in ROOT (no group) + root_res = group_res_counts.get(None, 0) + root_usr = group_usr_counts.get(None, 0) + + print('\nAvailable Connection Groups:') + print('=' * 70) + print(f' {"Group Name":<45s} {"Resources":>10s} {"Users":>8s}') + print(' ' + '-' * 65) + + if root_res or root_usr: + print(f' {"(ROOT - no group)":<45s} {root_res:>10d} {root_usr:>8d}') + + for g in sorted(groups, key=lambda x: x.get('connection_group_name', '')): + gid = g['connection_group_id'] + name = g.get('connection_group_name', f'group_{gid}') + path = resolver.resolve_path(gid) + res_n = group_res_counts.get(gid, 0) + usr_n = group_usr_counts.get(gid, 0) + # Only show groups that have connections (or are parents) + indent = ' ' + if '/' in path: + depth = path.count('/') + indent = ' ' + ' ' * depth + display = f'{indent}{name}' + if res_n > 0 or usr_n > 0: + print(f' {display:<45s} {res_n:>10d} {usr_n:>8d}') + else: + print(f' {display:<45s} {"—":>10s} {"—":>8s}') + + total_res = len(resources) + total_usr = len(users) + print(' ' + '-' * 65) + print(f' {"TOTAL":<45s} {total_res:>10d} {total_usr:>8d}') + print() + print(' Use --groups "Name1,Name2" to import specific groups') + print(' Use --exclude-groups "Name1,Name2" to exclude groups') + print(' Wildcards supported: --exclude-groups "Incomplete*,Test*"') + print() + + @staticmethod + def _interactive_group_picker(groups, resolver, resources, users): + """Show connection groups and let user select which to import. + + Returns a comma-separated pattern string for _filter_by_groups, + or None if user wants to import all groups. 
+ """ + # Build group stats + group_res = {} + group_usr = {} + for r in resources: + gid = r.get('_group_id') + group_res[gid] = group_res.get(gid, 0) + 1 + for u in users: + gid = u.get('_group_id') + group_usr[gid] = group_usr.get(gid, 0) + 1 + + # Only show top-level groups (no parent or parent is ROOT) + top_groups = [] + for g in groups: + path = resolver.resolve_path(g['connection_group_id']) + # Top-level: path is "ROOT/name" or just the group name (ksm mode) + depth = path.count('/') + if depth <= 1: + gid = g['connection_group_id'] + name = g.get('connection_group_name', f'group_{gid}') + # Count all items in this group and its children + res_n = 0 + usr_n = 0 + for g2 in groups: + g2_path = resolver.resolve_path(g2['connection_group_id']) + if g2_path == path or g2_path.startswith(path + '/'): + g2id = g2['connection_group_id'] + res_n += group_res.get(g2id, 0) + usr_n += group_usr.get(g2id, 0) + if res_n > 0 or usr_n > 0: + top_groups.append((name, res_n, usr_n)) + + root_res = group_res.get(None, 0) + root_usr = group_usr.get(None, 0) + + print('\nConnection Groups Found:') + print('─' * 60) + idx = 1 + numbered = [] + if root_res or root_usr: + print(f' [{idx}] (ROOT — ungrouped) ' + f'({root_res} resources, {root_usr} users)') + numbered.append('ROOT') + idx += 1 + for name, res_n, usr_n in sorted(top_groups): + print(f' [{idx}] {name} ' + f'({res_n} resources, {usr_n} users)') + numbered.append(name) + idx += 1 + + total_res = len(resources) + total_usr = len(users) + print(f'\n Total: {total_res} resources, {total_usr} users') + print(f'\n [A] Import ALL groups') + print() + + try: + choice = input(' Select groups (comma-separated numbers, or A for all) [A]: ').strip() + except EOFError: + return None + + if not choice or choice.upper() == 'A': + return None # import all + + # Parse selections + selected_names = [] + for part in choice.split(','): + part = part.strip() + try: + i = int(part) - 1 + if 0 <= i < len(numbered): + 
selected_names.append(numbered[i]) + except ValueError: + continue + + if not selected_names: + return None # bad input, import all + + # Build pattern string — exact names (no wildcards needed) + # Use wildcards to match children too + patterns = [f'{name}*' if name != 'ROOT' else name + for name in selected_names] + logging.warning('Selected groups: %s', ', '.join(selected_names)) + return ','.join(patterns) + + @staticmethod + def _filter_by_groups(resources, users, groups, resolver, + include_pattern, exclude_pattern): + """Filter resources and users by connection group name patterns. + + Patterns are comma-separated, support fnmatch wildcards (* and ?). + Returns (filtered_resources, filtered_users). + """ + from fnmatch import fnmatch + + # Build group_id → group_name and group_id → full_path + group_names = {} + group_paths = {} + for g in groups: + gid = g['connection_group_id'] + group_names[gid] = g.get('connection_group_name', '') + group_paths[gid] = resolver.resolve_path(gid) + + include_pats = [p.strip() for p in include_pattern.split(',') + if p.strip()] if include_pattern else [] + exclude_pats = [p.strip() for p in exclude_pattern.split(',') + if p.strip()] if exclude_pattern else [] + + def _matches(gid, patterns): + """Check if a group matches any of the patterns.""" + name = group_names.get(gid, '') + path = group_paths.get(gid, '') + for pat in patterns: + pat_lower = pat.lower() + # Match against group name, full path, or path segments + if fnmatch(name.lower(), pat_lower): + return True + if fnmatch(path.lower(), pat_lower): + return True + # Also match any path segment + for segment in path.split('/'): + if fnmatch(segment.lower(), pat_lower): + return True + return False + + def _keep(item): + gid = item.get('_group_id') + if gid is None: + # ROOT items: keep unless --groups is set, or if ROOT was + # explicitly selected in the interactive picker + if not include_pats: + return True + return any(p == 'ROOT' for p in include_pats) + if 
include_pats and not _matches(gid, include_pats): + return False + if exclude_pats and _matches(gid, exclude_pats): + return False + return True + + filtered_res = [r for r in resources if _keep(r)] + filtered_usr = [u for u in users if _keep(u)] + + kept = len(filtered_res) + len(filtered_usr) + total = len(resources) + len(users) + skipped = total - kept + if skipped > 0: + logging.warning(f'{bcolors.WARNING}Group filter:{bcolors.ENDC} keeping %d of %d items ' + '(%d excluded)', kept, total, skipped) + + return filtered_res, filtered_usr + + @staticmethod + def _print_estimate(groups, resources, users, skip_users, + include_disabled, total_connections): + """Print a pre-import migration estimate.""" + # Count resource types + type_counts = {} # type: Dict[str, int] + for r in resources: + rtype = r.get('type', 'pamMachine') + type_counts[rtype] = type_counts.get(rtype, 0) + 1 + num_users = len(users) if not skip_users else 0 + num_resources = len(resources) + + # Estimate API calls per record type (measured via instrumentation): + # resource + nested user: ~20 calls (avg 19.2, range 16-25) + # external user (login): ~8 calls (avg 8.0, range 6-10) + # project setup: ~20 calls (folders, KSM app, gateway, config) + api_per_resource = 20 + api_per_user = 8 + api_setup = 20 + est_api_calls = (api_setup + + num_resources * api_per_resource + + num_users * api_per_user) + + # Time estimates at different throughput rates (requests per second) + rates = [ + ('Conservative (5 req/s)', 5), + ('Standard (15 req/s)', 15), + ('Enterprise (50 req/s)', 50), + ] + + def _fmt_duration(seconds): + if seconds < 60: + return f'{seconds:.0f}s' + m, s = divmod(int(seconds), 60) + if m < 60: + return f'{m}m {s:02d}s' + h, m = divmod(m, 60) + return f'{h}h {m:02d}m {s:02d}s' + + print() + print('=' * 60) + print('KCM Migration Estimate') + print('=' * 60) + print() + print(f' Connection groups: {len(groups):>6d}') + print(f' Total connections: {total_connections:>6d}') + if not 
include_disabled: + disabled = total_connections - num_resources + if disabled > 0: + print(f' Disabled (excluded): {disabled:>6d}') + print() + print(' Resources:') + for rtype, count in sorted(type_counts.items()): + label = rtype.replace('pam', '').replace('Machine', 'SSH/RDP/VNC') + print(f' {label:<22s} {count:>5d}') + print(f' {"Total":<22s} {num_resources:>5d}') + print() + if skip_users: + print(' Users: (skipped)') + else: + print(f' Users: {num_users:>6d}') + print() + print(f' Estimated API calls: ~{est_api_calls:>5d}') + print() + print(' Estimated import time:') + for label, rps in rates: + seconds = est_api_calls / rps + print(f' {label} {_fmt_duration(seconds):>10s}') + print() + print('=' * 60) + print() + logging.info('Estimate complete. Run without --estimate to import.') + + @staticmethod + def _validate_import_data(resources, users, skip_users): + """Pre-import validation. Returns list of warning strings.""" + warnings = [] + + # Check for resources with rotation_settings (should never exist from KCM) + for r in resources: + if r.get('rotation_settings'): + warnings.append( + f'Resource "{r.get("title")}" has rotation_settings ' + f'(unexpected for KCM imports)') + if not skip_users: + for u in users: + if u.get('rotation_settings'): + warnings.append( + f'User "{u.get("title")}" has rotation_settings ' + f'(will cause errors without admin credentials)') + for r in resources: + for nu in r.get('users', []): + if nu.get('rotation_settings'): + warnings.append( + f'Nested user "{nu.get("title")}" has rotation_settings') + + # Check for unnested users (will become external users without resource linkage) + if not skip_users and users: + warnings.append( + f'{len(users)} user(s) not linked to any resource ' + f'(will be created as external users)') + + # Check for users missing passwords (will be skipped by extend engine) + if not skip_users: + no_pw_external = [u for u in users if not u.get('password')] + no_pw_nested = [] + for r in resources: + 
for nu in r.get('users', []): + if not nu.get('password'): + no_pw_nested.append(nu) + total_no_pw = len(no_pw_external) + len(no_pw_nested) + if total_no_pw > 0: + warnings.append( + f'{total_no_pw} user(s) missing password ' + f'({len(no_pw_external)} external, {len(no_pw_nested)} nested) ' + f'— these will be created without credentials') + + # Check for resources missing host (skip pamRemoteBrowser — uses URL not host) + no_host = [r for r in resources + if not r.get('host') and r.get('type') != 'pamRemoteBrowser'] + if no_host: + for r in no_host: + warnings.append( + f'Resource "{r.get("title")}" has no host/IP address') + + return warnings + + @staticmethod + def _resolve_ksm_tokens(params, resources, users): + """Resolve ${KEEPER_*} dynamic tokens from KCM vault integration. + + KCM servers with KSM integration store placeholder tokens + (e.g., ${KEEPER_USER_PASSWORD}) instead of real credentials. This + method detects those tokens and handles them correctly: + + - If the credential record already exists in the user's vault, + LINK to it (via uid → uid_imported) instead of duplicating it. + - If not found, clear the token (never import literal ${KEEPER_*} + strings) and warn the user. + + Matching logic (mirrors KCM's runtime resolution): + - ${KEEPER_SERVER_*} tokens: matched by hostname → vault record host + - ${KEEPER_USER_*} tokens: matched by username → vault record login + - ${KEEPER_DOMAIN_*} tokens: matched by domain → vault record domain + + Returns a list of warning strings for tokens that couldn't be resolved. 
+ """ + _TOKEN_RE = re.compile(r'^\$\{KEEPER_(\w+)\}$') + + # Check if any tokens exist before building the expensive index + has_tokens = False + for resource in resources: + conn = resource.get('pam_settings', {}).get('connection', {}) + for val in conn.values(): + if isinstance(val, str) and val.startswith('${KEEPER_'): + has_tokens = True + break + if has_tokens: + break + if not has_tokens: + for user in users: + for key in ('password', 'login', 'private_pem_key'): + val = user.get(key, '') + if isinstance(val, str) and val.startswith('${KEEPER_'): + has_tokens = True + break + if has_tokens: + break + + if not has_tokens: + return [], [], [] + + logging.warning( + f'{bcolors.WARNING}KSM integration detected:{bcolors.ENDC} ' + f'some connections use ${{KEEPER_*}} dynamic tokens. ' + f'Attempting to resolve from vault...') + + # Build vault lookup indexes from all accessible records + vault_by_host = {} # host/IP → {uid, title, login, password, ...} + vault_by_login = {} # login → {uid, title, password, ...} + for uid in params.record_cache: + try: + rec = vault.KeeperRecord.load(params, uid) + if not rec: + continue + rec_data = {'uid': uid, 'title': getattr(rec, 'title', '')} + + if hasattr(rec, 'get_typed_field'): + for ftype in ('login', 'password', 'host', 'keyPair', + 'oneTimeCode'): + field = rec.get_typed_field(ftype) + if field and field.value: + val = field.value + if isinstance(val, list) and val: + val = val[0] + if isinstance(val, dict): + rec_data[ftype] = val + elif isinstance(val, str) and val: + rec_data[ftype] = val + elif hasattr(rec, 'login'): + rec_data['login'] = getattr(rec, 'login', '') or '' + rec_data['password'] = getattr(rec, 'password', '') or '' + + host = rec_data.get('host', '') + if host: + host_key = host.split(':')[0].strip().lower() + if host_key and host_key not in vault_by_host: + vault_by_host[host_key] = rec_data + elif host_key: + logging.debug( + 'Multiple vault records for host %s: %s and %s', + host_key, 
vault_by_host[host_key].get('title'), + rec_data.get('title')) + + login = rec_data.get('login', '') + if login: + login_key = login.strip().lower() + if login_key and login_key not in vault_by_login: + vault_by_login[login_key] = rec_data + elif login_key: + logging.debug( + 'Multiple vault records for login %s: %s and %s', + login_key, vault_by_login[login_key].get('title'), + rec_data.get('title')) + except Exception: + continue + + warnings = [] + resolved_details = [] # (resource_title, host, login, vault_title) + unresolved_details = [] # (resource_title, protocol, host, login, token, field) + + # Track which users have been linked to existing vault records + linked_users = set() # set of user titles + + for resource in resources: + host = resource.get('host', '') + conn = resource['pam_settings']['connection'] + res_name = resource.get('title', '').replace('KCM Resource - ', '') + + # Find the paired user for this resource + paired_user = None + for u in users: + if u.get('title') == f'KCM User - {res_name}': + paired_user = u + break + + # Scan connection fields for tokens + has_resource_token = False + for key in list(conn.keys()): + val = conn[key] + if not isinstance(val, str): + continue + m = _TOKEN_RE.match(val) + if not m: + continue + has_resource_token = True + + token_name = m.group(1) + vault_rec = None + + if token_name.startswith('SERVER_'): + host_key = host.strip().lower() + vault_rec = vault_by_host.get(host_key) + elif token_name.startswith('USER_'): + if paired_user: + login_key = paired_user.get('login', '').lower() + vault_rec = vault_by_login.get(login_key) + elif token_name.startswith('DOMAIN_'): + domain = resource.get('domain_name', '').lower() + vault_rec = vault_by_host.get(domain) + + if vault_rec: + conn[key] = '' + else: + protocol = conn.get('protocol', '') + login = paired_user.get('login', '') if paired_user else '' + unresolved_details.append(( + res_name, protocol, host, login, val, key)) + conn[key] = '' + + # If we found 
a vault record for the user tokens, LINK instead + # of creating a duplicate. Set uid_imported on the paired user + # so extend.py uses the existing vault record. + if has_resource_token and paired_user: + login_key = paired_user.get('login', '').lower() + vault_rec = vault_by_login.get(login_key) + if not vault_rec: + # Try by host (SERVER tokens) + host_key = host.strip().lower() + vault_rec = vault_by_host.get(host_key) + + if vault_rec and vault_rec.get('uid'): + linked_users.add(paired_user.get('title', '')) + resolved_details.append(( + res_name, host, + paired_user.get('login', ''), + vault_rec.get('title', ''), + vault_rec['uid'])) + logging.warning( + f'{bcolors.OKBLUE}KSM match:{bcolors.ENDC} %s ' + f'(login=%s, host=%s) → vault record "%s" (%s)', + res_name, + paired_user.get('login', ''), + host or '(none)', + vault_rec.get('title', ''), + vault_rec['uid']) + + # Check host field itself for tokens + if host and _TOKEN_RE.match(host): + protocol = conn.get('protocol', '') + login = paired_user.get('login', '') if paired_user else '' + unresolved_details.append(( + res_name, protocol, '(token)', login, host, 'host')) + resource['host'] = '' + + # Process user-level tokens (password, login, private_pem_key) + for user in users: + user_title = user.get('title', '') + if user_title in linked_users: + # Already linked to an existing vault record — clear any + # remaining tokens but don't try to resolve (the existing + # record has the real values) + for key in ('password', 'login', 'private_pem_key'): + val = user.get(key, '') + if isinstance(val, str) and _TOKEN_RE.match(val): + user[key] = '' + continue + + for key in ('password', 'login', 'private_pem_key'): + val = user.get(key, '') + if not isinstance(val, str): + continue + m = _TOKEN_RE.match(val) + if not m: + continue + + token_name = m.group(1) + vault_rec = None + + if token_name.startswith('USER_'): + login_key = user.get('login', '').lower() + vault_rec = vault_by_login.get(login_key) + elif 
token_name.startswith('SERVER_'): + usr_name = user_title.replace('KCM User - ', '') + for r in resources: + if r.get('title') == f'KCM Resource - {usr_name}': + host_key = r.get('host', '').strip().lower() + vault_rec = vault_by_host.get(host_key) + break + + if vault_rec: + user[key] = '' + else: + # Find paired resource for context + usr_name = user_title.replace('KCM User - ', '') + res_host = '' + res_proto = '' + for r in resources: + if r.get('title') == f'KCM Resource - {usr_name}': + res_host = r.get('host', '') + res_proto = r.get('pam_settings', {}).get( + 'connection', {}).get('protocol', '') + break + unresolved_details.append(( + usr_name, res_proto, res_host, + user.get('login', ''), val, key)) + user[key] = '' + + # Build warnings with structured detail + if resolved_details: + logging.warning( + f'{bcolors.OKGREEN}KSM tokens:{bcolors.ENDC} %d connection(s) ' + f'matched existing vault records — tokens cleared', + len(resolved_details)) + # Build a resolved summary for the report + resolved_lines = [] + for name, host, login, vtitle, vuid in resolved_details: + resolved_lines.append( + f'{name} (login={login}, host={host or "(none)"}) ' + f'→ "{vtitle}"') + warnings.append( + f'{len(resolved_details)} KSM connection(s) matched existing ' + f'vault records (tokens cleared, credentials on existing ' + f'record):\n' + '\n'.join( + f' - {line}' for line in resolved_lines)) + + if unresolved_details: + # Deduplicate by connection name + seen = set() + unique = [] + for detail in unresolved_details: + if detail[0] not in seen: + seen.add(detail[0]) + unique.append(detail) + + logging.warning( + f'{bcolors.WARNING}KSM tokens:{bcolors.ENDC} %d connection(s) ' + f'could not be resolved — credentials imported empty', + len(unique)) + # Build a structured warning with actionable info + unresolved_lines = [] + for name, proto, host, login, token, field in unique: + parts = [name] + if proto: + parts.append(f'protocol={proto}') + if host: + 
parts.append(f'host={host}') + if login: + parts.append(f'login={login}') + parts.append(f'token={token}') + unresolved_lines.append(' | '.join(parts)) + + warnings.append( + f'{len(unique)} KSM connection(s) need credentials added ' + f'manually. These used ${{KEEPER_*}} tokens in KCM but no ' + f'matching vault record was found (the KSM shared folder may ' + f'not be accessible to your account):\n' + '\n'.join( + f' - {line}' for line in unresolved_lines) + + f'\n Action: add passwords to these records after import, ' + f'or share the KSM app folder with your vault and re-import.') + + return warnings, resolved_details, unresolved_details + + @staticmethod + def _flag_ksm_records(resources, users, resolved_details, unresolved_details, + res_root, usr_root): + """Add notes and move KSM-affected records to dedicated subfolders. + + Resolved records stay in place but get a note explaining the match. + Unresolved records move to a mirrored 'Needs KSM Credentials/' subfolder + so they're visually separated and easy to find. 
+ """ + if not resolved_details and not unresolved_details: + return + + # Build lookup: connection name → resolved detail + resolved_map = {} + for name, host, login, vtitle, vuid in resolved_details: + resolved_map[name] = (host, login, vtitle, vuid) + + # Build lookup: connection name → unresolved detail + unresolved_map = {} + for name, proto, host, login, token, field in unresolved_details: + if name not in unresolved_map: + unresolved_map[name] = (proto, host, login, token) + + # Add notes to resolved user records + for user in users: + conn_name = user.get('title', '').replace('KCM User - ', '') + detail = resolved_map.get(conn_name) + if detail: + host, login, vtitle, vuid = detail + note = ( + f'[KSM Integration] This connection used dynamic tokens in KCM.\n' + f'Matched vault record: "{vtitle}" (UID: {vuid})\n' + f'Original token: ${{KEEPER_USER_PASSWORD}}\n' + f'The credential was not copied — add it manually or link ' + f'to the existing record.') + existing = user.get('notes', '') + user['notes'] = f'{existing}\n\n{note}'.strip() if existing else note + + # Move unresolved user records to mirrored subfolder + add notes + ksm_usr_root = f'{usr_root}/Needs KSM Credentials' + ksm_res_root = f'{res_root}/Needs KSM Credentials' + + for user in users: + conn_name = user.get('title', '').replace('KCM User - ', '') + detail = unresolved_map.get(conn_name) + if not detail: + continue + proto, host, login, token = detail + + # Mirror the folder path: replace usr_root with ksm subfolder + orig_path = user.get('folder_path', '') + if orig_path.startswith(usr_root): + suffix = orig_path[len(usr_root):] + user['folder_path'] = f'{ksm_usr_root}{suffix}' + user['_original_folder'] = orig_path + + note = ( + f'[KSM Integration — Needs Credentials]\n' + f'This connection used dynamic tokens in KCM but no matching\n' + f'vault record was found in your account.\n' + f'\n' + f'Original token: {token}\n' + f'KCM username: {login}\n' + f'KCM host: {host or "(none)"}\n' + 
f'Protocol: {proto}\n' + f'\n' + f'To fix:\n' + f'1. Add the password to this record manually, OR\n' + f'2. Find the KSM shared folder that contained this credential,\n' + f' share it with your vault, and re-import\n' + f'\n' + f'After adding credentials, move this record to:\n' + f' {orig_path}') + existing = user.get('notes', '') + user['notes'] = f'{existing}\n\n{note}'.strip() if existing else note + + # Also move the paired resource records for unresolved connections + for resource in resources: + conn_name = resource.get('title', '').replace('KCM Resource - ', '') + if conn_name not in unresolved_map: + continue + orig_path = resource.get('folder_path', '') + if orig_path.startswith(res_root): + suffix = orig_path[len(res_root):] + resource['folder_path'] = f'{ksm_res_root}{suffix}' + resource['_original_folder'] = orig_path + + proto, host, login, token = unresolved_map[conn_name] + note = ( + f'[KSM Integration — Needs Credentials]\n' + f'The paired user record for this connection needs credentials.\n' + f'See the corresponding KCM User record for details.\n' + f'\n' + f'After fixing credentials, move this record to:\n' + f' {orig_path}') + existing = resource.get('notes', '') + resource['notes'] = f'{existing}\n\n{note}'.strip() if existing else note + + @staticmethod + def _flag_incomplete_records(resources, users, res_root, usr_root): + """Flag records with incomplete KCM source data. + + Moves incomplete resources/users to an 'Incomplete (KCM Source)' + subfolder and adds a 'notes' field describing the issues. 
+ """ + incomplete_res_folder = f'{res_root}/Incomplete (KCM Source)' + incomplete_usr_folder = f'{usr_root}/Incomplete (KCM Source)' + + # Protocols that require a host field (pamRemoteBrowser uses url) + host_required = {'ssh', 'rdp', 'vnc', 'telnet', + 'mysql', 'postgresql', 'sql-server'} + # Protocols that require a login on the user record + login_required = {'ssh', 'rdp', 'telnet', + 'mysql', 'postgresql', 'sql-server'} + + # Build a lookup from resource title to user for cross-referencing + user_by_title = {} + for u in users: + t = u.get('title', '') + if t: + user_by_title.setdefault(t, []).append(u) + + for resource in resources: + conn = resource.get('pam_settings', {}).get('connection', {}) + protocol = conn.get('protocol', '') + issues = [] + + # Check host for protocols that need it + if protocol in host_required and not resource.get('host'): + issues.append(f'Missing host/IP address (required for {protocol})') + + # Check url for http/pamRemoteBrowser + if protocol == 'http' and not resource.get('host') and not resource.get('url'): + issues.append('Missing URL (required for Remote Browser connections)') + + # Check SFTP completeness for RDP/VNC + sftp = conn.get('sftp', {}) + if protocol in ('rdp', 'vnc') and sftp.get('enable_sftp'): + missing_sftp = [] + if not sftp.get('host'): + missing_sftp.append('host') + if not sftp.get('port'): + missing_sftp.append('port') + if not sftp.get('login'): + missing_sftp.append('login') + if not sftp.get('password') and not sftp.get('private_key'): + missing_sftp.append('password or private_key') + if missing_sftp: + issues.append( + f'SFTP enabled but missing: {", ".join(missing_sftp)}') + + # Check user login and passphrase for protocols that require it + launch_cred = conn.get('launch_credentials', '') + if protocol in login_required and launch_cred: + matched_users = user_by_title.get(launch_cred, []) + for u in matched_users: + if not u.get('login'): + issues.append( + f'User "{launch_cred}" has no login ' + 
f'(required for {protocol})') + break + # Note: SSH keys without a passphrase are valid and common. + # We don't flag missing passphrase as an issue — the key + # may simply be unencrypted. + + if not issues: + continue + + # Build note text + note_lines = [ + 'INCOMPLETE DATA AT KCM SOURCE', + '=' * 35, + f'Protocol: {protocol}', + f'Original folder: {resource.get("folder_path", "unknown")}', + '', + 'Issues found:', + ] + for i, issue in enumerate(issues, 1): + note_lines.append(f' {i}. {issue}') + note_lines.extend([ + '', + 'This record was imported from KCM (Guacamole) with incomplete', + 'configuration. It was moved to the "Incomplete (KCM Source)"', + 'folder for review. Fix the missing fields above, then move', + 'the record to the appropriate folder.', + ]) + resource['notes'] = '\n'.join(note_lines) + + # Move resource to incomplete folder + resource['folder_path'] = incomplete_res_folder + + # Move matching user(s) too + if launch_cred: + for u in user_by_title.get(launch_cred, []): + u['folder_path'] = incomplete_usr_folder + + @staticmethod + def _print_import_summary(project_name, config_uid, num_resources, + num_users, resources, users, skip_users): + """Print pre-import summary with folder hierarchy for user review.""" + mode = 'Extend existing project' if config_uid else 'New project' + + print() + print('=' * 60) + print('KCM Import Summary') + print('=' * 60) + print() + print(f' Project: {project_name}') + print(f' Mode: {mode}') + print(f' Resources: {num_resources}') + print(f' Users: {num_users}') + print() + + # Collect and display folder hierarchy + folders = set() + for r in resources: + fp = r.get('folder_path', '') + if fp: + folders.add(fp) + for nu in r.get('users', []): + nfp = nu.get('folder_path', '') + if nfp: + folders.add(nfp) + if not skip_users: + for u in users: + fp = u.get('folder_path', '') + if fp: + folders.add(fp) + + if folders: + print(' Folders to create:') + for f in sorted(folders): + # Count records in this folder 
+ count = sum(1 for r in resources if r.get('folder_path') == f) + if not skip_users: + count += sum(1 for u in users if u.get('folder_path') == f) + for r in resources: + count += sum(1 for nu in r.get('users', []) + if nu.get('folder_path') == f) + print(f' {f} ({count} records)') + print() + + print(' This will create vault records that cannot be easily undone.') + print(' Use --dry-run to preview the full JSON first.') + + @staticmethod + def _get_project_assets(params, config_uid): + """Gather project asset UIDs/names for the import report.""" + assets = {} # type: Dict[str, str] + try: + # Get shared folder names + actual_res, actual_usr = \ + PAMProjectKCMImportCommand._discover_shared_folder_names( + params, config_uid) + assets['res_sf_name'] = actual_res or '' + assets['usr_sf_name'] = actual_usr or '' + + # Find SF UIDs + for sf_uid, sf in params.shared_folder_cache.items(): + name = sf.get('name_unencrypted', '') + if name == actual_res: + assets['res_sf_uid'] = sf_uid + elif name == actual_usr: + assets['usr_sf_uid'] = sf_uid + + # Get gateway UID from PAM config's pamResources field + config_rec = vault.KeeperRecord.load(params, config_uid) + if config_rec and hasattr(config_rec, 'fields'): + pam_res = next( + (f for f in config_rec.fields + if f.type == 'pamResources'), None) + if pam_res and pam_res.value: + val = pam_res.value[0] if isinstance( + pam_res.value, list) else pam_res.value + if isinstance(val, dict): + gw_uid = val.get('controllerUid', '') + if gw_uid: + assets['gateway_uid'] = gw_uid + from ..pam import gateway_helper + gateways = gateway_helper.get_all_gateways(params) + for g in gateways: + g_uid = utils.base64_url_encode( + g.controllerUid) + if g_uid == gw_uid: + assets['gateway_name'] = \ + g.controllerName + if g.applicationUid: + assets['app_uid'] = \ + utils.base64_url_encode( + g.applicationUid) + break + except Exception as e: + logging.debug('Could not gather project assets: %s', e) + + return assets + + @staticmethod + def 
_build_import_report(project_name, config_uid, is_new_project, + assets, num_resources, num_users, created, + expected, total_time, throttler_summary, + warnings, kwargs, import_results=None): + """Build a structured import report for console and vault storage.""" + ts = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + skipped = expected - created + mins = total_time / 60 + + lines = [ + '', + '=' * 60, + f' KCM Import Complete — {project_name}', + '=' * 60, + '', + f' Timestamp: {ts}', + f' Config UID: {config_uid}', + f' Mode: {"New project" if is_new_project else "Extend existing"}', + '', + ' PROJECT ASSETS', + ' ' + '-' * 40, + ] + + gw_name = assets.get('gateway_name', 'Unknown') + gw_uid = assets.get('gateway_uid', '') + gw_label = 'NEW' if is_new_project else 'EXISTING' + lines.append(f' Gateway: {gw_name} ({gw_uid}) [{gw_label}]') + + app_uid = assets.get('app_uid', '') + if app_uid: + lines.append(f' KSM App: {app_uid}') + + res_sf = assets.get('res_sf_name', '') + res_uid = assets.get('res_sf_uid', '') + if res_sf: + lines.append(f' Resources: {res_sf} ({res_uid})') + usr_sf = assets.get('usr_sf_name', '') + usr_uid = assets.get('usr_sf_uid', '') + if usr_sf: + lines.append(f' Users: {usr_sf} ({usr_uid})') + + lines.extend([ + '', + ' IMPORT RESULTS', + ' ' + '-' * 40, + f' Resources: {num_resources}', + f' Users: {num_users}', + f' Created: {created} records', + ]) + if skipped > 0: + lines.append(f' Skipped: {skipped} (missing required fields)') + lines.append(f' Duration: {total_time:.0f}s ({mins:.1f}m)') + + # Per-record detail + if import_results: + ok_items = [r for r in import_results if r['status'] == 'ok'] + skip_items = [r for r in import_results if r['status'] == 'skipped'] + err_items = [r for r in import_results if r['status'] == 'error'] + + if skip_items or err_items: + lines.extend([ + '', + ' FAILED / SKIPPED RECORDS', + ' ' + '-' * 40, + ]) + for r in skip_items: + lines.append( + f' SKIP {r["type"]:20s} {r["name"][:40]}' + f' — 
{r["reason"][:60]}') + for r in err_items: + lines.append( + f' ERR {r["type"]:20s} {r["name"][:40]}' + f' — {r["reason"][:60]}') + + # Summary counts by type + type_counts = {} + for r in import_results: + key = (r['type'], r['status']) + type_counts[key] = type_counts.get(key, 0) + 1 + if type_counts: + lines.extend([ + '', + ' RECORD BREAKDOWN', + ' ' + '-' * 40, + f' {"Type":<22s} {"OK":>5s} {"Skip":>5s} {"Err":>5s}', + ]) + all_types = sorted(set(r['type'] for r in import_results)) + for rtype in all_types: + ok_n = type_counts.get((rtype, 'ok'), 0) + sk_n = type_counts.get((rtype, 'skipped'), 0) + er_n = type_counts.get((rtype, 'error'), 0) + lines.append( + f' {rtype:<22s} {ok_n:>5d} {sk_n:>5d} {er_n:>5d}') + totals = (len(ok_items), len(skip_items), len(err_items)) + lines.append( + f' {"TOTAL":<22s} {totals[0]:>5d} ' + f'{totals[1]:>5d} {totals[2]:>5d}') + + # Throttle stats + if throttler_summary: + tc = throttler_summary.get('throttle_count', 0) + tb = throttler_summary.get('total_batches', 0) + rtt = throttler_summary.get('base_rtt') + lines.extend([ + '', + ' THROTTLE STATISTICS', + ' ' + '-' * 40, + f' Throttle events: {tc}', + f' Total batches: {tb}', + f' Final params: res=' + f'{throttler_summary.get("final_res_batch", "?")}@' + f'{throttler_summary.get("final_res_delay", 0):.0f}s, ' + f'usr={throttler_summary.get("final_usr_batch", "?")}@' + f'{throttler_summary.get("final_usr_delay", 0):.0f}s', + ]) + if rtt: + lines.append(f' Probe RTT: {rtt:.1f}s') + + # Warnings + if warnings: + lines.extend([ + '', + ' WARNINGS', + ' ' + '-' * 40, + ]) + for w in warnings: + lines.append(f' - {w}') + + # Gateway deployment (new only) + if is_new_project: + gw_token = assets.get('gateway_token', '') + token_display = gw_token if gw_token else '' + lines.extend([ + '', + ' GATEWAY DEPLOYMENT', + ' ' + '-' * 40, + ]) + if gw_token: + lines.append(f' Access Token: {gw_token}') + lines.append('') + else: + lines.append(' (Token not captured — check console output 
above)') + lines.append('') + lines.extend([ + ' docker run -d --name keeper-gateway \\', + f' -e GATEWAY_CONFIG="{token_display}" \\', + ' -e ACCEPT_EULA=Y \\', + ' --shm-size=2g \\', + ' --restart unless-stopped \\', + ' keeper/gateway:latest', + ]) + + # Post-import instructions + lines.extend([ + '', + ' WHAT TO DO NEXT', + ' ' + '-' * 40, + '', + ' 1. DEPLOY THE GATEWAY (if new project)', + ' Run the docker command on a host that can reach your managed machines.', + ' Best practice: deploy on the same host as KCM, or any server with', + ' network access to the target machines. If KCM runs in Docker, join', + ' the same Docker network (--network ).', + ' Copy the "Deploy Gateway" field from this record and run it.', + '', + ' 2. VERIFY GATEWAY IS ONLINE', + ' Run: pam gateway list', + ' The gateway should appear as "connected" within 60 seconds.', + '', + ' 3. ADD MISSING CREDENTIALS', + ]) + # Check for KSM token warnings + ksm_affected = [w for w in warnings if 'KSM dynamic token' in w] + missing_pw = [w for w in warnings if 'missing password' in w] + if ksm_affected or missing_pw: + lines.append(' Some records were imported without credentials:') + if ksm_affected: + lines.append(' - KSM token connections: see WARNINGS below for affected records.') + lines.append(' These had ${KEEPER_*} tokens in KCM. Add passwords manually') + lines.append(' or share the KSM app folder with your account and re-import.') + if missing_pw: + lines.append(' - Records with no password in KCM source: add credentials in vault.') + else: + lines.append(' All credentials imported successfully.') + lines.extend([ + '', + ' 4. TEST A CONNECTION', + ' Open a PAM connection in the vault to verify it works.', + '', + ' 5. CLEAN UP (when done testing)', + f' Dry run: pam project kcm-cleanup --name "{project_name}" --dry-run', + f' Delete: pam project kcm-cleanup --name "{project_name}" --yes', + '', + ' 6. 
ADD NEW CONNECTIONS LATER', + f' Extend: pam project kcm-import --config "{config_uid}" --docker-detect --yes', + ' This adds only NEW connections (existing records are skipped).', + ]) + + # Command used (redacted) + redacted = PAMProjectKCMImportCommand._build_redacted_command(kwargs) + lines.extend([ + '', + ' COMMAND USED (redacted)', + ' ' + '-' * 40, + f' {redacted}', + '', + '=' * 60, + '', + ]) + + return '\n'.join(lines) + + @staticmethod + def _build_redacted_command(kwargs): + """Reconstruct the CLI command from kwargs, redacting secrets.""" + parts = ['pam project kcm-import'] + flag_map = { + 'db_host': '--db-host', + 'db_port': '--db-port', + 'db_name': '--db-name', + 'db_type': '--db-type', + 'db_user': '--db-user', + 'project_name': '--name', + 'gateway': '--gateway', + 'docker_container': '--docker-container', + 'folder_mode': '--folder-mode', + 'config': '--config', + 'batch_size': '--batch-size', + 'batch_delay': '--batch-delay', + 'max_instances': '--max-instances', + 'include_groups': '--groups', + 'exclude_groups': '--exclude-groups', + } + bool_flags = { + 'docker_detect': '--docker-detect', + 'dry_run': '--dry-run', + 'skip_users': '--skip-users', + 'include_disabled': '--include-disabled', + 'include_credentials': '--include-credentials', + 'auto_confirm': '--yes', + 'estimate': '--estimate', + 'list_groups': '--list-groups', + 'db_ssl': '--db-ssl', + 'allow_cleartext': '--allow-cleartext', + } + for kwarg, flag in flag_map.items(): + val = kwargs.get(kwarg) + if val is not None and val != '': + parts.append(f'{flag} "{val}"') + for kwarg, flag in bool_flags.items(): + if kwargs.get(kwarg): + parts.append(flag) + if kwargs.get('db_password_record'): + parts.append('--db-password-record "[REDACTED]"') + return ' '.join(parts) + + @staticmethod + def _create_summary_record(params, res_folder_name, report_text, + project_name='', assets=None, kwargs=None, + import_results=None, warnings=None): + """Create an import report record with copyable 
fields and + the full report as a file attachment. + + Placed at the project's top-level folder (alongside Resources/Users). + Key values (gateway token, config UID, docker command) are stored as + separate custom fields so users can copy them directly from the vault UI. + """ + from ..record_edit import RecordAddCommand + from ... import attachment as att + + if assets is None: + assets = {} + if kwargs is None: + kwargs = {} + if import_results is None: + import_results = [] + + # --- Find the project folder --- + # Sync vault to pick up folders created during import + from ... import api as keeper_api + try: + keeper_api.sync_down(params) + except Exception: + pass + + # folder_cache contains BaseFolderNode objects with .name and .parent_uid + target_folder_uid = None + + # Strategy 1: Find the project folder by name in folder_cache + if project_name: + for folder_uid, folder in params.folder_cache.items(): + if getattr(folder, 'name', '') == project_name: + target_folder_uid = folder_uid + break + + # Strategy 2: Walk up from the Resources shared folder + if not target_folder_uid: + for folder_uid, folder in params.folder_cache.items(): + if getattr(folder, 'name', '') == res_folder_name: + parent = getattr(folder, 'parent_uid', '') + if parent: + target_folder_uid = parent + break + + # Strategy 3: Search PAM Environments root + if not target_folder_uid and project_name: + pam_root_uid = None + for folder_uid, folder in params.folder_cache.items(): + if getattr(folder, 'name', '') == 'PAM Environments': + pam_root_uid = folder_uid + break + if pam_root_uid: + for folder_uid, folder in params.folder_cache.items(): + if (getattr(folder, 'name', '') == project_name + and getattr(folder, 'parent_uid', '') == pam_root_uid): + target_folder_uid = folder_uid + break + + if not target_folder_uid: + logging.warning('Could not find project folder "%s" for report record', + project_name) + return + + # --- Build record with copyable fields --- + ts = 
datetime.datetime.now().strftime('%Y%m%d-%H%M%S') + title = f'KCM Import Report - {ts}' + + # Custom fields for easy copy from vault UI + gw_token = assets.get('gateway_token', '') + config_uid = assets.get('config_uid', '') + gw_name = assets.get('gateway_name', '') + gw_uid = assets.get('gateway_uid', '') + app_uid = assets.get('app_uid', '') + res_sf = assets.get('res_sf_name', '') + res_sf_uid = assets.get('res_sf_uid', '') + usr_sf = assets.get('usr_sf_name', '') + usr_sf_uid = assets.get('usr_sf_uid', '') + + docker_cmd = ( + 'docker run -d --name keeper-gateway ' + '-e GATEWAY_CONFIG="" ' + '-e ACCEPT_EULA=Y --shm-size=2g ' + '--restart unless-stopped keeper/gateway:latest' + ) + + fields = [] + + # --- Gateway & deploy --- + if gw_token: + fields.append( + f'c.password.Gateway Token — paste into GATEWAY_CONFIG={gw_token}') + deploy_cmd = ( + f'docker run -d --name keeper-gateway ' + f'-e GATEWAY_CONFIG="{gw_token}" ' + f'-e ACCEPT_EULA=Y --shm-size=2g ' + f'--restart unless-stopped keeper/gateway:latest' + ) + fields.append( + f'c.password.Deploy Gateway — copy & run in terminal={deploy_cmd}') + else: + fields.append( + f'c.text.Deploy Gateway — copy & run in terminal={docker_cmd}') + + # --- Project info --- + if project_name: + fields.append(f'c.text.Project Name={project_name}') + if config_uid: + fields.append( + f'c.text.PAM Config UID — use with --config flag={config_uid}') + + # --- Action commands --- + fields.append( + 'c.text.Verify Gateway — run after deploy=pam gateway list') + + if config_uid: + extend_parts = [f'pam project kcm-import --config "{config_uid}"'] + if kwargs.get('docker_detect'): + extend_parts.append('--docker-detect') + elif kwargs.get('db_host'): + extend_parts.append(f'--db-host "{kwargs["db_host"]}"') + if kwargs.get('db_type'): + extend_parts.append(f'--db-type {kwargs["db_type"]}') + if kwargs.get('include_groups'): + extend_parts.append(f'--groups "{kwargs["include_groups"]}"') + extend_parts.append('--yes') + 
fields.append( + f'c.text.Add New Connections — re-scan KCM, skip existing=' + f'{" ".join(extend_parts)}') + + if project_name: + fields.append( + f'c.text.Preview Cleanup — shows what will be deleted=' + f'pam project kcm-cleanup --name "{project_name}" --dry-run') + fields.append( + f'c.text.Delete Project — destructive, removes all imported records=' + f'pam project kcm-cleanup --name "{project_name}" --yes') + + redacted = PAMProjectKCMImportCommand._build_redacted_command(kwargs) + fields.append( + f'c.text.Original Import Command — what was used for this import=' + f'{redacted}') + + reimport_parts = [redacted.replace('--dry-run', '').strip()] + if '--yes' not in reimport_parts[0]: + reimport_parts.append('--yes') + fields.append( + f'c.text.Re-import Everything — fresh run, same settings=' + f'{" ".join(reimport_parts)}') + + # --- Source & stats --- + db_source_parts = [] + if kwargs.get('docker_detect'): + db_source_parts.append('Docker auto-detect') + if kwargs.get('docker_container'): + db_source_parts.append(f'container={kwargs["docker_container"]}') + if kwargs.get('db_host'): + db_source_parts.append(f'host={kwargs["db_host"]}') + if kwargs.get('db_type'): + db_source_parts.append(f'type={kwargs["db_type"]}') + if kwargs.get('db_name'): + db_source_parts.append(f'db={kwargs["db_name"]}') + if kwargs.get('db_port'): + db_source_parts.append(f'port={kwargs["db_port"]}') + if db_source_parts: + fields.append( + f'c.text.KCM Database Source={", ".join(db_source_parts)}') + + if import_results: + ok = sum(1 for r in import_results if r['status'] == 'ok') + skipped = sum(1 for r in import_results if r['status'] == 'skipped') + errors = sum(1 for r in import_results if r['status'] == 'error') + fields.append( + f'c.text.Import Results=Created: {ok}, Skipped: {skipped}, Errors: {errors}') + + problem_items = [r for r in import_results if r['status'] != 'ok'] + if problem_items: + detail_lines = [f"{r['name']} ({r['status']}: {r.get('reason', '')})" + for r 
in problem_items[:15]] + if len(problem_items) > 15: + detail_lines.append(f'(+{len(problem_items)-15} more)') + fields.append( + f'c.text.Failed Records={"; ".join(detail_lines)}') + + # --- KSM affected connections --- + ksm_unresolved = [w for w in (warnings or []) + if 'need credentials added' in w] + ksm_resolved = [w for w in (warnings or []) + if 'matched existing vault records' in w] + if ksm_unresolved: + fields.append( + f'c.text.Needs Credentials — had KSM tokens, no vault match=' + f'{ksm_unresolved[0]}') + if ksm_resolved: + fields.append( + f'c.text.KSM Resolved — matched existing vault records=' + f'{ksm_resolved[0]}') + + # --- Step-by-step action plan (each step is a separate field) --- + step = 1 + if gw_token: + fields.append( + f'c.text.Step {step}: Deploy Gateway — run the "Deploy Gateway" command above=' + f'Copy the "Deploy Gateway" field and run it in your terminal. ' + f'The gateway should appear as "connected" within 60 seconds.') + step += 1 + + fields.append( + f'c.text.Step {step}: Verify Gateway is online=' + f'pam gateway list') + step += 1 + + # Check if there are KSM or missing credential issues + has_cred_issues = bool(ksm_unresolved) or any( + 'missing password' in w for w in (warnings or [])) + if has_cred_issues: + cred_note = 'Open records in the "Needs KSM Credentials" subfolder and add passwords.' + if not ksm_unresolved: + cred_note = 'Some records were imported without passwords — add credentials in vault.' 
+ fields.append( + f'c.text.Step {step}: Add missing credentials={cred_note}') + step += 1 + + fields.append( + f'c.text.Step {step}: Test a connection=' + f'Open a PAM connection in the vault to verify it works.') + step += 1 + + if project_name: + fields.append( + f'c.text.Step {step}: Clean up when done testing=' + f'pam project kcm-cleanup --name "{project_name}" --dry-run') + step += 1 + + # --- Asset reference fields --- + if gw_name: + fields.append(f'c.text.Gateway Name={gw_name}') + if gw_uid: + fields.append(f'c.text.Gateway UID={gw_uid}') + if app_uid: + fields.append(f'c.text.KSM App UID={app_uid}') + if res_sf: + fields.append(f'c.text.Resources Folder={res_sf}') + if res_sf_uid: + fields.append(f'c.text.Resources Folder UID={res_sf_uid}') + if usr_sf: + fields.append(f'c.text.Users Folder={usr_sf}') + if usr_sf_uid: + fields.append(f'c.text.Users Folder UID={usr_sf_uid}') + + try: + # Strip gateway token from notes — it's in a dedicated custom + # field and shouldn't be duplicated in the notes text. 
+ notes_text = report_text + if gw_token: + field_ref = '(see "Gateway Access Token" field on this record)' + notes_text = notes_text.replace(gw_token, field_ref) + + args = { + 'force': True, + 'folder': target_folder_uid, + 'record_type': 'encryptedNotes', + 'title': title, + 'notes': notes_text, + } + if fields: + args['fields'] = fields + uid = RecordAddCommand().execute(params, **args) + if not uid: + logging.warning('Could not create summary record') + return + + logging.warning(f'{bcolors.OKGREEN}Import report saved to record:{bcolors.ENDC} %s (%s)', + title, uid) + + # Attach the full report as a file + try: + record = vault.KeeperRecord.load(params, uid) + if record: + tmp_fd, tmp_path = tempfile.mkstemp( + suffix='.md', prefix='KCM-Import-Report-') + try: + with os.fdopen(tmp_fd, 'w') as f: + f.write(notes_text) + upload_task = att.FileUploadTask(tmp_path) + upload_task.name = 'KCM-Import-Report.md' + upload_task.title = 'KCM Import Report' + upload_task.mime_type = 'text/markdown' + att.upload_attachments(params, record, [upload_task]) + logging.warning(f'{bcolors.OKGREEN}Report file attached:{bcolors.ENDC} %s', + upload_task.name) + finally: + if os.path.exists(tmp_path): + os.unlink(tmp_path) + except Exception as e: + logging.debug('Could not attach report file: %s', e) + + except Exception as e: + logging.warning('Could not create summary record: %s', e) + + @staticmethod + def _resolve_gateway(params, gateway_arg): + """Interactive gateway selection. Returns PAM config UID or None. 
+ + Flow: + --gateway → find matching gateway, find its config, use extend mode + (no flag, interactive) → list gateways, let user choose or create new + 'new' choice → return None (import engine creates new gateway) + """ + from ..pam import gateway_helper + from ..pam.router_helper import router_get_connected_gateways + + gateways = gateway_helper.get_all_gateways(params) + + # Determine online status by cross-referencing with router + online_uids = set() + try: + connected = router_get_connected_gateways(params) + if connected and connected.controllers: + online_uids = {c.controllerUid for c in connected.controllers} + except Exception: + logging.debug('Could not reach router to check online gateways') + + online = [g for g in gateways if g.controllerUid in online_uids] + + # If --gateway flag provided, find it directly + if gateway_arg: + match = None + for g in gateways: + uid_str = utils.base64_url_encode(g.controllerUid) + if uid_str == gateway_arg or g.controllerName == gateway_arg: + match = g + break + if not match: + raise CommandError('kcm-import', + f'Gateway "{gateway_arg}" not found. 
Use --dry-run to preview without a gateway.') + if match.controllerUid not in online_uids: + logging.warning('Gateway "%s" is OFFLINE — connections will not work until it is started.', + match.controllerName) + return PAMProjectKCMImportCommand._find_config_for_gateway(params, match) + + # Interactive: show options + print('\nGateway Selection') + print('─' * 50) + if online: + print(f' Found {len(online)} online gateway(s):\n') + for i, g in enumerate(online, 1): + uid_str = utils.base64_url_encode(g.controllerUid) + print(f' [{i}] {g.controllerName} ({uid_str})') + print(f'\n [N] Create a new gateway') + print() + choice = input(' Select gateway [N]: ').strip() + if choice and choice.upper() != 'N': + try: + idx = int(choice) - 1 + if 0 <= idx < len(online): + selected = online[idx] + logging.info('Using existing gateway: %s', selected.controllerName) + return PAMProjectKCMImportCommand._find_config_for_gateway(params, selected) + except (ValueError, IndexError): + pass + logging.warning('Invalid selection — creating new gateway.') + else: + print(' No online gateways found.') + print(' A new gateway will be created. 
Deploy it with the token shown after import.\n') + + # Return None = import engine creates new gateway + return None + + @staticmethod + def _find_config_for_gateway(params, gateway): + """Find the PAM config UID associated with a gateway.""" + from ..pam.config_helper import configuration_controller_get + + gateway_uid_bytes = gateway.controllerUid + + # Search all PAM configs to find one linked to this gateway + all_records = params.record_cache.values() + for rec in all_records: + if rec.get('version') != 6: + continue + try: + rec_uid = rec.get('record_uid', '') + if not rec_uid: + continue + controller = configuration_controller_get( + params, utils.base64_url_decode(rec_uid)) + if controller and controller.controllerUid == gateway_uid_bytes: + logging.info('Found PAM config "%s" for gateway "%s"', + rec_uid, gateway.controllerName) + return rec_uid + except Exception as e: + logging.debug('Skipping record %s: %s', rec_uid, e) + continue + + raise CommandError('kcm-import', + f'No PAM configuration found for gateway "{gateway.controllerName}". ' + f'Create one first with: pam config create') + + @staticmethod + def _create_project_skeleton(params, project_name, pam_json): + """Phase 1: Create PAM project skeleton (folders, KSM app, gateway, + PAM config) without records. + + Returns (config_uid, gateway_token) where gateway_token is the + access_token printed by edit.py during gateway creation, or '' + if it could not be captured. + """ + from .edit import PAMProjectImportCommand + from ... 
import api as keeper_api + + skeleton_json = copy.deepcopy(pam_json) + skeleton_json['pam_data']['resources'] = [] + skeleton_json['pam_data']['users'] = [] + skeleton_json['project'] = project_name + + tmp_fd, tmp_path = tempfile.mkstemp(suffix='.json') + captured_output = '' + try: + with os.fdopen(tmp_fd, 'w') as tmp: + json.dump(skeleton_json, tmp, indent=2) + + # Capture stdout to extract the gateway access_token + # edit.py prints JSON with {"access_token": "..."} at the end + stdout_trap = io.StringIO() + with _STDOUT_LOCK: + original_stdout = sys.stdout + sys.stdout = stdout_trap + try: + cmd = PAMProjectImportCommand() + cmd.execute(params, + project_name=project_name, + file_name=tmp_path, + dry_run=False) + finally: + sys.stdout = original_stdout + captured_output = stdout_trap.getvalue() + + # Echo the captured output so user still sees it + if captured_output: + try: + print(captured_output, end='') + except (BrokenPipeError, OSError): + pass + + finally: + if os.path.exists(tmp_path): + os.unlink(tmp_path) + + # Extract gateway token from captured JSON output + gateway_token = '' + try: + # edit.py prints a JSON blob with "access_token" key + # Find the JSON object in the output + for line in captured_output.splitlines(): + stripped = line.strip() + if stripped.startswith('{'): + try: + parsed = json.loads(stripped) + if 'access_token' in parsed: + gateway_token = parsed['access_token'] + break + except json.JSONDecodeError: + continue + # If single-line parse failed, try parsing the full output + # (edit.py uses json.dumps with indent=2, so it's multi-line) + if not gateway_token: + match = re.search( + r'\{[^{}]*"access_token"\s*:\s*"([^"]*)"[^{}]*\}', + captured_output, re.DOTALL) + if match: + gateway_token = match.group(1) + except Exception as e: + logging.debug('Could not parse gateway token from output: %s', e) + + # Find the PAM config just created + keeper_api.sync_down(params) + config_name = f'{project_name} Configuration' + from ... 
import vault_extensions + for cfg in vault_extensions.find_records(params, record_version=6): + if cfg.title == config_name: + return cfg.record_uid, gateway_token + + raise CommandError('kcm-import', + f'Failed to find PAM config "{config_name}" after skeleton creation') + + @staticmethod + def _rewrite_folder_paths(pam_json, actual_res_name, actual_usr_name, project_name): + """Rewrite folder_path roots to match actual shared folder names. + + Needed when edit.py adds a #N dedup suffix to the project folder, + making the actual shared folder names differ from the computed ones. + """ + old_res = f'{project_name} - Resources' + old_usr = f'{project_name} - Users' + if old_res == actual_res_name and old_usr == actual_usr_name: + return + + pam_data = pam_json.get('pam_data', {}) + for item in pam_data.get('resources', []): + fp = item.get('folder_path', '') + if fp == old_res or fp.startswith(old_res + '/'): + item['folder_path'] = actual_res_name + fp[len(old_res):] + # Also rewrite nested users inside this resource + for nested in item.get('users', []): + nfp = nested.get('folder_path', '') + if nfp == old_usr or nfp.startswith(old_usr + '/'): + nested['folder_path'] = actual_usr_name + nfp[len(old_usr):] + for item in pam_data.get('users', []): + fp = item.get('folder_path', '') + if fp == old_usr or fp.startswith(old_usr + '/'): + item['folder_path'] = actual_usr_name + fp[len(old_usr):] + + @staticmethod + def _discover_shared_folder_names(params, config_uid): + """Discover shared folder names from an existing PAM config's KSM app. + + Returns (resources_folder_name, users_folder_name) or (None, None). + """ + from ..pam.config_helper import configuration_controller_get + from ..pam import gateway_helper + from ...loginv3 import CommonHelperMethods + from ... 
import api as keeper_api + + keeper_api.sync_down(params) + configuration = vault.KeeperRecord.load(params, config_uid) + if not configuration: + return None, None + + try: + controller = configuration_controller_get( + params, CommonHelperMethods.url_safe_str_to_bytes( + configuration.record_uid)) + except Exception as e: + logging.debug('Could not resolve controller for config %s: %s', + config_uid, e) + return None, None + if not controller or not controller.controllerUid: + return None, None + + all_gateways = gateway_helper.get_all_gateways(params) + found = [g for g in all_gateways + if g.controllerUid == controller.controllerUid] + if not found: + return None, None + + # Import extend only when needed — it pulls in pydantic which may + # not be available in all CI environments. + from .extend import PAMProjectExtendCommand + ksmapp_uid = utils.base64_url_encode(found[0].applicationUid) + cmd = PAMProjectExtendCommand() + ksm_shared_folders = cmd.get_app_shared_folders(params, ksmapp_uid) + + res_name = usr_name = None + for shf in ksm_shared_folders: + name = shf.get('name', '') + if name.endswith('- Resources'): + res_name = name + elif name.endswith('- Users'): + usr_name = name + return res_name, usr_name + + def _resolve_db_password(self, params, kwargs): + record_ref = kwargs.get('db_password_record') or '' + if record_ref: + pw = self._extract_password_from_record(params, record_ref) + if pw: + return pw + raise CommandError('kcm-import', + f'Record "{record_ref}" not found or has no password field') + + # Interactive: search vault for candidate records + if not getattr(params, 'batch_mode', False): + candidates = self._search_vault_for_db_records(params) + if candidates: + print('\nDatabase Password') + print('─' * 50) + print(' Found vault records that may contain the DB password:\n') + for i, (rec, title) in enumerate(candidates, 1): + uid = rec.record_uid if hasattr(rec, 'record_uid') else '?' 
+ print(f' [{i}] {title} ({uid})') + print(f'\n [M] Enter password manually') + print() + choice = input(' Select [M]: ').strip() + if choice and choice.upper() != 'M': + try: + idx = int(choice) - 1 + if 0 <= idx < len(candidates): + rec, _ = candidates[idx] + pw = self._extract_password_from_record( + params, rec.record_uid) + if pw: + return pw + logging.warning('Selected record has no password.') + except (ValueError, IndexError): + logging.warning('Invalid selection.') + + return getpass.getpass('KCM Database Password: ') + + @staticmethod + def _extract_password_from_record(params, record_ref): + """Load a vault record by UID or title and extract its password.""" + record = vault.KeeperRecord.load(params, record_ref) + if not record: + return None + if hasattr(record, 'password') and record.password: + return record.password + if hasattr(record, 'get_typed_field'): + field = record.get_typed_field('password') + if field and field.value: + val = field.value + if isinstance(val, list) and val: + return str(val[0]) + if isinstance(val, str) and val: + return val + return None + + @staticmethod + def _search_vault_for_db_records(params): + """Search vault for records that might contain a DB password.""" + search_terms = ['guacamole', 'kcm', 'database password', 'db password'] + candidates = [] + seen_uids = set() + for uid in params.record_cache: + rec = vault.KeeperRecord.load(params, uid) + if not rec or uid in seen_uids: + continue + title = rec.title if hasattr(rec, 'title') else '' + title_lower = title.lower() + if not any(t in title_lower for t in search_terms): + continue + # Check if record has a password field + has_pw = False + if hasattr(rec, 'password') and rec.password: + has_pw = True + elif hasattr(rec, 'get_typed_field'): + field = rec.get_typed_field('password') + if field and field.value: + has_pw = True + if has_pw: + seen_uids.add(uid) + candidates.append((rec, title)) + return candidates + + @staticmethod + def _is_local_host(host): + 
"""Check if host is a local/private address (no SSL warning needed). + + Only trusts literal IP addresses and 'localhost'. Does NOT resolve + hostnames via DNS to prevent TOCTOU / SSRF bypass of SSL enforcement. + """ + if not host: + return False + if host in ('localhost', '127.0.0.1', '::1'): + return True + try: + addr = ipaddress.ip_address(host) + except ValueError: + # Hostname, not a literal IP — treat as remote (require SSL) + return False + if addr.is_loopback: + return True + return any(addr in net for net in PAMProjectKCMImportCommand._PRIVATE_NETS) + + @staticmethod + def _discover_docker_container(): + """Scan running Docker containers for one with Guacamole DB env vars. + + Returns the container name, or raises CommandError if none found. + """ + try: + result = subprocess.run( + ['docker', 'ps', '--format', '{{.Names}}'], + capture_output=True, text=True, timeout=10 + ) + except (subprocess.TimeoutExpired, FileNotFoundError) as e: + raise CommandError('kcm-import', + f'Docker not available: {type(e).__name__}. ' + f'Use --docker-container to specify manually.') + + if result.returncode != 0: + raise CommandError('kcm-import', + 'Could not list Docker containers. 
Is Docker running?') + + containers = [c.strip() for c in result.stdout.strip().splitlines() + if c.strip()] + if not containers: + raise CommandError('kcm-import', + 'No running Docker containers found.') + + db_env_vars = { + 'GUACAMOLE_PASSWORD', 'MYSQL_PASSWORD', + 'POSTGRESQL_PASSWORD', 'POSTGRES_PASSWORD' + } + candidates = [] + + for name in containers: + try: + result = subprocess.run( + ['docker', 'inspect', '--format', + '{{range .Config.Env}}{{println .}}{{end}}', '--', name], + capture_output=True, text=True, timeout=5 + ) + except (subprocess.TimeoutExpired, FileNotFoundError): + continue + if result.returncode != 0: + continue + env_keys = {line.split('=', 1)[0] + for line in result.stdout.strip().splitlines() + if '=' in line} + result = None # clear raw output + if env_keys & db_env_vars: + candidates.append(name) + + if not candidates: + raise CommandError('kcm-import', + 'No running containers with Guacamole/KCM database credentials ' + 'found. Use --docker-container to specify the container name.') + + if len(candidates) == 1: + logging.info('Auto-discovered Docker container: %s', candidates[0]) + return candidates[0] + + # Multiple candidates — apply tiered filtering to find the KCM DB + # Tier 1: KCM-specific containers (name contains 'kcm' and 'db') + kcm_db = [c for c in candidates + if 'kcm' in c.lower() and 'db' in c.lower()] + if len(kcm_db) == 1: + logging.info('Auto-discovered KCM DB container: %s ' + '(selected from %d candidates)', kcm_db[0], len(candidates)) + return kcm_db[0] + # Tier 2: container with 'guacamole' AND 'db' in name + guac_db = [c for c in candidates + if 'guacamole' in c.lower() and 'db' in c.lower()] + if len(guac_db) == 1: + logging.info('Auto-discovered Docker container: %s ' + '(selected from %d candidates)', guac_db[0], len(candidates)) + return guac_db[0] + # Tier 3: container with 'guacamole' in name + guac = [c for c in candidates if 'guacamole' in c.lower()] + if len(guac) == 1: + 
logging.info('Auto-discovered Docker container: %s ' + '(selected from %d candidates)', guac[0], len(candidates)) + return guac[0] + + # Interactive selection when multiple candidates remain + print('\nMultiple Docker containers with database credentials found:\n') + for i, name in enumerate(candidates, 1): + print(f' [{i}] {name}') + print() + try: + choice = input(f' Select container [1-{len(candidates)}]: ').strip() + idx = int(choice) - 1 + if 0 <= idx < len(candidates): + logging.info('User selected Docker container: %s', + candidates[idx]) + return candidates[idx] + except (ValueError, IndexError, EOFError): + pass + + names_str = ', '.join(candidates) + raise CommandError('kcm-import', + f'Could not auto-select container. Found {len(candidates)}: ' + f'{names_str}. Use --docker-container to specify one.') + + @staticmethod + def _detect_db_type_from_docker(container): + """Detect database type from Docker container environment variables. + + Returns 'mysql' or 'postgresql'. + """ + try: + result = subprocess.run( + ['docker', 'inspect', '--format', + '{{range .Config.Env}}{{println .}}{{end}}', '--', container], + capture_output=True, text=True, timeout=10 + ) + except (subprocess.TimeoutExpired, FileNotFoundError): + return 'mysql' + + if result.returncode != 0: + return 'mysql' + + env_keys = {line.split('=', 1)[0] + for line in result.stdout.strip().splitlines() + if '=' in line} + result = None # clear raw output + + has_pg = any(k.startswith(('POSTGRES_', 'POSTGRESQL_')) + for k in env_keys) + has_mysql = any(k.startswith('MYSQL_') for k in env_keys) + + if has_pg and not has_mysql: + return 'postgresql' + if has_mysql and not has_pg: + return 'mysql' + if has_pg and has_mysql: + return 'postgresql' # KCM standard + + # Check GUACAMOLE_* hints + for k in env_keys: + if 'POSTGRES' in k: + return 'postgresql' + if 'MYSQL' in k: + return 'mysql' + + return 'mysql' # fallback + + @staticmethod + def _detect_docker_credentials(db_type, 
container='guacamole'): + # Env var prefixes vary across deployments: + # KCM docker-compose: GUACAMOLE_* + # Vanilla Guacamole: MYSQL_* / POSTGRESQL_* + # Some images: POSTGRES_* (short form) + if db_type == 'mysql': + db_prefixes = ['MYSQL'] + else: + db_prefixes = ['POSTGRESQL', 'POSTGRES'] + default_port = 3306 if db_type == 'mysql' else 5432 + + # Single docker inspect call, parse all env vars at once + try: + result = subprocess.run( + ['docker', 'inspect', '--format', + '{{range .Config.Env}}{{println .}}{{end}}', '--', container], + capture_output=True, text=True, timeout=10 + ) + except (subprocess.TimeoutExpired, FileNotFoundError) as e: + raise CommandError('kcm-import', + f'Docker inspect failed: {type(e).__name__}') + + if result.returncode != 0: + logging.debug('Docker stderr: %s', result.stderr.strip()) + raise CommandError('kcm-import', + f'Docker inspect failed for container "{container}" (exit code {result.returncode})') + + env_vars = {} + for line in result.stdout.strip().splitlines(): + if '=' in line: + k, v = line.split('=', 1) + env_vars[k] = v + # Clear raw docker output — contains all container env vars including secrets + result = None + + def _env(field, default=''): + val = env_vars.get(f'GUACAMOLE_{field}') + if not val: + for prefix in db_prefixes: + val = env_vars.get(f'{prefix}_{field}') + if val: + break + return val or default + + try: + password = _env('PASSWORD') + if not password: + prefix_list = ' or '.join( + f'{p}_PASSWORD' for p in ['GUACAMOLE'] + db_prefixes) + raise CommandError('kcm-import', + f'Could not detect database password from Docker container "{container}". 
' + f'Expected {prefix_list} in container env.') + + host = _env('HOSTNAME', '') + user = _env('USER') or _env('USERNAME', 'guacamole_user') + database = _env('DATABASE', 'guacamole_db') + port_str = _env('PORT') + try: + port = int(port_str) if port_str else default_port + except ValueError: + raise CommandError('kcm-import', + f'Invalid port value from Docker: {port_str}') + + # When the container IS the database (no hostname env var), + # get its own IP directly from Docker networking + if not host or host in ('127.0.0.1', 'localhost'): + container_ip = PAMProjectKCMImportCommand._get_container_ip( + container) + if container_ip: + host = container_ip + logging.info('Using container IP: %s (from %s)', + host, container) + else: + host = host or '127.0.0.1' + + # Resolve Docker service hostnames (e.g. "db") to container IPs + if host: + host = PAMProjectKCMImportCommand._resolve_docker_host( + host, container) + + logging.info('Docker auto-detected: host=%s, port=%d, db=%s', + host, port, database) + # Return connection info separate from password so static analysis + # can distinguish sensitive from non-sensitive values + return (host, port, database, user), password + finally: + env_vars.clear() + + @staticmethod + def _get_container_ip(container): + """Get a Docker container's IP address from its network settings. + + Returns the first non-empty IP, or '' if unavailable. + """ + try: + result = subprocess.run( + ['docker', 'inspect', '--format', + '{{range .NetworkSettings.Networks}}{{.IPAddress}} {{end}}', + '--', container], + capture_output=True, text=True, timeout=10 + ) + if result.returncode == 0: + ips = [ip.strip() for ip in result.stdout.strip().split() + if ip.strip()] + if ips: + return ips[0] + except (subprocess.TimeoutExpired, FileNotFoundError): + pass + return '' + + @staticmethod + def _resolve_docker_host(host, source_container): + """Resolve a Docker service hostname to a reachable container IP. 
+ + When the detected hostname is a Docker service name (e.g. 'db'), + it is only resolvable inside the Docker network. This method + finds the actual container IP by inspecting containers on the + same Docker network(s) as the source container. + """ + # If already an IP address or localhost, return as-is + if host in ('localhost', '127.0.0.1', '::1'): + return host + try: + ipaddress.ip_address(host) + return host + except ValueError: + pass + + # hostname is a service name — resolve via Docker inspect + try: + # Get networks the source container is on + result = subprocess.run( + ['docker', 'inspect', '--format', + '{{range $net, $conf := .NetworkSettings.Networks}}' + '{{$net}} {{end}}', '--', source_container], + capture_output=True, text=True, timeout=10 + ) + if result.returncode != 0: + logging.debug('Could not inspect networks for %s', source_container) + return host + networks = result.stdout.strip().split() + + # Search for a container whose name/alias matches the hostname + for network in networks: + result = subprocess.run( + ['docker', 'network', 'inspect', '--format', + '{{range .Containers}}{{.Name}}|{{.IPv4Address}}\n{{end}}', + network], + capture_output=True, text=True, timeout=10 + ) + if result.returncode != 0: + continue + for line in result.stdout.strip().splitlines(): + if '|' not in line: + continue + name, ip_cidr = line.split('|', 1) + # Match: exact name, or host appears as a segment + # (e.g., "postgres" matches "project-postgres-1") + name_lower = name.lower() + host_lower = host.lower() + segments = name_lower.replace('_', '-').split('-') + if (host_lower == name_lower or host_lower in segments) \ + and '/' in ip_cidr: + resolved_ip = ip_cidr.split('/')[0] + logging.info('Resolved Docker hostname "%s" to %s ' + '(container: %s)', host, resolved_ip, name) + return resolved_ip + + logging.warning('Could not resolve Docker hostname "%s" — ' + 'using as-is. 
If connection fails, use --db-host ' + 'with the container IP.', host) + except (subprocess.TimeoutExpired, FileNotFoundError): + logging.debug('Docker command failed during hostname resolution') + return host + + @staticmethod + def _redact_for_display(pam_json): + """Deep-copy JSON and replace password values with [REDACTED].""" + redacted = copy.deepcopy(pam_json) + sensitive_keys = {'password', 'private_pem_key', 'private_key', 'otp', + '_totp_parts'} + def _walk(obj): + if isinstance(obj, dict): + for k, v in obj.items(): + if k in sensitive_keys and v: + obj[k] = '[REDACTED]' + else: + _walk(v) + elif isinstance(obj, list): + for item in obj: + _walk(item) + _walk(redacted) + return redacted + + +class PAMProjectKCMCleanupCommand(Command): + """Remove a KCM-imported project: shared folders, records, gateway, KSM app. + + Usage: + pam project kcm-cleanup --name "KCM-Import-20260404-203552" + pam project kcm-cleanup --config + """ + + parser = argparse.ArgumentParser( + prog='pam project kcm-cleanup', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=''' +Examples: + # Preview what would be deleted + pam project kcm-cleanup --name "KCM Migration" --dry-run + + # Delete by project name + pam project kcm-cleanup --name "KCM Migration" --yes + + # Delete by PAM config UID + pam project kcm-cleanup --config VxANFEPLi8E9gdtlDmfBvw --yes + ''') + parser.add_argument('--name', '-n', dest='project_name', action='store', + help='Project name (matches PAM config title prefix)') + parser.add_argument('--config', '-c', dest='config_uid', action='store', + help='PAM config record UID') + parser.add_argument('--dry-run', '-d', dest='dry_run', action='store_true', + default=False, help='Show what would be deleted') + parser.add_argument('--yes', '-y', dest='auto_confirm', action='store_true', + default=False, help='Skip confirmation prompt') + + def get_parser(self): + return PAMProjectKCMCleanupCommand.parser + + def execute(self, params, **kwargs): + 
project_name = kwargs.get('project_name') or '' + config_uid = kwargs.get('config_uid') or '' + dry_run = kwargs.get('dry_run', False) + auto_confirm = kwargs.get('auto_confirm', False) + + if not project_name and not config_uid: + raise CommandError('kcm-cleanup', + 'Either --name or --config is required') + + from ... import api as keeper_api + from ..pam import gateway_helper + from ..pam.config_helper import configuration_controller_get + from ...loginv3 import CommonHelperMethods + + keeper_api.sync_down(params) + + # Step 1: Find the PAM config record + config_record = None + if config_uid: + config_record = vault.KeeperRecord.load(params, config_uid) + if not config_record: + raise CommandError('kcm-cleanup', + f'PAM config record "{config_uid}" not found') + project_name = config_record.title.replace(' Configuration', '') + else: + # Search by project name + config_name = f'{project_name} Configuration' + from ... import vault_extensions + for cfg in vault_extensions.find_records(params, record_version=6): + if cfg.title == config_name: + config_record = cfg + config_uid = cfg.record_uid + break + if not config_record: + raise CommandError('kcm-cleanup', + f'PAM config "{config_name}" not found. 
' + f'Use --config with the exact UID.') + + # Step 2: Find the gateway + gateway_uid = None + gateway_name = None + gw_match = None + try: + controller = configuration_controller_get( + params, CommonHelperMethods.url_safe_str_to_bytes( + config_record.record_uid)) + if controller and controller.controllerUid: + gateway_uid = controller.controllerUid + all_gw = gateway_helper.get_all_gateways(params) + gw_match = next((g for g in all_gw + if g.controllerUid == gateway_uid), None) + if gw_match: + gateway_name = gw_match.controllerName + except Exception as e: + logging.debug('Could not resolve gateway: %s', e) + + # Step 3: Find the KSM app + ksm_app_uid = None + ksm_app_name = None + if gw_match and gw_match.applicationUid: + ksm_app_uid = utils.base64_url_encode(gw_match.applicationUid) + # Find app name from shared_folder_cache or record_cache + app_rec = vault.KeeperRecord.load(params, ksm_app_uid) + if app_rec: + ksm_app_name = getattr(app_rec, 'title', ksm_app_uid) + + # Step 4: Find shared folders + sf_names = [] + sf_uids = set() + res_name = f'{project_name} - Resources' + usr_name = f'{project_name} - Users' + for sf_uid, sf in params.shared_folder_cache.items(): + name = sf.get('name_unencrypted', '') + if name in (res_name, usr_name) or name.startswith(f'{project_name} '): + sf_names.append(name) + sf_uids.add(sf_uid) + + # Step 5: Count records in shared folders (only KCM-imported records) + record_uids = set() + kcm_prefixes = ('KCM Resource - ', 'KCM User - ') + for sf_uid in sf_uids: + sf = params.shared_folder_cache.get(sf_uid, {}) + for rec in sf.get('records', []): + rec_uid = rec.get('record_uid', '') + if rec_uid: + rec_obj = vault.KeeperRecord.load(params, rec_uid) + title = getattr(rec_obj, 'title', '') if rec_obj else '' + if title.startswith(kcm_prefixes): + record_uids.add(rec_uid) + + # Also count records in subfolders + for folder_uid, folder in params.folder_cache.items(): + if hasattr(folder, 'shared_folder_uid') and 
folder.shared_folder_uid in sf_uids: + for rec_uid in params.subfolder_record_cache.get(folder_uid, []): + rec_obj = vault.KeeperRecord.load(params, rec_uid) + title = getattr(rec_obj, 'title', '') if rec_obj else '' + if title.startswith(kcm_prefixes): + record_uids.add(rec_uid) + + # Display what will be deleted + print() + print('=' * 60) + print('KCM Project Cleanup') + print('=' * 60) + print() + print(f' Project: {project_name}') + print(f' PAM Config: {config_uid}') + print(f' Gateway: {gateway_name or "(not found)"}') + print(f' KSM App: {ksm_app_name or "(not found)"}') + print(f' Shared Folders: {len(sf_names)}') + for name in sorted(sf_names): + print(f' - {name}') + print(f' Records: {len(record_uids)}') + print() + + if dry_run: + print(' (dry run — no changes made)') + print('=' * 60) + return + + if not auto_confirm: + answer = input(' Delete all of the above? [y/N]: ').strip().lower() + if answer not in ('y', 'yes'): + raise CommandError('kcm-cleanup', 'Cleanup cancelled.') + + # Step 6: Delete records (same API as api.delete_record but batched + # to avoid N individual sync_down calls) + deleted_count = 0 + if record_uids: + logging.warning('Deleting %d records...', len(record_uids)) + uid_list = list(record_uids) + batch_size = 50 + for i in range(0, len(uid_list), batch_size): + batch = uid_list[i:i + batch_size] + try: + rq = {'command': 'record_update', 'delete_records': batch} + keeper_api.communicate(params, rq) + deleted_count += len(batch) + except Exception as e: + logging.warning('Failed to delete batch: %s', e) + + # Step 7: Delete shared folders + if sf_uids: + logging.warning('Removing %d shared folder(s)...', len(sf_uids)) + for sf_uid in sf_uids: + try: + # Build folder delete request + folder = params.folder_cache.get(sf_uid) + if folder: + del_obj = { + 'delete_resolution': 'unlink', + 'object_uid': folder.uid, + 'object_type': folder.type, + } + parent = params.folder_cache.get(folder.parent_uid) + if parent: + del_obj['from_uid'] 
= parent.uid + del_obj['from_type'] = parent.type + else: + del_obj['from_type'] = 'user_folder' + rq = { + 'command': 'pre_delete', + 'objects': [del_obj] + } + rs = keeper_api.communicate(params, rq) + if rs.get('result') == 'success': + pdr = rs.get('pre_delete_response', {}) + del_rq = { + 'command': 'delete', + 'pre_delete_token': pdr.get('pre_delete_token', '') + } + keeper_api.communicate(params, del_rq) + except Exception as e: + logging.warning('Failed to remove shared folder %s: %s', + sf_uid, e) + + # Step 8: Remove gateway + if gateway_uid: + logging.warning('Removing gateway "%s"...', gateway_name or gateway_uid) + try: + gateway_helper.remove_gateway(params, gateway_uid) + except Exception as e: + logging.warning('Failed to remove gateway: %s', e) + + # Step 9: Remove KSM app + if ksm_app_uid: + logging.warning('Removing KSM app "%s"...', ksm_app_name or ksm_app_uid) + try: + from ..ksm import KSMCommand + KSMCommand.remove_v5_app(params, ksm_app_uid, + purge=True, force=True) + except Exception as e: + logging.warning('Failed to remove KSM app: %s', e) + + # Step 10: Delete PAM config record + if config_uid: + logging.warning('Deleting PAM config record...') + try: + keeper_api.delete_record(params, config_uid) + except Exception as e: + logging.warning('Failed to delete config record: %s', e) + + keeper_api.sync_down(params) + + print() + print('=' * 60) + print(f'Cleanup complete: {deleted_count} records deleted') + print('=' * 60) diff --git a/keepercommander/commands/pam_import/kcm_mappings.json b/keepercommander/commands/pam_import/kcm_mappings.json new file mode 100644 index 000000000..3ac103045 --- /dev/null +++ b/keepercommander/commands/pam_import/kcm_mappings.json @@ -0,0 +1,158 @@ +{ + "users":{ + "username":"login", + "password":"password", + "private-key": "private_pem_key", + "public-key": "ignore", + "passphrase": "ignore", + "totp-algorithm": "totp-algorithm", + "totp-digits": "totp-digits", + "totp-period": "totp-period", + 
"totp-secret": "totp-secret" + }, + "resources":{ + "domain": "domain_name", + "create-recording-path": "pam_settings.options.graphical_session_recording=on", + "create-typescript-path": "pam_settings.options.text_session_recording=on", + "recording-include-keys": "pam_settings.connection.recording_include_keys", + "security": "pam_settings.connection.security", + "color-depth": null, + "enable-audio": null, + "disable-copy": "pam_settings.connection.disable_copy", + "disable-paste": "pam_settings.connection.disable_paste", + "force-lossless": null, + "read-only": null, + "backspace": null, + "url": "url", + "allow-url-manipulation": "pam_settings.connection.allow_url_manipulation", + "ignore-initial-ssl-cert": "pam_settings.connection.ignore_server_cert", + "allowed-resource-url-patterns": "pam_settings.connection.allowed_resource_url_patterns", + "allowed-url-patterns": "pam_settings.connection.allowed_url_patterns", + "autofill-configuration": "pam_settings.connection.autofill_targets", + "disable-audio": "pam_settings.connection.disable_audio", + "audio-bps": null, + "audio-channels": null, + "audio-sample-rate": null, + "ca-cert": "pam_settings.connection.ca_certificate", + "client-cert": "pam_settings.connection.client_certificate", + "client-key": "pam_settings.connection.client_key", + "color-scheme": "pam_settings.connection.color_scheme", + "font-name": null, + "font-size": "pam_settings.connection.font_size", + "scrollback": null, + "ignore-cert": "pam_settings.connection.ignore_server_cert", + "namespace": "pam_settings.connection.namespace", + "pod": "pam_settings.connection.pod_name", + "container": "pam_settings.connection.container", + "use-ssl": "use_ssl", + "database": "pam_settings.connection.default_database", + "disable-csv-export": "pam_settings.connection.disable_csv_export", + "disable-csv-import": "pam_settings.connection.disable_csv_import", + "client-name": null, + "console": null, + "console-audio": null, + "disable-auth": 
"pam_settings.connection.disable_authentication", + "disable-bitmap-caching": null, + "disable-glyph-caching": null, + "disable-offscreen-caching": null, + "dpi": null, + "enable-audio-input": null, + "disable-display-resize": "pam_settings.connection.disable_dynamic_resizing", + "enable-desktop-composition": "pam_settings.connection.enable_desktop_composition", + "enable-font-smoothing": "pam_settings.connection.enable_font_smoothing", + "enable-full-window-drag": "pam_settings.connection.enable_full_window_drag", + "enable-menu-animations": null, + "enable-printing": null, + "enable-theming": null, + "enable-touch": null, + "enable-wallpaper": "pam_settings.connection.enable_wallpaper", + "initial-program": null, + "load-balance-info": "pam_settings.connection.load_balance_info", + "normalize-clipboard": null, + "preconnection-blob": "pam_settings.connection.preconnection_blob", + "preconnection-id": "pam_settings.connection.preconnection_id", + "printer-name": null, + "remote-app": null, + "remote-app-args": null, + "remote-app-dir": null, + "resize-method": null, + "timezone": null, + "width": null, + "height": null, + "locale": null, + "host-key": "pam_settings.connection.public_host_key", + "command": "pam_settings.connection.command", + "server-alive-interval": null, + "terminal-type": null, + "login-failure-regex": "pam_settings.connection.login_failure_regex", + "login-success-regex": "pam_settings.connection.login_success_regex", + "password-regex": "pam_settings.connection.password_regex", + "username-regex": "pam_settings.connection.username_regex", + "audio-servername": null, + "clipboard-buffer-size": null, + "clipboard-encoding": null, + "compress-level": null, + "cursor": null, + "dest-host": null, + "dest-port": null, + "disable-server-input": null, + "encodings": null, + "quality-level": null, + "swap-red-blue": null, + "wol-broadcast-addr": null, + "wol-mac-addr": null, + "wol-send-packet": null, + "wol-udp-port": null, + "wol-wait-time": null, + 
"create-profile-directory": null, + "profile-storage-directory": null, + "exec-command": null, + "unix-socket": null, + "cert-fingerprints": null, + "cert-tofu": null, + "disable-download": null, + "disable-gfx": null, + "disable-upload": null, + "drive-name": null, + "drive-path": null, + "enable-drive": null, + "create-drive-path": null, + "gateway-domain": null, + "gateway-hostname": null, + "gateway-password": null, + "gateway-port": null, + "gateway-username": null, + "server-layout": null, + "static-channels": null, + "timeout": null, + "ca-certificate": null, + "disable-cert-hostname-verification": null, + "force-encryption": null, + "protocol-version": null, + "ksm-user-config-enabled": "ignore", + "recording-name": "ignore", + "recording-path": "ignore", + "recording-write-existing": "ignore", + "typescript-name": "ignore", + "typescript-path": "ignore", + "typescript-write-existing": "ignore", + "recording-exclude-mouse": null, + "recording-exclude-output": null, + "recording-exclude-touch": null, + "enable-sftp": "pam_settings.connection.sftp.enable_sftp", + "sftp-directory": "pam_settings.connection.sftp.sftp_upload_directory", + "sftp-disable-download": null, + "sftp-disable-upload": null, + "sftp-host-key": null, + "sftp-hostname": "pam_settings.connection.sftp.host", + "sftp-passphrase": null, + "sftp-password": "pam_settings.connection.sftp.password", + "sftp-port": "pam_settings.connection.sftp.port", + "sftp-private-key": "pam_settings.connection.sftp.private_key", + "sftp-public-key": null, + "sftp-root-directory": "pam_settings.connection.sftp.sftp_root_directory", + "sftp-server-alive-interval": "pam_settings.connection.sftp.sftp_keepalive_interval", + "sftp-timeout":null, + "sftp-username": "pam_settings.connection.sftp.login" + } +} diff --git a/keepercommander/commands/pam_launch/launch.py b/keepercommander/commands/pam_launch/launch.py index 6646aec7c..6d64cf993 100644 --- a/keepercommander/commands/pam_launch/launch.py +++ 
b/keepercommander/commands/pam_launch/launch.py @@ -17,7 +17,10 @@ import re import shutil import signal +import sys import time + +from colorama import Fore, Style from typing import TYPE_CHECKING, Dict, Any, Optional, Tuple from keeper_secrets_manager_core.utils import url_safe_str_to_bytes @@ -271,6 +274,52 @@ def _record_has_host_port(record: Any) -> bool: return bool(host) and port is not None +# Exit codes for involuntary ``pam launch`` terminations. Distinct from the +# generic CommandError path (1) and normal exit (0) so scripts can branch. +# Avoid 0/1, sysexits.h (64-78), shell-reserved (126-127), signal range (128+). +EXIT_CODE_AI_TERMINATED = 40 +EXIT_CODE_ADMIN_TERMINATED = 41 + + +def _print_close_reason_notice( + reason: Optional[str], + *, + pending_exit_code: Optional[int], +) -> Optional[int]: + """Show a user-facing notice for an involuntary remote close. + + Stays silent for ``normal`` / ``client`` (user-initiated). Called from the + inner ``finally`` of ``_start_cli_session`` after the input handler has + stopped and the local terminal is back in cooked mode — the message is the + last thing the user sees before returning to Commander, so no acknowledge + prompt is needed. 
+ """ + if not reason or reason in ('normal', 'client'): + return pending_exit_code + + is_stdout_tty = sys.stdout.isatty() + + def _color(text: str, color: str) -> str: + if not is_stdout_tty: + return text + return f'{color}{Style.BRIGHT}{text}{Style.RESET_ALL}' + + if reason == 'ai_closed': + print() + print(_color('Session terminated by KeeperAI.', Fore.RED), flush=True) + print('Critical activity was detected during this session.', flush=True) + print('Contact your administrator to unlock this record.', flush=True) + return EXIT_CODE_AI_TERMINATED + + if reason == 'admin_closed': + print() + print(_color('Session terminated by administrator.', Fore.YELLOW), flush=True) + return EXIT_CODE_ADMIN_TERMINATED + + print(f'\nSession ended ({reason}).', flush=True) + return pending_exit_code + + class PAMLaunchCommand(Command): """PAM Launch command to launch a connection to a PAM resource""" @@ -279,7 +328,9 @@ class PAMLaunchCommand(Command): parser = argparse.ArgumentParser(prog='pam launch', description='Launch a connection to a PAM resource') parser.add_argument('record', type=str, action='store', - help='Record path or UID of the PAM resource to launch') + help='PAM resource to launch — record UID, path, exact title, ' + 'or any substring of the title or a host/pamHostname field. ' + 'Multiple matches prompt for selection on a TTY.') parser.add_argument('--no-trickle-ice', '-nti', required=False, dest='no_trickle_ice', action='store_true', help='Disable trickle ICE for WebRTC connections. By default, trickle ICE is enabled ' 'for real-time candidate exchange.') @@ -382,6 +433,13 @@ def find_record(self, params: KeeperParams, record_token: str) -> Optional[str]: if record_uid: return record_uid + # Step 4: Substring fallback — title + host/pamHostname fields across + # PAM records. Catches cases like `pam launch prod-server` (multiple + # title matches) and `pam launch db.example.com` (hostname lookup). 
+ record_uid = self._find_by_substring(params, record_token) + if record_uid: + return record_uid + return None def _find_by_path(self, params: KeeperParams, path: str) -> Optional[str]: @@ -424,13 +482,13 @@ def _find_by_path(self, params: KeeperParams, path: str) -> Optional[str]: logging.debug(f"Found record by path: {path} -> {pam_matched[0]} (1 PAM among {len(all_matched)} matches)") return pam_matched[0] if len(pam_matched) == 0: - logging.error( - 'pam launch: path "%s" matches %d record(s) but none are PAM types (pamMachine, pamDirectory, pamDatabase). Use UID or a path that resolves to a single PAM record.', + logging.debug( + 'path %r matches %d record(s) but none are PAM types', path, len(all_matched), ) return None - logging.error( - 'pam launch: path "%s" matches %d PAM records. Please use a unique identifier (UID or full path).', + logging.debug( + 'path %r matches %d PAM records — falling through to substring search', path, len(pam_matched), ) return None @@ -464,19 +522,115 @@ def _find_by_title(self, params: KeeperParams, title: str) -> Optional[str]: logging.debug(f"Found record by title: {title} -> {pam_matched[0]} (1 PAM among {len(all_matched)} matches)") return pam_matched[0] if len(pam_matched) == 0: - logging.error( - 'pam launch: title "%s" matches %d record(s) but none are PAM types (pamMachine, pamDirectory, pamDatabase). Use UID or full path.', + logging.debug( + 'title %r matches %d record(s) but none are PAM types', title, len(all_matched), ) return None - logging.error( - 'pam launch: title "%s" matches %d PAM records. 
Please use a unique identifier (UID or full path).', + logging.debug( + 'title %r matches %d PAM records — falling through to substring search', title, len(pam_matched), ) return None return None + def _find_by_substring(self, params: KeeperParams, token: str) -> Optional[str]: + """Substring fallback for ``find_record`` — case-insensitive contains + match across PAM record titles and any ``host`` / ``pamHostname`` field. + + Limited to ``VALID_PAM_RECORD_TYPES`` (pamMachine, pamDirectory, + pamDatabase). Connections-enabled / config gating is intentionally not + checked here — that's expensive (DAG fetch per record) and the + downstream gates in ``execute()`` reject inappropriate records anyway. + + Returns None if no candidates match. Returns the unique UID if exactly + one matches. With multiple matches, prompts on a TTY or prints the + list and returns None on a non-TTY. + """ + token_lower = token.lower() + # candidate tuple: (uid, title, [(hostName, port), ...]) + candidates: list = [] + for record_uid in params.record_cache: + try: + record = vault.KeeperRecord.load(params, record_uid) + except Exception: + continue + if not isinstance(record, vault.TypedRecord) or record.version != 3: + continue + if record.record_type not in self.VALID_PAM_RECORD_TYPES: + continue + title = record.title or '' + hosts: list = [] + for field in _iter_record_fields(record): + if getattr(field, 'type', None) not in ('pamHostname', 'host'): + continue + value = field.get_default_value(dict) if hasattr(field, 'get_default_value') else {} + if not isinstance(value, dict): + continue + host_name = (value.get('hostName') or '').strip() + if not host_name: + continue + hosts.append((host_name, value.get('port'))) + + if token_lower in title.lower() or any(token_lower in h.lower() for (h, _) in hosts): + candidates.append((record_uid, title, hosts)) + + if not candidates: + return None + + if len(candidates) == 1: + uid, title, _ = candidates[0] + logging.debug('substring %r -> %s 
(%s)', token, uid, title) + return uid + + return self._pick_candidate(candidates, token) + + @staticmethod + def _pick_candidate(candidates: list, token: str) -> Optional[str]: + """Render a numbered list of candidates and prompt for selection. + + On non-TTY stdin, prints the list once and returns None — caller + surfaces ``Record not found`` so the user knows to be more specific. + """ + title_w = max(len(c[1]) for c in candidates) + print(f'\n{len(candidates)} matching resources:', flush=True) + for idx, (uid, title, hosts) in enumerate(candidates, 1): + host_str = '' + if hosts: + host_str = ', '.join(f'{h}:{p}' if p else h for (h, p) in hosts) + host_str = f' ({host_str})' + print(f' {idx:>2}. {uid} {title:<{title_w}}{host_str}', flush=True) + + if not sys.stdin.isatty(): + logging.error( + 'pam launch: %d matches for %r — re-run with a UID, full path, ' + 'or a more specific token.', len(candidates), token, + ) + return None + + while True: + try: + answer = input('Specify the resource: ').strip() + except (EOFError, KeyboardInterrupt): + print() + return None + if not answer: + return None + try: + n = int(answer) + except ValueError: + print( + f'Invalid selection {answer!r}. Enter a number 1..{len(candidates)} ' + 'or press Enter to cancel.', + flush=True, + ) + continue + if not (1 <= n <= len(candidates)): + print(f'Selection out of range. Enter 1..{len(candidates)}.', flush=True) + continue + return candidates[n - 1][0] + def find_gateway( self, params: KeeperParams, @@ -1278,6 +1432,14 @@ def _start_cli_session( ) shutdown_requested = False lease_expired = False + # Latest close reason from the rust webrtc layer (snake_case name from + # PyCloseConnectionReason). Set asynchronously by _on_session_disconnect + # below; consumed in the inner finally to print a user-facing notice. + closure_reason: Optional[str] = None + # Distinct exit code for involuntary terminations (KeeperAI, admin). 
+ # Raised as SystemExit at the end of the method so the inner/outer + # finally cleanup blocks run first. + pending_exit_code: Optional[int] = None def signal_handler_fn(signum, frame): nonlocal shutdown_requested @@ -1322,6 +1484,17 @@ def _on_lease_expired(): if not python_handler: raise CommandError('pam launch', 'No python_handler in tunnel result - ensure Rust module supports PythonHandler mode') + # Capture remote close reason from the rust webrtc layer so the + # finally block below can show a reason-specific notice (KeeperAI, + # admin, etc.). Runs on the rust callback thread — do not print + # here; terminal is still in raw mode. + def _on_session_disconnect(reason: str) -> None: + nonlocal closure_reason, shutdown_requested + closure_reason = reason + shutdown_requested = True + + python_handler.on_disconnect = _on_session_disconnect + conversation_id = tunnel_result['tunnel'].get('conversation_id') logging.debug(f"Starting PythonHandler CLI session for tube {tube_id}") @@ -1810,6 +1983,14 @@ def _remote_key_ctrl_c() -> None: if lease_expired: print('\nAccess expired — session terminated by workflow lease.', flush=True) + # Reason-specific notice for involuntary closures from the + # gateway / rust webrtc layer (KeeperAI, admin, transport errors). + # Stays silent for normal/client-initiated closes. + pending_exit_code = _print_close_reason_notice( + closure_reason, + pending_exit_code=pending_exit_code, + ) + # Cleanup - check if connection is already closed to avoid deadlock logging.debug("Stopping Python handler...") try: @@ -1868,3 +2049,12 @@ def _remote_key_ctrl_c() -> None: logging.debug("Auto check-in failed: %s", e) exit_pam_launch_terminal_rust_logging(rust_log_token) signal.signal(signal.SIGINT, original_handler) + + # Surface a distinct OS exit code (KeeperAI=40, admin=41) only when + # Commander is running in batch / scripted mode (e.g. `keeper pam + # launch UID` from a shell). 
In the interactive shell (`keeper shell` + # -> `pam launch UID`) the user expects to land back at the Keeper> + # prompt — a SystemExit would tear the whole shell down. Raised after + # both finally blocks so cleanup is already complete. + if pending_exit_code is not None and getattr(params, 'batch_mode', False): + raise SystemExit(pending_exit_code) diff --git a/keepercommander/commands/pam_launch/python_handler.py b/keepercommander/commands/pam_launch/python_handler.py index 44aba376e..19554fcdc 100644 --- a/keepercommander/commands/pam_launch/python_handler.py +++ b/keepercommander/commands/pam_launch/python_handler.py @@ -1053,15 +1053,33 @@ def is_data_flowing(self) -> bool: @staticmethod def _close_reason_name(reason: int) -> str: - """Convert close reason code to name.""" + """Convert close reason code to snake_case name. + + Mirrors ``PyCloseConnectionReason`` in + ``keeper-pam-webrtc-rs/src/python/enums.rs``. Code 3 is + intentionally absent in the rust enum. + """ reasons = { - 0: "unknown", - 1: "normal", + 0: "normal", + 1: "error", 2: "timeout", - 3: "error", - 4: "refused", - 5: "unreachable", - 6: "reset", + 4: "server_refuse", + 5: "client", + 6: "unknown", + 7: "invalid_instruction", + 8: "guacd_refuse", + 9: "connection_lost", + 10: "connection_failed", + 11: "tunnel_closed", + 12: "admin_closed", + 13: "error_recording", + 14: "guacd_error", + 15: "ai_closed", + 16: "address_resolution_failed", + 17: "decryption_failed", + 18: "configuration_error", + 19: "protocol_error", + 20: "upstream_closed", } return reasons.get(reason, f"code_{reason}") diff --git a/keepercommander/commands/record.py b/keepercommander/commands/record.py index ba7bbf321..2091c55c5 100644 --- a/keepercommander/commands/record.py +++ b/keepercommander/commands/record.py @@ -350,6 +350,13 @@ def _format_expiration(expiration_value): print(json.dumps(fo, indent=2)) else: f.display() + if f.type == BaseFolderNode.KeeperDriveFolderType: + try: + from 
.keeper_drive.display_commands import KeeperDriveGetCommand + KeeperDriveGetCommand._print_folder_permissions( + params, f.uid, kwargs.get('verbose', False)) + except Exception as e: + logging.debug('KeeperDrive permission display skipped: %s', e) direct_match = True return @@ -453,6 +460,32 @@ def _format_expiration(expiration_value): ro['user_permissions'] = rec['shares']['user_permissions'].copy() if 'shared_folder_permissions' in rec['shares']: ro['shared_folder_permissions'] = rec['shares']['shared_folder_permissions'].copy() + + # For KeeperDrive records, replace the user_permissions + # block with role-aware entries fetched from the KD + # access graph (matches ``kd-get --format json``). + if (hasattr(params, 'keeper_drive_records') + and uid in getattr(params, 'keeper_drive_records', {})): + try: + from .. import keeper_drive as _kd + from .keeper_drive.helpers import get_access_role_label + kd_accesses = (_kd.get_record_accesses_v3(params, [uid]) + .get('record_accesses', [])) + if kd_accesses: + kd_perms = [] + for a in kd_accesses: + accessor = a.get('accessor_name') or a.get('access_type_uid', '') + kd_perms.append({ + 'username': accessor, + 'owner': a.get('owner', False), + 'shareable': a.get('can_approve_access', False) or a.get('can_update_access', False), + 'editable': a.get('can_edit', False), + 'role': get_access_role_label(a), + }) + ro['user_permissions'] = kd_perms + except Exception as e: + logging.debug('Could not enrich KD user_permissions for %s: %s', uid, e) + if admins: ro['share_admins'] = admins @@ -533,8 +566,52 @@ def _format_expiration(expiration_value): else: unmask = kwargs.get('unmask') is True r.display(unmask=unmask) + + # KeeperDrive records carry their permissions on the KD + # access graph, not in ``rec['shares']['user_permissions']``. + # Render the KD-style "User Permissions" block (with Role) + # so ``get`` matches ``kd-get`` for KD records. 
+ is_kd_record = ( + hasattr(params, 'keeper_drive_records') + and uid in getattr(params, 'keeper_drive_records', {}) + ) + kd_user_perms_rendered = False + if is_kd_record: + try: + from .. import keeper_drive as _kd + accesses = (_kd.get_record_accesses_v3(params, [uid]) + .get('record_accesses', [])) + if accesses: + from .keeper_drive.helpers import ( + get_access_role_label, + RECORD_PERM_LABELS, + ) + print('') + print('User Permissions:') + for a in accesses: + accessor = a.get('accessor_name') or a.get('access_type_uid', '') + is_owner = a.get('owner', False) + can_edit = a.get('can_edit', False) + can_share = a.get('can_approve_access', False) or a.get('can_update_access', False) + role = get_access_role_label(a) + + print('') + print(' User: ' + accessor) + if is_owner: + print(' Owner: Yes') + else: + # Skip role for owners - their access is implicit. + print(' Role: ' + role) + print(' Shareable: ' + ('Yes' if can_share else 'No')) + print(' Read-Only: ' + ('Yes' if not can_edit else 'No')) + kd_user_perms_rendered = True + except Exception as e: + logging.debug('Could not render KD permissions for %s: %s', uid, e) + if rec.get('shares'): - if 'user_permissions' in rec['shares'] and rec['shares']['user_permissions']: + if (not kd_user_perms_rendered + and 'user_permissions' in rec['shares'] + and rec['shares']['user_permissions']): print('') print('User Permissions:') for user in rec['shares']['user_permissions']: @@ -1336,13 +1413,16 @@ def execute(self, params, **kwargs): search_fields=search_fields, use_regex=True)] if any(records): - headers = ['record_uid', 'type', 'title', 'description', 'shared'] + headers = ['record_uid', 'type', 'title', 'description', 'shared', 'record_category'] if fmt == 'table': headers = [base.field_to_title(x) for x in headers] table = [] for record in records: + # Determine if record is from Keeper Drive or Legacy + is_keeper_drive = hasattr(params, 'keeper_drive_records') and record.record_uid in 
params.keeper_drive_records + source = 'KeeperDrive' if is_keeper_drive else 'Legacy' row = [record.record_uid, record.record_type, record.title, - vault_extensions.get_record_description(record), record.shared] + vault_extensions.get_record_description(record), record.shared, source] table.append(row) table.sort(key=lambda x: (x[2] or '').lower()) if fmt != 'json': diff --git a/keepercommander/commands/supershell/app.py b/keepercommander/commands/supershell/app.py index 8c12dbd55..ae4747753 100644 --- a/keepercommander/commands/supershell/app.py +++ b/keepercommander/commands/supershell/app.py @@ -250,6 +250,12 @@ def _get_welcome_screen_content(self) -> str: [{t['text_dim']}]•[/{t['text_dim']}] [{t['primary']}]![/{t['primary']}] - Exit to Keeper shell [{t['text_dim']}]•[/{t['text_dim']}] [{t['primary']}]Ctrl+q[/{t['primary']}] - Quit SuperShell +[bold {t['primary_bright']}]Folder Icons[/bold {t['primary_bright']}] + [{t['text_dim']}]•[/{t['text_dim']}] Legacy Personal Folder 🔒 + [{t['text_dim']}]•[/{t['text_dim']}] Legacy Shared Folder 📦 + [{t['text_dim']}]•[/{t['text_dim']}] Drive Shared Folder 👥 + [{t['text_dim']}]•[/{t['text_dim']}] Drive NonShared Folder 📁 + [{t['text_dim']}]Press [/{t['text_dim']}][{t['primary']}]?[/{t['primary']}][{t['text_dim']}] for full keyboard shortcuts[/{t['text_dim']}]""" def _apply_theme_css(self): @@ -383,6 +389,12 @@ async def on_mount(self): [{t['text_dim']}]•[/{t['text_dim']}] [{t['primary']}]![/{t['primary']}] - Exit to Keeper shell [{t['text_dim']}]•[/{t['text_dim']}] [{t['primary']}]Ctrl+q[/{t['primary']}] - Quit SuperShell +[bold {t['primary_bright']}]Folder Icons[/bold {t['primary_bright']}] + [{t['text_dim']}]•[/{t['text_dim']}] Legacy Personal Folder 🔒 + [{t['text_dim']}]•[/{t['text_dim']}] Legacy Shared Folder 📦 + [{t['text_dim']}]•[/{t['text_dim']}] Drive Shared Folder 👥 + [{t['text_dim']}]•[/{t['text_dim']}] Drive NonShared Folder 📁 + [{t['text_dim']}]Press 
[/{t['text_dim']}][{t['primary']}]?[/{t['primary']}][{t['text_dim']}] for full keyboard shortcuts[/{t['text_dim']}]""" detail_widget.update(help_content) @@ -411,17 +423,22 @@ def on_resize(self, event) -> None: def _load_vault_data(self): """Load vault data from params""" - # Build record to folder mapping using subfolder_record_cache - # Records in root folder have folder_uid = '' (empty string) + # Build record to folder mapping using subfolder_record_cache. + # Records in root folder have folder_uid = '' (empty string). + real_folder_uids = set(getattr(self.params, 'folder_cache', {}).keys()) + real_folder_uids.update(getattr(self.params, 'keeper_drive_folders', {}).keys()) + self.record_to_folder = {} # Maps record_uid -> folder_uid - self.records_in_subfolders = set() # Track records that are in actual subfolders (not root) + self.records_in_subfolders = set() # Records that are in actual subfolders (not root) if hasattr(self.params, 'subfolder_record_cache'): for folder_uid, record_uids in self.params.subfolder_record_cache.items(): + is_real_subfolder = bool(folder_uid) and folder_uid in real_folder_uids for record_uid in record_uids: - self.record_to_folder[record_uid] = folder_uid - # Track records in non-root folders - if folder_uid and folder_uid != '': + if is_real_subfolder: + self.record_to_folder[record_uid] = folder_uid self.records_in_subfolders.add(record_uid) + else: + self.record_to_folder.setdefault(record_uid, '') # Track file attachments and their parent records self.file_attachment_to_parent = {} # Maps attachment_uid -> parent_record_uid @@ -953,6 +970,75 @@ def _add_record_with_attachments(self, parent_node, record: dict, idx: int, auto data={'type': 'record', 'uid': record_uid, 'has_attachments': bool(attachments or linked_records)} ) + def _get_folder_icon(self, folder_node): + """Return the appropriate icon for a folder based on its type and sharing status. 
+ + Icon mapping (matches Keeper desktop client): + - Legacy Personal Folder (user_folder) → 🔒 + - Legacy Shared Folder (shared_folder) → 📦 + - Subfolder in Shared (shared_folder_folder) → 📦 + - Drive Shared Folder (keeper_drive_folder, shared) → 👥 + - Drive NonShared Folder (keeper_drive_folder, not shared) → 📁 + """ + from ...subfolder import BaseFolderNode + if folder_node is None: + return "📁" + ft = folder_node.type + if ft == BaseFolderNode.UserFolderType: + return "🔒" + if ft in (BaseFolderNode.SharedFolderType, BaseFolderNode.SharedFolderFolderType): + return "📦" + if ft == BaseFolderNode.KeeperDriveFolderType: + try: + from ...proto import folder_pb2 + from ..keeper_drive.helpers import ( + _current_user_account_uid, + _is_current_user_access, + ) + + folder_obj = (getattr(self.params, 'keeper_drive_folders', {}) or {}).get( + folder_node.uid, {} + ) or {} + owner_username = folder_obj.get('owner_username') or '' + owner_account_uid = folder_obj.get('owner_account_uid') or '' + current_username = getattr(self.params, 'user', '') or '' + current_account_uid = _current_user_account_uid(self.params) + + + current_user_is_owner = False + owner_known = bool(owner_username) or bool(owner_account_uid) + if owner_username and current_username and \ + owner_username.lower() == current_username.lower(): + current_user_is_owner = True + elif owner_account_uid and current_account_uid and \ + owner_account_uid == current_account_uid: + current_user_is_owner = True + + if owner_known and not current_user_is_owner: + return "👥" + + sharing_state = ( + getattr(self.params, 'keeper_drive_folder_sharing_states', {}) or {} + ).get(folder_node.uid) + if sharing_state and sharing_state.get('shared'): + return "👥" + + accesses = (getattr(self.params, 'keeper_drive_folder_accesses', {}) or {}).get( + folder_node.uid, [] + ) or [] + at_user = int(folder_pb2.AT_USER) + at_team = int(folder_pb2.AT_TEAM) + for a in accesses: + if int(a.get('access_type', 0) or 0) not in (at_user, 
at_team): + continue + if _is_current_user_access(a, self.params, current_account_uid): + continue + return "👥" + except Exception: + pass + return "📁" + return "📁" + def _setup_folder_tree(self): """Setup the folder tree structure with records as children""" tree = self.query_one("#folder_tree", Tree) @@ -1007,12 +1093,8 @@ def add_folder_node(parent_tree_node, folder_node, folder_uid): # Determine label and color based on folder type color = t['folder'] - if folder_node.type == 'shared_folder': - # Shared folder: bold green name with share icon after - label = f"[bold {color}]{folder_node.name}[/bold {color}] 👥" - else: - # Regular folder: bold green name - label = f"[bold {color}]{folder_node.name}[/bold {color}]" + folder_icon = self._get_folder_icon(folder_node) + label = f"[bold {color}]{folder_node.name}[/bold {color}] {folder_icon}" # Add this folder to the tree with color tree_node = parent_tree_node.add( @@ -1428,9 +1510,7 @@ def _format_folder_for_tui(self, folder_uid: str) -> str: # Fallback to basic folder info if get command didn't work folder = self.params.folder_cache.get(folder_uid) if folder: - folder_type = folder.get_folder_type() if hasattr(folder, 'get_folder_type') else folder.type - folder_type_str = str(folder_type) if folder_type else 'Folder' - folder_icon = "👥" if 'shared' in folder_type_str.lower() else "📁" + folder_icon = self._get_folder_icon(folder) return ( f"[bold {t['secondary']}]{folder_icon} {rich_escape(str(folder.name))}[/bold {t['secondary']}]\n\n" f"[{t['text_dim']}]Folder:[/{t['text_dim']}] [bold {t['primary']}]{rich_escape(str(folder.name))}[/bold {t['primary']}]\n" @@ -1445,8 +1525,7 @@ def _format_folder_for_tui(self, folder_uid: str) -> str: # Determine folder header with icon and name folder = self.params.folder_cache.get(folder_uid) folder_name = folder.name if folder else "Folder" - is_shared = 'Shared Folder UID' in output - folder_icon = "👥" if is_shared else "📁" + folder_icon = self._get_folder_icon(folder) 
lines.append(f"[bold {t['secondary']}]{folder_icon} {rich_escape(str(folder_name))}[/bold {t['secondary']}]") lines.append("") @@ -2362,14 +2441,7 @@ def mount_line(content: str, copy_value: str = None): # Determine folder header with icon and name folder_name = folder.name if folder else "Folder" - if folder: - ft = folder.get_folder_type() if hasattr(folder, 'get_folder_type') else str(folder.type) - if 'shared' in ft.lower(): - folder_icon = "👥" - else: - folder_icon = "📁" - else: - folder_icon = "📁" + folder_icon = self._get_folder_icon(folder) # Type header with icon and folder name mount_line(f"[bold {t['secondary']}]{folder_icon} {rich_escape(str(folder_name))}[/bold {t['secondary']}]", None) @@ -2506,14 +2578,7 @@ def mount_json_line(content: str, copy_value: str = None, indent: int = 0): # Determine folder header with icon and name folder = self.params.folder_cache.get(folder_uid) folder_name = folder.name if folder else "Folder" - if folder: - ft = folder.get_folder_type() if hasattr(folder, 'get_folder_type') else str(folder.type) - if 'shared' in ft.lower(): - folder_icon = "👥" - else: - folder_icon = "📁" - else: - folder_icon = "📁" + folder_icon = self._get_folder_icon(folder) # Build formatted JSON output with clickable values mount_json_line(f"[bold {t['secondary']}]{folder_icon} {rich_escape(str(folder_name))}[/bold {t['secondary']}] [{t['text_dim']}](JSON)[/{t['text_dim']}]", None) diff --git a/keepercommander/commands/supershell/screens/help.py b/keepercommander/commands/supershell/screens/help.py index 2261c3a62..0f10e406e 100644 --- a/keepercommander/commands/supershell/screens/help.py +++ b/keepercommander/commands/supershell/screens/help.py @@ -113,7 +113,13 @@ def compose(self) -> ComposeResult: d Sync vault W User info D Device info - P Preferences""", classes="help_column") + P Preferences + +[green]Folder Icons:[/green] + 🔒 Legacy Personal Folder + 📦 Legacy Shared Folder + 👥 Drive Shared Folder + 📁 Drive NonShared Folder""", 
classes="help_column") yield Static("[dim]Press Esc or q to close[/dim]", id="help_footer") def action_dismiss(self): diff --git a/keepercommander/commands/tunnel_and_connections.py b/keepercommander/commands/tunnel_and_connections.py index 01ff493b5..0117a4382 100644 --- a/keepercommander/commands/tunnel_and_connections.py +++ b/keepercommander/commands/tunnel_and_connections.py @@ -12,12 +12,17 @@ import argparse import datetime import http.client +import json import logging import os +import platform +import signal import socket import ssl import struct +import subprocess import sys +import threading import time from typing import List, Optional, Tuple from keeper_secrets_manager_core.utils import bytes_to_base64, base64_to_bytes, url_safe_str_to_bytes @@ -26,7 +31,16 @@ from .tunnel.port_forward.TunnelGraph import TunnelDAG from .tunnel.port_forward.tunnel_helpers import find_open_port, get_config_uid, get_keeper_tokens, \ get_or_create_tube_registry, get_gateway_uid_from_record, resolve_record, resolve_pam_config, resolve_folder, \ - remove_field, start_rust_tunnel, get_tunnel_session, CloseConnectionReasons + remove_field, start_rust_tunnel, get_tunnel_session, unregister_tunnel_session, CloseConnectionReasons, \ + wait_for_tunnel_connection, create_rust_webrtc_settings +from .tunnel_registry import ( + PARENT_GRACE_SECONDS, + is_pid_alive, + list_registered_tunnels, + register_tunnel, + stop_tunnel_process, + unregister_tunnel, +) from .. import api, vault, record_management from ..display import bcolors from ..error import CommandError @@ -41,9 +55,15 @@ # the original timer alive and produce duplicate "Tunnel access expired" # messages from the prior tunnel. _LEASE_EXPIRY_TIMERS_BY_RECORD = {} # type: dict[str, threading.Timer] +# Maps record_uid -> threading.Event used by --foreground / --run modes to break +# their blocking wait when the workflow lease expires. Set by the mode block, +# read by the lease-expiry callback. 
Default interactive mode does NOT register +# (it has no blocking wait to interrupt; user SSH session continues naturally). +_LEASE_SHUTDOWN_EVENTS_BY_RECORD = {} # type: dict[str, threading.Event] import threading as _lease_threading_module # noqa: E402 (used only by the tunnel-start timer) + # Group Commands class PAMTunnelCommand(GroupCommand): @@ -83,73 +103,65 @@ def get_parser(self): return PAMTunnelListCommand.pam_cmd_parser def execute(self, params, **kwargs): - # Try to get active tunnels from Rust PyTubeRegistry - # Logger initialization is handled by get_or_create_tube_registry() - tube_registry = get_or_create_tube_registry(params) - if tube_registry: - if not tube_registry.has_active_tubes(): - logging.warning(f"{bcolors.OKBLUE}No Tunnels running{bcolors.ENDC}") - return - - table = [] - headers = ['Record', 'Remote Target', 'Local Address', 'Tunnel ID', 'Conversation ID', 'Status'] + table = [] + headers = ['Record', 'Remote Target', 'Local Address', 'Tunnel ID', 'Conversation ID', 'Status'] - # Get all tube IDs + # In-process tunnels from the Rust PyTubeRegistry + tube_registry = get_or_create_tube_registry(params) + in_process_tube_ids = set() + if tube_registry and tube_registry.has_active_tubes(): tube_ids = tube_registry.all_tube_ids() - for tube_id in tube_ids: - # Get conversation IDs for this tube + in_process_tube_ids.add(tube_id) conversation_ids = tube_registry.get_conversation_ids_by_tube_id(tube_id) - - # Get tunnel session for detailed info tunnel_session = get_tunnel_session(tube_id) - # Record title record_title = tunnel_session.record_title if tunnel_session and tunnel_session.record_title else f"{bcolors.WARNING}unknown{bcolors.ENDC}" - # Remote target if tunnel_session and tunnel_session.target_host and tunnel_session.target_port: remote_target = f"{tunnel_session.target_host}:{tunnel_session.target_port}" else: remote_target = f"{bcolors.WARNING}unknown{bcolors.ENDC}" - # Local listening address if tunnel_session and 
tunnel_session.host and tunnel_session.port: local_addr = f"{bcolors.OKGREEN}{tunnel_session.host}:{tunnel_session.port}{bcolors.ENDC}" else: local_addr = f"{bcolors.WARNING}unknown{bcolors.ENDC}" - # Tunnel ID (tube_id) - this is what's needed for stopping - tunnel_id = tube_id - - # Conversation ID - WebRTC signaling identifier conv_id = conversation_ids[0] if conversation_ids else (tunnel_session.conversation_id if tunnel_session else 'none') - # Connection state try: state = tube_registry.get_connection_state(tube_id) status_color = f"{bcolors.OKGREEN}" if state.lower() == "connected" else f"{bcolors.WARNING}" status = f"{status_color}{state}{bcolors.ENDC}" - except: + except Exception: status = f"{bcolors.WARNING}unknown{bcolors.ENDC}" - row = [ - record_title, - remote_target, - local_addr, - tunnel_id, - conv_id, - status, - ] - table.append(row) - - dump_report_data(table, headers, fmt='table', filename="", row_number=False, column_width=None) - else: - # Rust WebRTC library is required for tunnel operations - print(f"{bcolors.FAIL}This command requires the Rust WebRTC library (keeper_pam_webrtc_rs).{bcolors.ENDC}") - print(f"{bcolors.OKBLUE}Please ensure the keeper_pam_webrtc_rs module is installed and available.{bcolors.ENDC}") + table.append([record_title, remote_target, local_addr, tube_id, conv_id, status]) + + # Cross-process tunnels from the file-based registry + for entry in list_registered_tunnels(): + if entry.get('tube_id') in in_process_tube_ids: + continue + pid = entry.get('pid') + rec = entry.get('record_title') or entry.get('record_uid', '?') + th = entry.get('target_host') + tp = entry.get('target_port') + remote = f"{th}:{tp}" if th and tp else f"{bcolors.WARNING}n/a{bcolors.ENDC}" + h = entry.get('host', '127.0.0.1') + p = entry.get('port', '?') + local = f"{bcolors.OKGREEN}{h}:{p}{bcolors.ENDC}" + tid = entry.get('tube_id', '') + mode = entry.get('mode', '?') + status = f"{bcolors.OKGREEN}{mode} (PID {pid}){bcolors.ENDC}" + 
table.append([rec, remote, local, tid, '', status]) + + if not table: + logging.warning(f"{bcolors.OKBLUE}No Tunnels running{bcolors.ENDC}") return + dump_report_data(table, headers, fmt='table', filename="", row_number=False, column_width=None) + class PAMTunnelStopCommand(Command): # Note on workflow lease lifecycle: stopping a tunnel intentionally does @@ -212,6 +224,22 @@ def execute(self, params, **kwargs): if tube_id: matching_tubes = [tube_id] + # Fall back to file-based registry (cross-process tunnels) + if not matching_tubes: + for entry in list_registered_tunnels(): + if uid in (entry.get('tube_id', ''), entry.get('record_uid', ''), + entry.get('record_title', '')): + pid = entry.get('pid') + if pid and is_pid_alive(pid): + if stop_tunnel_process(pid): + print(f"{bcolors.OKGREEN}Sent stop signal to tunnel process " + f"(PID {pid}, {entry.get('mode', '?')} mode){bcolors.ENDC}") + else: + print(f"{bcolors.FAIL}Failed to signal PID {pid}{bcolors.ENDC}") + else: + unregister_tunnel(pid) + return + if not matching_tubes: raise CommandError('tunnel stop', f"No active tunnels found matching '{uid}'") @@ -242,43 +270,48 @@ def execute(self, params, **kwargs): raise CommandError('tunnel stop', f"Failed to stop any tunnels matching '{uid}'") def _stop_all_tunnels(self, params): - """Stop all active tunnels""" + """Stop all active tunnels (in-process and cross-process).""" + stopped_count = 0 + failed_count = 0 + + # In-process tunnels tube_registry = get_or_create_tube_registry(params) - if not tube_registry: - raise CommandError('tunnel stop', 'This command requires the Rust WebRTC library') + if tube_registry: + all_tube_ids = tube_registry.all_tube_ids() + if all_tube_ids: + print(f"{bcolors.WARNING}Stopping {len(all_tube_ids)} in-process tunnel(s):{bcolors.ENDC}") + for tube_id in all_tube_ids: + try: + tube_registry.close_tube(tube_id, reason=CloseConnectionReasons.Normal) + print(f" {bcolors.OKGREEN}Stopped: {tube_id}{bcolors.ENDC}") + stopped_count += 1 + 
except Exception as e: + print(f" {bcolors.FAIL}Failed: {tube_id}: {e}{bcolors.ENDC}") + failed_count += 1 + + # Cross-process tunnels from file registry + registered = list_registered_tunnels() + if registered: + print(f"{bcolors.WARNING}Stopping {len(registered)} external tunnel(s):{bcolors.ENDC}") + for entry in registered: + pid = entry.get('pid') + if stop_tunnel_process(pid): + print(f" {bcolors.OKGREEN}Sent stop signal to PID {pid} " + f"({entry.get('mode', '?')} mode, {entry.get('host')}:{entry.get('port')}){bcolors.ENDC}") + stopped_count += 1 + else: + print(f" {bcolors.FAIL}Failed to signal PID {pid}{bcolors.ENDC}") + failed_count += 1 + unregister_tunnel(pid) - # Get all active tunnel IDs - all_tube_ids = tube_registry.all_tube_ids() - - if not all_tube_ids: + if stopped_count == 0 and failed_count == 0: print(f"{bcolors.WARNING}No active tunnels to stop.{bcolors.ENDC}") return - # Confirm with user - print(f"{bcolors.WARNING}About to stop {len(all_tube_ids)} active tunnel(s):{bcolors.ENDC}") - for tube_id in all_tube_ids: - print(f" - {tube_id}") - - # Stop all tunnels - stopped_count = 0 - failed_count = 0 - for tube_id in all_tube_ids: - try: - tube_registry.close_tube(tube_id, reason=CloseConnectionReasons.Normal) - print(f"{bcolors.OKGREEN}Stopped tunnel: {tube_id}{bcolors.ENDC}") - stopped_count += 1 - except Exception as e: - print(f"{bcolors.FAIL}Failed to stop tunnel {tube_id}: {e}{bcolors.ENDC}") - failed_count += 1 - - # Summary if stopped_count > 0: print(f"\n{bcolors.OKGREEN}Successfully stopped {stopped_count} tunnel(s).{bcolors.ENDC}") if failed_count > 0: print(f"{bcolors.FAIL}Failed to stop {failed_count} tunnel(s).{bcolors.ENDC}") - - if stopped_count == 0: - raise CommandError('tunnel stop', 'Failed to stop any tunnels') class PAMTunnelEditCommand(Command): @@ -538,6 +571,30 @@ class PAMTunnelStartCommand(Command): pam_cmd_parser.add_argument('--wait-timeout', '-wt', required=False, dest='workflow_wait_timeout', type=int, default=600, 
help='Maximum seconds to poll for approval when --wait is set (default: 600).') + pam_cmd_parser.add_argument('--foreground', '-fg', required=False, dest='foreground', action='store_true', + help='Keep the tunnel running in the foreground, blocking until ' + 'SIGTERM/SIGINT/Ctrl+C is received. Use this flag when running ' + 'tunnels from scripts, systemd services, or any non-interactive ' + 'context where the process would otherwise exit immediately.') + pam_cmd_parser.add_argument('--pid-file', required=False, dest='pid_file', action='store', + help='Write the process PID to a file when using --foreground. ' + 'Enables stopping the tunnel from another terminal via ' + 'kill -SIGTERM $(cat ). The file is removed on shutdown.') + pam_cmd_parser.add_argument('--run', '-R', required=False, dest='run_command', action='store', + help='Shell command to execute while tunnel is active. ' + 'The command runs via the system shell (supports pipes, redirects, env vars). ' + 'The tunnel is stopped and Commander exits with the command\'s exit code. ' + "Example: --run 'pg_dump -h localhost -p 5432 mydb > backup.sql'") + pam_cmd_parser.add_argument('--timeout', required=False, dest='connect_timeout', action='store', + type=int, default=30, + help='Seconds to wait for the tunnel to connect before giving up ' + '(used with --foreground, --background, and --run). Default: 30') + pam_cmd_parser.add_argument('--background', '-bg', required=False, dest='background', action='store_true', + help='Start the tunnel in a background process, wait for ' + 'connection readiness, then return control to the caller. ' + 'The tunnel continues running independently. Use --pid-file ' + 'to write the daemon PID for later shutdown. 
Use ' + "'pam tunnel list' / 'pam tunnel stop' from any session.") def get_parser(self): return PAMTunnelStartCommand.pam_cmd_parser @@ -658,8 +715,12 @@ def execute(self, params, **kwargs): target_host = kwargs.get('target_host') target_port = kwargs.get('target_port') - # If not provided via command line, prompt interactively + # If not provided via command line, prompt interactively (or error in batch mode) if not target_host: + if params.batch_mode: + raise CommandError('tunnel start', + 'Target host is required in non-interactive mode. ' + 'Use --target-host --target-port ') print(f"{bcolors.WARNING}This resource requires you to supply the target host and port.{bcolors.ENDC}") try: target_host = input(f"{bcolors.OKBLUE}Enter target hostname or IP address: {bcolors.ENDC}").strip() @@ -671,6 +732,10 @@ def execute(self, params, **kwargs): return if not target_port: + if params.batch_mode: + raise CommandError('tunnel start', + 'Target port is required in non-interactive mode. ' + 'Use --target-host --target-port ') try: target_port_str = input(f"{bcolors.OKBLUE}Enter target port number: {bcolors.ENDC}").strip() if not target_port_str: @@ -733,6 +798,117 @@ def execute(self, params, **kwargs): # Use Rust WebRTC implementation with configurable trickle ICE trickle_ice = not no_trickle_ice + + # Validate mutual exclusivity of mode flags + background = kwargs.get('background', False) + foreground = kwargs.get('foreground', False) + run_command = kwargs.get('run_command') + mode_flags = sum(bool(f) for f in [background, foreground, run_command]) + if mode_flags > 1: + raise CommandError('tunnel start', + '--foreground, --background, and --run are mutually exclusive. ' + 'Use only one at a time.') + + # --background: launch a separate Commander process with --foreground, + # then poll the file-based tunnel registry for readiness. 
+ if background: + if not params.batch_mode: + print(f"\n{bcolors.OKBLUE}Note: --background is not needed inside the interactive shell.{bcolors.ENDC}") + print(f"{bcolors.OKBLUE}The tunnel is already running and will persist until you exit the shell.{bcolors.ENDC}") + print(f"{bcolors.OKBLUE}Use 'pam tunnel list' to see active tunnels, 'pam tunnel stop' to stop them.{bcolors.ENDC}\n") + return + + connect_timeout = kwargs.get('connect_timeout', 30) + pid_file = kwargs.get('pid_file') + + bg_cmd = [sys.executable, '-m', 'keepercommander'] + if params.config_filename: + bg_cmd.extend(['--config', os.path.abspath(params.config_filename)]) + if hasattr(params, 'server') and params.server: + bg_cmd.extend(['--server', params.server]) + + tunnel_parts = ['pam', 'tunnel', 'start', record_uid, + '--port', str(port), '--foreground', + '--timeout', str(connect_timeout)] + if host and host != '127.0.0.1': + tunnel_parts.extend(['--host', host]) + if target_host: + tunnel_parts.extend(['--target-host', str(target_host)]) + if target_port: + tunnel_parts.extend(['--target-port', str(target_port)]) + if pid_file: + tunnel_parts.extend(['--pid-file', pid_file]) + if no_trickle_ice: + tunnel_parts.append('--no-trickle-ice') + bg_cmd.append(' '.join(tunnel_parts)) + + print(f"{bcolors.OKBLUE}Starting tunnel in background...{bcolors.ENDC}") + try: + bg_proc = subprocess.Popen( + bg_cmd, + stdin=subprocess.DEVNULL, + stdout=subprocess.DEVNULL, + stderr=subprocess.PIPE, + start_new_session=True, + ) + except Exception as e: + raise CommandError('tunnel start', f'Failed to launch background process: {e}') + + # Parent waits longer than child to account for process startup time. + # The child's --timeout controls the actual WebRTC connection timeout. 
+ bg_deadline = time.time() + connect_timeout + PARENT_GRACE_SECONDS + bg_info = None + while time.time() < bg_deadline: + for entry in list_registered_tunnels(clean_stale=False): + if entry.get('pid') == bg_proc.pid and entry.get('record_uid') == record_uid: + bg_info = entry + break + if bg_info: + break + poll_code = bg_proc.poll() + if poll_code is not None: + stderr_output = '' + try: + stderr_output = bg_proc.stderr.read().decode('utf-8', errors='replace').strip() + except Exception: + pass + print(f"{bcolors.FAIL}Background tunnel process exited before tunnel was ready " + f"(code {poll_code}){bcolors.ENDC}") + if stderr_output: + print(f"{bcolors.FAIL}{stderr_output}{bcolors.ENDC}") + elif poll_code == 0: + print(f"{bcolors.FAIL}Process exited before registry registration. " + f"Check WebRTC connectivity and gateway logs.{bcolors.ENDC}") + return + time.sleep(0.5) + + if not bg_info: + print(f"{bcolors.FAIL}Tunnel did not become ready within the timeout{bcolors.ENDC}") + try: + bg_proc.terminate() + except Exception: + pass + return + + print(f"\n{bcolors.OKGREEN}Tunnel running in background{bcolors.ENDC}") + print(f" Record: {bg_info.get('record_title') or record_uid}") + if bg_info.get('tube_id'): + print(f" Tube ID: {bg_info['tube_id']}") + print(f" Listening: {host}:{port}") + print(f" Daemon PID: {bg_proc.pid}") + if pid_file: + print(f" PID file: {pid_file}") + print(f"\n{bcolors.OKGREEN}To stop: pam tunnel stop {record_uid} or " + f"kill -SIGTERM {bg_proc.pid}{bcolors.ENDC}") + if pid_file: + print(f" or: kill -SIGTERM $(cat {pid_file})") + print(f"{bcolors.OKBLUE}Use 'pam tunnel list' from any Commander session " + f"to see this tunnel.{bcolors.ENDC}") + if platform.system() == 'Windows': + print(f"{bcolors.WARNING}Note: On Windows, tunnel stop uses hard termination. 
" + f"WebRTC cleanup is best-effort.{bcolors.ENDC}") + return + result = start_rust_tunnel(params, record_uid, gateway_uid, host, port, seed, target_host, target_port, socks, trickle_ice, record.title, allow_supply_host=allow_supply_host, two_factor_value=two_factor_value) if result and result.get("success"): @@ -794,6 +970,13 @@ def _close_on_lease_expiry(_tube_id=tube_id, _record_uid=record_uid): # keeper_pam_webrtc_rs provides a hard-kill that # also drops the local listener. # tube_registry.close_tube(_tube_id, reason=CloseConnectionReasons.Normal) + # Wake any --foreground / --run blocking wait so the + # process self-terminates instead of hanging past lease + # expiry. Default interactive mode does not register + # an event here — it has no blocking wait to break. + shutdown_event = _LEASE_SHUTDOWN_EVENTS_BY_RECORD.get(_record_uid) + if shutdown_event is not None: + shutdown_event.set() except Exception as e: logging.debug(f"Lease-expiry tunnel notice failed: {e}") finally: @@ -806,7 +989,181 @@ def _close_on_lease_expiry(_tube_id=tube_id, _record_uid=record_uid): _LEASE_EXPIRY_TIMERS_BY_RECORD[record_uid] = timer timer.start() # The helper will show endpoint table when local socket is actually listening - pass + connect_timeout = kwargs.get('connect_timeout', 30) + + if run_command: + run_tube_id = result.get("tube_id") + run_tube_registry = result.get("tube_registry") + + print(f"{bcolors.OKBLUE}Waiting for tunnel to connect (timeout: {connect_timeout}s)...{bcolors.ENDC}") + conn_status = wait_for_tunnel_connection(result, timeout=connect_timeout, show_progress=False) + + if not conn_status.get("connected"): + err = conn_status.get("error", "Connection failed") + print(f"{bcolors.FAIL}Tunnel did not connect: {err}{bcolors.ENDC}") + if run_tube_registry and run_tube_id: + try: + run_tube_registry.close_tube(run_tube_id, reason=CloseConnectionReasons.Normal) + unregister_tunnel_session(run_tube_id) + except Exception: + pass + return + + try: + 
register_tunnel(os.getpid(), record_uid, run_tube_id, host, port, + target_host, target_port, mode='run', + record_title=record.title if record else None) + except CommandError as reg_err: + print(f"{bcolors.FAIL}{reg_err}{bcolors.ENDC}") + if run_tube_registry and run_tube_id: + try: + run_tube_registry.close_tube(run_tube_id, reason=CloseConnectionReasons.Normal) + unregister_tunnel_session(run_tube_id) + except Exception: + pass + return + + print(f"{bcolors.OKGREEN}Tunnel ready{bcolors.ENDC} {host}:{port} -> {target_host}:{target_port}") + if platform.system() == 'Windows': + print(f"{bcolors.WARNING}Note: On Windows, tunnel stop uses hard termination. " + f"WebRTC cleanup is best-effort.{bcolors.ENDC}") + print(f"{bcolors.OKBLUE}Running:{bcolors.ENDC} {run_command}\n") + + cmd_exit = 1 + try: + # shell=True is intentional: --run commands need shell features (pipes, redirects, env vars). + # The user is already authenticated to Keeper and controls the command string. + proc = subprocess.run(run_command, shell=True) + cmd_exit = proc.returncode if proc.returncode is not None else 1 + except KeyboardInterrupt: + cmd_exit = 130 + except Exception as run_err: + logging.warning("Error running command: %s", run_err) + cmd_exit = 1 + finally: + unregister_tunnel() + print(f"\n{bcolors.OKBLUE}Stopping tunnel {run_tube_id or record_uid}...{bcolors.ENDC}") + try: + if run_tube_registry and run_tube_id: + run_tube_registry.close_tube(run_tube_id, reason=CloseConnectionReasons.Normal) + unregister_tunnel_session(run_tube_id) + print(f"{bcolors.OKGREEN}Tunnel stopped.{bcolors.ENDC}") + except Exception as stop_err: + logging.warning("Error stopping tunnel: %s", stop_err) + + raise SystemExit(cmd_exit) + + elif foreground: + if not params.batch_mode: + print(f"\n{bcolors.OKBLUE}Note: --foreground is not needed inside the interactive shell.{bcolors.ENDC}") + print(f"{bcolors.OKBLUE}The tunnel is already running and will persist until you exit the shell.{bcolors.ENDC}") + 
print(f"{bcolors.OKBLUE}Use 'pam tunnel list' to see active tunnels, 'pam tunnel stop' to stop them.{bcolors.ENDC}\n") + else: + fg_tube_id = result.get("tube_id") + fg_tube_registry = result.get("tube_registry") + fg_shutdown = threading.Event() + pid_file = kwargs.get('pid_file') + # Wire lease-expiry callback to break out of fg_shutdown.wait() + # if the workflow lease expires while we're blocking. Cleared + # in the finally block below. + _LEASE_SHUTDOWN_EVENTS_BY_RECORD[record_uid] = fg_shutdown + + def _fg_signal_handler(signum, _frame): + sig_name = signal.Signals(signum).name if hasattr(signal, 'Signals') else str(signum) + print(f"\n{bcolors.WARNING}Received {sig_name}, stopping tunnel...{bcolors.ENDC}") + fg_shutdown.set() + + prev_sigterm = signal.signal(signal.SIGTERM, _fg_signal_handler) + prev_sigint = signal.signal(signal.SIGINT, _fg_signal_handler) + prev_sighup = None + if hasattr(signal, 'SIGHUP'): + prev_sighup = signal.signal(signal.SIGHUP, _fg_signal_handler) + + print(f"{bcolors.OKBLUE}Waiting for tunnel to connect (timeout: {connect_timeout}s)...{bcolors.ENDC}") + conn_status = wait_for_tunnel_connection(result, timeout=connect_timeout, show_progress=False) + + if not conn_status.get("connected"): + signal.signal(signal.SIGTERM, prev_sigterm) + signal.signal(signal.SIGINT, prev_sigint) + if prev_sighup is not None: + signal.signal(signal.SIGHUP, prev_sighup) + err = conn_status.get("error", "Connection failed") + print(f"{bcolors.FAIL}Tunnel did not connect: {err}{bcolors.ENDC}") + if fg_tube_registry and fg_tube_id: + try: + fg_tube_registry.close_tube(fg_tube_id, reason=CloseConnectionReasons.Normal) + unregister_tunnel_session(fg_tube_id) + except Exception: + pass + return + + if pid_file: + try: + with open(pid_file, 'w') as pf: + pf.write(str(os.getpid())) + except Exception as e: + logging.warning("Could not write PID file '%s': %s", pid_file, e) + pid_file = None + + try: + register_tunnel(os.getpid(), record_uid, fg_tube_id, host, 
port, + target_host, target_port, mode='foreground', + record_title=record.title if record else None) + except CommandError as reg_err: + print(f"{bcolors.FAIL}{reg_err}{bcolors.ENDC}") + signal.signal(signal.SIGTERM, prev_sigterm) + signal.signal(signal.SIGINT, prev_sigint) + if prev_sighup is not None: + signal.signal(signal.SIGHUP, prev_sighup) + if fg_tube_registry and fg_tube_id: + try: + fg_tube_registry.close_tube(fg_tube_id, reason=CloseConnectionReasons.Normal) + unregister_tunnel_session(fg_tube_id) + except Exception: + pass + return + + print(f"\n{bcolors.OKGREEN}Tunnel running in foreground mode{bcolors.ENDC}") + print(f" Record: {record_uid}") + if fg_tube_id: + print(f" Tube ID: {fg_tube_id}") + print(f" Listening: {host}:{port}") + print(f" PID: {os.getpid()}") + if pid_file: + print(f" PID file: {pid_file}") + print(f"\n{bcolors.OKGREEN}To stop: kill -SIGTERM {os.getpid()} (or Ctrl+C) or pam tunnel stop {record_uid}{bcolors.ENDC}\n") + if platform.system() == 'Windows': + print(f"{bcolors.WARNING}Note: On Windows, tunnel stop uses hard termination. 
" + f"WebRTC cleanup is best-effort.{bcolors.ENDC}\n") + + try: + fg_shutdown.wait() + except KeyboardInterrupt: + pass + finally: + _LEASE_SHUTDOWN_EVENTS_BY_RECORD.pop(record_uid, None) + unregister_tunnel() + signal.signal(signal.SIGTERM, prev_sigterm) + signal.signal(signal.SIGINT, prev_sigint) + if prev_sighup is not None: + signal.signal(signal.SIGHUP, prev_sighup) + print(f"\n{bcolors.OKBLUE}Stopping tunnel {fg_tube_id or record_uid}...{bcolors.ENDC}") + try: + if fg_tube_registry and fg_tube_id: + fg_tube_registry.close_tube(fg_tube_id, reason=CloseConnectionReasons.Normal) + unregister_tunnel_session(fg_tube_id) + else: + stop_cmd = PAMTunnelStopCommand() + stop_cmd.execute(params, uid=record_uid) + print(f"{bcolors.OKGREEN}Tunnel stopped.{bcolors.ENDC}") + except Exception as fg_err: + logging.warning("Error stopping tunnel during foreground shutdown: %s", fg_err) + finally: + if pid_file: + try: + os.remove(pid_file) + except OSError: + pass else: # Print failure message error_msg = result.get("error", "Unknown error") if result else "Failed to start tunnel" diff --git a/keepercommander/commands/tunnel_registry.py b/keepercommander/commands/tunnel_registry.py new file mode 100644 index 000000000..4b8c9f850 --- /dev/null +++ b/keepercommander/commands/tunnel_registry.py @@ -0,0 +1,239 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' /keeper-tunnel-sessions/.json so ``pam tunnel list`` and +``pam tunnel stop`` can discover tunnels across Commander processes. + +The registry lives under the system temp directory rather than ~/.keeper/ +so it survives credential removal/replacement. Temp directories are +cleared on reboot, matching tunnel lifecycle (tunnels do not survive reboots). 
+""" + +from __future__ import annotations + +import json +import logging +import os +import platform +import signal +import tempfile +import time +from pathlib import Path + +from ..error import CommandError + +logger = logging.getLogger(__name__) + +#: Parent poll allowance beyond the child's ``--timeout`` for ``--background`` (process startup). +PARENT_GRACE_SECONDS = 10 + +# Not thread-safe; concurrent first-access may double-clean (harmless, idempotent). +_registry_dir_initialized = False + + +def normalize_bind_host(host) -> str: + """Normalize host for duplicate local bind detection (best-effort). + + Only handles common aliases; ``0.0.0.0`` vs ``127.0.0.1`` conflicts + are caught at the OS bind level, not here. + """ + if host is None: + return '' + h = str(host).strip().lower() + if h == 'localhost': + return '127.0.0.1' + return h + + +def tunnel_registry_dir() -> Path: + """Return (and create) the tunnel session registry directory.""" + global _registry_dir_initialized + base = Path(tempfile.gettempdir()) / 'keeper-tunnel-sessions' + existed = base.exists() + base.mkdir(parents=True, exist_ok=True) + if os.name != 'nt': + try: + os.chmod(base, 0o700) + except OSError: + pass + if not _registry_dir_initialized: + _registry_dir_initialized = True + if existed: + _clean_stale_registry_files(base) + return base + + +def _clean_stale_registry_files(reg_dir: Path) -> None: + """Remove dead or corrupt JSON entries under reg_dir.""" + try: + for fname in os.listdir(reg_dir): + if not fname.endswith('.json'): + continue + fpath = reg_dir / fname + try: + with open(fpath, encoding='utf-8') as f: + data = json.load(f) + pid = data.get('pid') + if pid and is_pid_alive(pid): + continue + os.remove(fpath) + except Exception as exc: + logger.debug('Removing corrupt tunnel registry file %s: %s', fpath, exc) + try: + os.remove(fpath) + except OSError: + pass + except OSError: + pass + + +def register_tunnel( + pid, + record_uid, + tube_id, + host, + port, + 
target_host=None, + target_port=None, + mode='foreground', + record_title=None, +): + """Write a JSON file for an active tunnel so other processes can see it. + + Uses atomic write (temp file + rename) so readers never see partial data. + Stale entries are cleaned before duplicate checks (Issue 6 / 7). + """ + existing = list_registered_tunnels(clean_stale=True) + nh = normalize_bind_host(host) + try: + p_int = int(port) + except (TypeError, ValueError): + p_int = None + for entry in existing: + if entry.get('pid') == pid: + continue + try: + entry_port = int(entry.get('port') or 0) + except (TypeError, ValueError): + continue + if p_int is not None and normalize_bind_host(entry.get('host')) == nh and entry_port == p_int: + raise CommandError( + 'pam tunnel start', + f'Port {port} on {host} is already in use by tunnel PID {entry.get("pid")} ' + f'(record {entry.get("record_uid")}). ' + f'Use "pam tunnel stop {entry.get("record_uid")}" first.', + ) + + reg_dir = tunnel_registry_dir() + path = reg_dir / f'{pid}.json' + data = { + 'pid': pid, + 'record_uid': record_uid, + 'tube_id': tube_id, + 'host': host, + 'port': port, + 'target_host': target_host, + 'target_port': target_port, + 'mode': mode, + 'record_title': record_title, + 'started': time.strftime('%Y-%m-%d %H:%M:%S'), + } + tmp_path = path.with_suffix('.json.tmp') + try: + with open(tmp_path, 'w', encoding='utf-8') as f: + json.dump(data, f) + os.replace(tmp_path, path) + except Exception as exc: + logger.debug('Could not write tunnel registry file %s: %s', path, exc) + try: + os.remove(tmp_path) + except OSError: + pass + + +def unregister_tunnel(pid=None): + """Remove the registry file for a tunnel (defaults to current PID).""" + pid = pid or os.getpid() + path = tunnel_registry_dir() / f'{pid}.json' + try: + os.remove(path) + except OSError: + pass + + +def is_pid_alive(pid) -> bool: + """Return True if a process with the given PID is still running.""" + if os.name == 'nt': + import ctypes + kernel32 = 
ctypes.windll.kernel32 + handle = kernel32.OpenProcess(0x100000, False, pid) # SYNCHRONIZE + if handle: + kernel32.CloseHandle(handle) + return True + return False + try: + os.kill(pid, 0) + return True + except OSError: + return False + + +def stop_tunnel_process(pid: int) -> bool: + """Send termination to a tunnel process. Returns True if a signal was sent. + + On Unix, sends SIGTERM for graceful shutdown (target cleans registry/WebRTC). + On Windows, SIGTERM maps to TerminateProcess; the registry row is removed + here because the target cannot run cleanup handlers. + """ + if not is_pid_alive(pid): + return False + try: + if platform.system() == 'Windows': + unregister_tunnel(pid) + os.kill(pid, signal.SIGTERM) + return True + except (ProcessLookupError, PermissionError, OSError): + return False + + +def list_registered_tunnels(clean_stale=True): + """Read registry files. Optionally remove dead or corrupt entries. + + Returns a list of dicts for tunnels whose owning process is still alive. 
+ """ + reg_dir = tunnel_registry_dir() + result = [] + try: + fnames = os.listdir(reg_dir) + except OSError: + return result + for fname in fnames: + if not fname.endswith('.json'): + continue + fpath = reg_dir / fname + try: + with open(fpath, encoding='utf-8') as f: + data = json.load(f) + pid = data.get('pid') + if pid and is_pid_alive(pid): + result.append(data) + elif clean_stale: + os.remove(fpath) + except Exception as exc: + logger.debug('Removing corrupt tunnel registry file %s: %s', fpath, exc) + if clean_stale: + try: + os.remove(fpath) + except OSError: + pass + return result diff --git a/keepercommander/commands/utils.py b/keepercommander/commands/utils.py index c35f5dc07..1df41bc88 100644 --- a/keepercommander/commands/utils.py +++ b/keepercommander/commands/utils.py @@ -2006,7 +2006,11 @@ def execute(self, params, **kwargs): show_legacy = kwargs.get('legacy', False) if not help_commands: from ..cli import display_command_help - display_command_help(params.enterprise_ec_key, show_legacy=show_legacy) + display_command_help( + params.enterprise_ec_key, + show_legacy=show_legacy, + show_keeper_drive=not params.is_feature_disallowed('keeper_drive') + ) return if isinstance(help_commands, list) and len(help_commands) > 0: diff --git a/keepercommander/keeper_drive/__init__.py b/keepercommander/keeper_drive/__init__.py new file mode 100644 index 000000000..f9f128f18 --- /dev/null +++ b/keepercommander/keeper_drive/__init__.py @@ -0,0 +1,78 @@ +""" +KeeperDrive API package — single public facade. + +Every symbol that the commands layer (or any external consumer) needs is +available here via ``from keepercommander import keeper_drive as _kd``. + +Imports are **lazy** (using module-level ``__getattr__``) to avoid circular +dependencies during early module loading (e.g. ``api.py`` → ``sync_down.py`` +→ this package → submodules that import ``api``). 
def __getattr__(name):
    """Lazily import *name* from its owning submodule (PEP 562).

    The resolved object is cached in ``globals()`` so subsequent lookups
    bypass this hook entirely.
    """
    owner = _LAZY_REGISTRY.get(name)
    if owner is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = _importlib.import_module(f'.{owner}', __name__)
    value = getattr(module, name)
    globals()[name] = value
    return value
def get_record_key(params, record_uid: str, raise_on_missing: bool = True) -> Optional[bytes]:
    """Look up the plaintext record key for *record_uid*.

    Searches the KeeperDrive record cache first, then the legacy record
    cache. Returns None (or raises ValueError, depending on
    *raise_on_missing*) when neither cache holds the key.
    """
    kd_cache = getattr(params, 'keeper_drive_records', {})
    legacy_cache = getattr(params, 'record_cache', {})
    for cache in (kd_cache, legacy_cache):
        entry = cache.get(record_uid)
        if entry and 'record_key_unencrypted' in entry:
            return entry['record_key_unencrypted']
    if not raise_on_missing:
        return None
    raise ValueError(
        f"Record key not found for record {record_uid}. "
        f"Record may not exist or is not accessible.")
def resolve_user_uid_bytes(params, user_identifier: str) -> Optional[bytes]:
    """Map an email or base64-url UID string to raw UID bytes.

    Lookup order: user_cache → enterprise users → base64 decode.
    Returns None when the identifier cannot be resolved.
    """
    if '@' not in user_identifier:
        # Not an email: treat it as a base64-url encoded UID.
        try:
            return utils.base64_url_decode(user_identifier)
        except Exception:
            return None

    wanted = user_identifier.lower()
    for uid_str, username in getattr(params, 'user_cache', {}).items():
        if username.lower() == wanted:
            return utils.base64_url_decode(uid_str)

    enterprise = getattr(params, 'enterprise', None)
    if enterprise:
        for user in enterprise.get('users', []):
            if user.get('username', '').lower() == wanted:
                account_uid = user.get('user_account_uid')
                if account_uid:
                    return utils.base64_url_decode(account_uid)
                # Matched the user but no UID on record: stop searching.
                break
    return None
def resolve_team_identifier(params, team_identifier: str) -> Optional[Tuple[str, bytes]]:
    """Resolve a team name/UID to ``(team_uid_b64, team_uid_bytes)`` or ``None``."""
    raw_uid = resolve_team_uid_bytes(params, team_identifier)
    if not raw_uid:
        return None
    return utils.base64_url_encode(raw_uid), raw_uid
def encrypt_for_team(plaintext_key: bytes, team_keys,
                     prefer_aes: bool = False,
                     forbid_rsa: bool = False) -> Tuple[bytes, int]:
    """Encrypt *plaintext_key* with the best available team key.

    Returns ``(ciphertext, key_type)`` where key_type is a folder_pb2
    encryption-type constant. Preference order: shared AES key (when
    *prefer_aes*), then RSA (unless *forbid_rsa*), then ECC.
    Raises ValueError when no usable key exists.
    """
    aes_key = getattr(team_keys, 'aes', None)
    rsa_pub = getattr(team_keys, 'rsa', None)
    ec_pub = getattr(team_keys, 'ec', None)

    if prefer_aes and aes_key:
        # GCM when RSA/CBC-era formats are forbidden, legacy CBC otherwise.
        if forbid_rsa:
            return (crypto.encrypt_aes_v2(plaintext_key, aes_key),
                    folder_pb2.encrypted_by_data_key_gcm)
        return (crypto.encrypt_aes_v1(plaintext_key, aes_key),
                folder_pb2.encrypted_by_data_key)

    if not forbid_rsa and rsa_pub:
        public = crypto.load_rsa_public_key(rsa_pub)
        return (crypto.encrypt_rsa(plaintext_key, public),
                folder_pb2.encrypted_by_public_key)

    if ec_pub:
        public = crypto.load_ec_public_key(ec_pub)
        return (crypto.encrypt_ec(plaintext_key, public),
                folder_pb2.encrypted_by_public_key_ecc)

    raise ValueError("No public key found for team")
+ + Returns: (public_key_object, use_ecc, user_uid_bytes, needs_invite) + """ + from ..proto import APIRequest_pb2 + from ..proto.record_pb2 import GetShareObjectsRequest, GetShareObjectsResponse + from ..params import PublicKeys + + recipient_public_key = None + use_ecc = False + needs_invite = False + recipient_uid_bytes = None + + def _load_pk(rsa_bytes, ec_bytes): + nonlocal use_ecc + if rsa_bytes: + use_ecc = False + return crypto.load_rsa_public_key(rsa_bytes) + if ec_bytes: + use_ecc = True + return crypto.load_ec_public_key(ec_bytes) + return None + + cache_key = recipient_email.lower() + cached = params.key_cache.get(recipient_email) or params.key_cache.get(cache_key) + if cached: + recipient_public_key = _load_pk(cached.rsa, cached.ec) + + if not recipient_public_key and hasattr(params, 'enterprise') and params.enterprise: + for user in params.enterprise.get('users', []): + if user.get('username', '').lower() == recipient_email.lower(): + rsa_b = utils.base64_url_decode(user['public_key']) if user.get('public_key') else None + ec_b = utils.base64_url_decode(user['public_key_ecc']) if user.get('public_key_ecc') else None + recipient_public_key = _load_pk(rsa_b, ec_b) + break + + if not recipient_public_key: + recipient_public_key, use_ecc, recipient_uid_bytes, needs_invite = \ + _fetch_public_key_from_api(params, recipient_email, _load_pk, + APIRequest_pb2, GetShareObjectsRequest, + GetShareObjectsResponse, PublicKeys) + + if not recipient_uid_bytes: + recipient_uid_bytes = resolve_user_uid_bytes(params, recipient_email) + + if not recipient_uid_bytes and require_uid: + try: + rq = GetShareObjectsRequest() + rs = api.communicate_rest(params, rq, 'vault/get_share_objects', + rs_type=GetShareObjectsResponse) + if not hasattr(params, 'user_cache'): + params.user_cache = {} + for ul in (rs.shareRelationships, rs.shareFamilyUsers, + rs.shareEnterpriseUsers, rs.shareMCEnterpriseUsers): + for su in ul: + if su.userAccountUid and su.username: + su_uid_b64 = 
def _fetch_public_key_from_api(params, recipient_email, _load_pk,
                               APIRequest_pb2, GetShareObjectsRequest,
                               GetShareObjectsResponse, PublicKeys):
    """Internal helper for the get_public_keys + share-objects fallback chain.

    Returns ``(public_key, use_ecc, uid_bytes, needs_invite)``. Transport
    errors are swallowed (logged at debug) and reported as "not found".
    """
    recipient_public_key = None
    use_ecc = False
    recipient_uid_bytes = None
    needs_invite = False

    try:
        lookup_email = recipient_email.lower()
        rq = APIRequest_pb2.GetPublicKeysRequest()
        rq.usernames.append(lookup_email)
        rs = api.communicate_rest(params, rq, 'vault/get_public_keys',
                                  rs_type=APIRequest_pb2.GetPublicKeysResponse)
        for pk in rs.keyResponses:
            if pk.username.lower() == recipient_email.lower():
                if pk.errorCode in ('', 'success'):
                    recipient_public_key = _load_pk(pk.publicKey or None, pk.publicEccKey or None)
                    if recipient_public_key:
                        # Bug fix: _load_pk mutates the *caller's* closure
                        # variable, which the caller then overwrites with this
                        # helper's return tuple — so compute use_ecc locally,
                        # mirroring _load_pk's RSA-first preference.
                        use_ecc = not pk.publicKey and bool(pk.publicEccKey)
                        params.key_cache[recipient_email] = PublicKeys(
                            aes=None, rsa=pk.publicKey or None, ec=pk.publicEccKey or None)
                elif pk.errorCode == 'no_active_share_exist':
                    # Registered email may differ in case/aliasing from the
                    # one supplied; retry with the server's canonical form.
                    recipient_public_key, use_ecc, recipient_uid_bytes, needs_invite = \
                        _retry_with_canonical_email(
                            params, recipient_email, _load_pk,
                            APIRequest_pb2, GetShareObjectsRequest,
                            GetShareObjectsResponse, PublicKeys)
                break
    except Exception as e:
        logger.debug('Failed to get the user for %s: %s',
                     recipient_email, e)

    return recipient_public_key, use_ecc, recipient_uid_bytes, needs_invite
def handle_share_invite(params, recipient_email, needs_invite):
    """Send a share invite when *needs_invite* is set; always raises ValueError.

    This function never returns normally once an invite is needed: it either
    reports that the invitation was sent (so the caller should retry later)
    or that no sharing relationship exists.
    """
    if not needs_invite:
        return
    try:
        from ..proto import APIRequest_pb2
        rq = APIRequest_pb2.SendShareInviteRequest()
        rq.email = recipient_email
        api.communicate_rest(params, rq, 'vault/send_share_invite')
    except ValueError:
        # Preserve ValueErrors raised by the transport layer unchanged.
        raise
    except Exception:
        raise ValueError(
            f"No sharing relationship with '{recipient_email}'. "
            f"Please invite them to share first, then repeat this command.")
    # Invite went out successfully — tell the caller to retry once accepted.
    raise ValueError(
        f"Share invitation has been sent to '{recipient_email}'. "
        f"Please repeat this command once the invitation is accepted.")
def load_user_public_key(params, user_email):
    """Fetch a user's public key, loading it from the server if uncached.

    Returns ``(public_key_object, use_ecc)``; raises ValueError when the
    user has no retrievable RSA or ECC key.
    """
    keys = params.key_cache.get(user_email)
    if not keys:
        # Cache miss: ask the server, then re-check the cache.
        api.load_user_public_keys(params, [user_email])
        keys = params.key_cache.get(user_email)
    if not keys:
        raise ValueError(f"Public key not found for user {user_email}")

    if keys.rsa:
        return crypto.load_rsa_public_key(keys.rsa), False
    if keys.ec:
        return crypto.load_ec_public_key(keys.ec), True
    raise ValueError(f"No valid public key (RSA or ECC) found for user {user_email}")
def create_folder_data(
    folder_uid, folder_name, encryption_key,
    parent_uid=None, folder_type=None,
    inherit_permissions=None, color=None,
    owner_username=None, owner_account_uid=None
):
    """Build a folder_pb2.FolderData protobuf for add/update requests.

    The folder name (and optional color) are JSON-encoded and encrypted
    with *encryption_key* using AES-GCM. All UID arguments are base64-url
    strings and are decoded to raw bytes for the wire format.
    """
    fd = folder_pb2.FolderData()
    fd.folderUid = utils.base64_url_decode(folder_uid)

    data_dict = {'name': folder_name}
    # 'none' is the sentinel for "no color" — omit it from the payload.
    if color and color != 'none':
        data_dict['color'] = color
    fd.data = crypto.encrypt_aes_v2(json.dumps(data_dict).encode(), encryption_key)

    # Optional fields: only set when provided so the server's defaults apply.
    if parent_uid:
        fd.parentUid = utils.base64_url_decode(parent_uid)
    if folder_type is not None:
        fd.type = folder_type
    if inherit_permissions is not None:
        fd.inheritUserPermissions = inherit_permissions
    if owner_username or owner_account_uid:
        oi = folder_pb2.UserInfo()
        if owner_username:
            oi.username = owner_username
        if owner_account_uid:
            oi.accountUid = utils.base64_url_decode(owner_account_uid)
        fd.ownerInfo.CopyFrom(oi)
    return fd
def _prepare_folder_for_creation(params, folder_uid, folder_name, parent_uid,
                                 color, inherit_permissions):
    """Build an encrypted FolderData message for a new folder.

    Generates a fresh 256-bit folder key, encrypts it with the parent
    folder's key when available (falling back to the user data key), and
    packages everything via create_folder_data. Shared by the single and
    batch creation paths (DRY).
    """
    folder_key = os.urandom(32)
    enc_key = params.data_key
    if parent_uid:
        parent_key = get_folder_key(params, parent_uid, raise_on_missing=False)
        if parent_key:
            enc_key = parent_key
        else:
            # Fix: use the module logger (named, filterable) instead of the
            # root logger, consistent with the rest of this package.
            logger.warning("Parent folder key not found for %s, using user data key", parent_uid)

    encrypted_fk = encrypt_folder_key(folder_key, enc_key, use_gcm=True)
    fd = create_folder_data(
        folder_uid=folder_uid, folder_name=folder_name, encryption_key=folder_key,
        parent_uid=parent_uid, folder_type=FolderUsageType.NORMAL,
        inherit_permissions=(SetBooleanValue.BOOLEAN_TRUE if inherit_permissions
                             else SetBooleanValue.BOOLEAN_FALSE),
        color=color)
    fd.folderKey = encrypted_fk
    return fd
def resolve_folder_identifier(params, folder_identifier):
    """Resolve a UID, folder name, or path to a folder UID.

    Lookup order: direct UID in any folder cache → unique name match among
    KeeperDrive folders → vault path resolution. Returns None when nothing
    matches; an ambiguous name logs a warning and also resolves to None.
    """
    for cache_attr in ('keeper_drive_folders', 'subfolder_cache', 'folder_cache'):
        if folder_identifier in getattr(params, cache_attr, {}):
            return folder_identifier

    matching = [uid for uid, obj in getattr(params, 'keeper_drive_folders', {}).items()
                if obj.get('name', '').lower() == folder_identifier.lower()]
    if len(matching) == 1:
        return matching[0]
    if len(matching) > 1:
        # Fix: use the module logger instead of the root logger, consistent
        # with the rest of this package.
        logger.warning("Multiple folders match '%s'. Use UID instead.", folder_identifier)
        return None

    # Fall back to interpreting the identifier as a vault path.
    rs = try_resolve_path(params, folder_identifier)
    if rs is not None:
        folder, pattern = rs
        # A trailing unmatched pattern means the path did not fully resolve.
        if folder and not pattern:
            return folder.uid
    return None
def update_folder_v3(params, folder_uid, folder_name=None, color=None,
                     inherit_permissions=None):
    """Update the name, color, and/or permission inheritance of one folder.

    Returns a result dict with folder_uid/status/message/success; raises
    ValueError for bad input and KeeperApiError when the server returns
    no results.
    """
    if all(v is None for v in (folder_name, color, inherit_permissions)):
        raise ValueError("At least one update field required")
    resolved = resolve_folder_identifier(params, folder_uid)
    if not resolved:
        raise ValueError(f"Folder '{folder_uid}' not found")

    payload = _build_update_data(params, resolved, folder_name, color, inherit_permissions)
    response = folder_update_v3(params, [payload])
    if not response.folderUpdateResults:
        raise KeeperApiError('no_results', 'No results from folder update')

    outcome = response.folderUpdateResults[0]
    return {
        'folder_uid': resolved,
        'status': folder_pb2.FolderModifyStatus.Name(outcome.status),
        'message': outcome.message,
        'success': outcome.status == folder_pb2.SUCCESS,
    }
fi = spec.get('folder_uid') + if not fi: + raise ValueError(f"Spec at index {idx} missing 'folder_uid'") + name, color, inh = spec.get('name'), spec.get('color'), spec.get('inherit_permissions') + if name is None and color is None and inh is None: + raise ValueError(f"Spec at index {idx} must update at least one field") + resolved = resolve_folder_identifier(params, fi) + if not resolved: + raise ValueError(f"Folder '{fi}' at index {idx} not found") + fd_list.append(_build_update_data(params, resolved, name, color, inh)) + response = folder_update_v3(params, fd_list) + return [{ + 'folder_uid': folder_updates[i].get('folder_uid'), + 'status': folder_pb2.FolderModifyStatus.Name(r.status), + 'message': r.message, + 'success': r.status == folder_pb2.SUCCESS, + } for i, r in enumerate(response.folderUpdateResults)] + + +# ══════════════════════════════════════════════════════════════════════════ +# High-level: folder access grant / update / revoke +# ══════════════════════════════════════════════════════════════════════════ + +def _resolve_accessor(params, accessor_uid, as_team): + """Resolve a user/team identifier to (uid_bytes, label, access_type_enum). + """ + if as_team: + resolved = resolve_team_identifier(params, accessor_uid) + if not resolved: + raise ValueError(f"Team '{accessor_uid}' not found") + team_uid_b64, team_uid_bytes = resolved + return team_uid_bytes, team_uid_b64, folder_pb2.AT_TEAM + + is_email = '@' in accessor_uid + if is_email: + _, _, uid_bytes, _ = get_user_public_key(params, accessor_uid) + return uid_bytes, accessor_uid, folder_pb2.AT_USER + + uid_bytes = resolve_user_uid_bytes(params, accessor_uid) + return uid_bytes, accessor_uid, folder_pb2.AT_USER + + +def grant_folder_access_v3(params, folder_uid, user_uid, role='viewer', + share_folder_key=True, expiration_timestamp=None, + as_team=False): + """Grant a user *or team* access to a KeeperDrive folder. 
+ """ + resolved = resolve_folder_identifier(params, folder_uid) + if not resolved: + raise ValueError(f"Folder '{folder_uid}' not found") + folder_uid = resolved + + actual_uid_bytes = None + user_email = None + user_public_key = None + use_ecc = False + team_keys = None + + if as_team: + resolved_team = resolve_team_identifier(params, user_uid) + if not resolved_team: + raise ValueError(f"Team '{user_uid}' not found") + team_uid_b64, actual_uid_bytes = resolved_team + if share_folder_key: + team_keys = get_team_keys(params, team_uid_b64) + access_type_enum = folder_pb2.AT_TEAM + access_type_label = 'AT_TEAM' + identifier_label = team_uid_b64 + else: + is_email = '@' in user_uid + user_email = user_uid if is_email else None + if is_email: + try: + user_public_key, use_ecc, actual_uid_bytes, _inv = get_user_public_key(params, user_email) + except Exception as e: + raise ValueError(f"User '{user_email}' not found or has no public key. {e}") + else: + actual_uid_bytes, user_email = resolve_uid_email(params, user_uid) + if not actual_uid_bytes: + raise ValueError(f"Invalid user UID: {user_uid}") + access_type_enum = folder_pb2.AT_USER + access_type_label = 'AT_USER' + identifier_label = user_uid + + access_role = resolve_role_name(role) + target_role_name = folder_pb2.AccessRoleType.Name(access_role) + + if actual_uid_bytes: + existing = _check_existing_access(params, folder_uid, actual_uid_bytes, + target_role_name, access_type_label) + if existing is not None: + if existing == target_role_name: + return {'folder_uid': folder_uid, 'user_uid': identifier_label, + 'access_type': access_type_label, + 'status': 'SUCCESS', + 'message': f"{'Team' if as_team else 'User'} already has {role} access", + 'success': True, 'action_taken': 'already_had_access'} + result = update_folder_access_v3(params, folder_uid, identifier_label, + role=role, as_team=as_team) + result['action_taken'] = 'updated' + return result + + ad = folder_pb2.FolderAccessData() + ad.folderUid = 
utils.base64_url_decode(folder_uid) + ad.accessTypeUid = actual_uid_bytes + ad.accessType = access_type_enum + ad.accessRoleType = access_role + ad.permissions.CopyFrom(get_folder_permissions_for_role(access_role)) + + if expiration_timestamp: + ad.tlaProperties.expiration = expiration_timestamp + + if share_folder_key: + fk = get_folder_key(params, folder_uid) + ek = folder_pb2.EncryptedDataKey() + if as_team: + # v3 folder team grants must use the team's *asymmetric* public + # key (server rejects AES with "Key type 2 required"). + efk, key_type = encrypt_for_team( + fk, team_keys, prefer_aes=False, + forbid_rsa=getattr(params, 'forbid_rsa', False)) + ek.encryptedKey = efk + ek.encryptedKeyType = key_type + else: + if not user_public_key: + user_public_key, use_ecc = load_user_public_key(params, user_email) + efk = encrypt_for_recipient(fk, user_public_key, use_ecc) + ek.encryptedKey = efk + ek.encryptedKeyType = (folder_pb2.encrypted_by_public_key_ecc if use_ecc + else folder_pb2.encrypted_by_public_key) + ad.folderKey.CopyFrom(ek) + + response = folder_access_update_v3(params, folder_access_adds=[ad]) + result = parse_folder_access_result(response, folder_uid, identifier_label, + 'Access granted successfully') + result['access_type'] = access_type_label + result.setdefault('action_taken', 'granted' if result['success'] else 'grant_failed') + return result + + +def _check_existing_access(params, folder_uid, uid_bytes, target_role_name, + access_type_label='AT_USER'): + """Return existing role name (for the matching access_type) or None.""" + try: + uid_encoded = utils.base64_url_encode(uid_bytes) + info = get_folder_access_v3(params, [folder_uid], resolve_usernames=False) + if info.get('results'): + for a in info['results'][0].get('accessors', []): + if (a.get('access_type') == access_type_label + and a.get('accessor_uid') == uid_encoded): + return a.get('role') + except Exception: + pass + return None + + +def update_folder_access_v3(params, folder_uid, 
user_uid, role=None, hidden=None, + as_team=False): + if role is None and hidden is None: + raise ValueError("At least one field (role or hidden) required") + resolved = resolve_folder_identifier(params, folder_uid) + if not resolved: + raise ValueError(f"Folder '{folder_uid}' not found") + folder_uid = resolved + + actual_uid_bytes, identifier_label, access_type_enum = _resolve_accessor( + params, user_uid, as_team) + if not actual_uid_bytes: + raise ValueError(f"{'Team' if as_team else 'User'} '{user_uid}' not found") + + ad = folder_pb2.FolderAccessData() + ad.folderUid = utils.base64_url_decode(folder_uid) + ad.accessTypeUid = actual_uid_bytes + ad.accessType = access_type_enum + if role: + resolved_role = resolve_role_name(role) + ad.accessRoleType = resolved_role + ad.permissions.CopyFrom(get_folder_permissions_for_role(resolved_role)) + if hidden is not None: + ad.hidden = hidden + + response = folder_access_update_v3(params, folder_access_updates=[ad]) + result = parse_folder_access_result(response, folder_uid, identifier_label, + 'Access updated successfully') + result['access_type'] = 'AT_TEAM' if as_team else 'AT_USER' + return result + + +def revoke_folder_access_v3(params, folder_uid, user_uid, as_team=False): + resolved = resolve_folder_identifier(params, folder_uid) + if not resolved: + raise ValueError(f"Folder '{folder_uid}' not found") + folder_uid = resolved + + actual_uid_bytes, identifier_label, access_type_enum = _resolve_accessor( + params, user_uid, as_team) + if not actual_uid_bytes: + raise ValueError(f"{'Team' if as_team else 'User'} '{user_uid}' not found") + + ad = folder_pb2.FolderAccessData() + ad.folderUid = utils.base64_url_decode(folder_uid) + ad.accessTypeUid = actual_uid_bytes + ad.accessType = access_type_enum + + response = folder_access_update_v3(params, folder_access_removes=[ad]) + result = parse_folder_access_result(response, folder_uid, identifier_label, + 'Access revoked successfully') + result['access_type'] = 'AT_TEAM' 
if as_team else 'AT_USER' + return result + + +def manage_folder_access_batch_v3(params, access_grants=None, + access_updates=None, access_revokes=None): + """Apply a batch of folder access grants/updates/revokes. + """ + adds, updates, removes = [], [], [] + tracking = [] + forbid_rsa = getattr(params, 'forbid_rsa', False) + + for spec in (access_grants or []): + fuid = resolve_folder_identifier(params, spec['folder_uid']) + if not fuid: + raise ValueError(f"Folder '{spec['folder_uid']}' not found") + as_team = bool(spec.get('as_team')) + role = spec.get('role', 'viewer') + fk = get_folder_key(params, fuid) + + ad = folder_pb2.FolderAccessData() + ad.folderUid = utils.base64_url_decode(fuid) + ad.accessRoleType = resolve_role_name(role) + + ek = folder_pb2.EncryptedDataKey() + if as_team: + resolved_team = resolve_team_identifier(params, spec['user_uid']) + if not resolved_team: + raise ValueError(f"Team '{spec['user_uid']}' not found") + team_uid_b64, uid_bytes = resolved_team + team_keys = get_team_keys(params, team_uid_b64) + + efk, key_type = encrypt_for_team(fk, team_keys, prefer_aes=False, + forbid_rsa=forbid_rsa) + ek.encryptedKey = efk + ek.encryptedKeyType = key_type + ad.accessType = folder_pb2.AT_TEAM + else: + uid_bytes, email = resolve_uid_email(params, spec['user_uid']) + if not uid_bytes: + raise ValueError(f"User '{spec['user_uid']}' not found") + pk, use_ecc = load_user_public_key(params, email) + efk = encrypt_for_recipient(fk, pk, use_ecc) + ek.encryptedKey = efk + ek.encryptedKeyType = (folder_pb2.encrypted_by_public_key_ecc if use_ecc + else folder_pb2.encrypted_by_public_key) + ad.accessType = folder_pb2.AT_USER + + ad.accessTypeUid = uid_bytes + ad.folderKey.CopyFrom(ek) + adds.append(ad) + tracking.append(('grant', fuid, spec['user_uid'], spec, as_team)) + + for spec in (access_updates or []): + fuid = resolve_folder_identifier(params, spec['folder_uid']) + if not fuid: + raise ValueError(f"Folder '{spec['folder_uid']}' not found") + 
as_team = bool(spec.get('as_team'))
+        if as_team:
+            resolved_team = resolve_team_identifier(params, spec['user_uid'])
+            if not resolved_team:
+                raise ValueError(f"Team '{spec['user_uid']}' not found")
+            _, uid_bytes = resolved_team
+            access_type_enum = folder_pb2.AT_TEAM
+        else:
+            uid_bytes = resolve_user_uid_bytes(params, spec['user_uid'])
+            if not uid_bytes:
+                raise ValueError(f"User '{spec['user_uid']}' not found")
+            access_type_enum = folder_pb2.AT_USER
+
+        ad = folder_pb2.FolderAccessData()
+        ad.folderUid = utils.base64_url_decode(fuid)
+        ad.accessTypeUid = uid_bytes
+        ad.accessType = access_type_enum
+        if spec.get('role'):
+            ad.accessRoleType = resolve_role_name(spec['role'])
+        if spec.get('hidden') is not None:
+            ad.hidden = spec['hidden']
+        updates.append(ad)
+        tracking.append(('update', fuid, spec['user_uid'], spec, as_team))
+
+    for spec in (access_revokes or []):
+        fuid = resolve_folder_identifier(params, spec['folder_uid'])
+        if not fuid:
+            raise ValueError(f"Folder '{spec['folder_uid']}' not found")
+        as_team = bool(spec.get('as_team'))
+        if as_team:
+            resolved_team = resolve_team_identifier(params, spec['user_uid'])
+            if not resolved_team:
+                raise ValueError(f"Team '{spec['user_uid']}' not found")
+            _, uid_bytes = resolved_team
+            access_type_enum = folder_pb2.AT_TEAM
+        else:
+            uid_bytes = resolve_user_uid_bytes(params, spec['user_uid'])
+            if not uid_bytes:
+                raise ValueError(f"User '{spec['user_uid']}' not found")
+            access_type_enum = folder_pb2.AT_USER
+
+        ad = folder_pb2.FolderAccessData()
+        ad.folderUid = utils.base64_url_decode(fuid)
+        ad.accessTypeUid = uid_bytes
+        ad.accessType = access_type_enum
+        removes.append(ad)
+        tracking.append(('revoke', fuid, spec['user_uid'], spec, as_team))
+
+    response = folder_access_update_v3(
+        params,
+        folder_access_adds=adds or None,
+        folder_access_updates=updates or None,
+        folder_access_removes=removes or None)
+
+    # Optimistic defaults; entries are overwritten below with the server's
+    # per-item status when a matching result row comes back.
+    results = [{'operation': op, 'folder_uid': f, 'user_uid': u,
+                'access_type': 'AT_TEAM' if at else 'AT_USER',
+                'status': 'SUCCESS', 'message': f'{op.capitalize()} completed', 'success': True}
+               for op, f, u, _, at in tracking]
+
+    if response.folderAccessResults:
+        for r in response.folderAccessResults:
+            f = utils.base64_url_encode(r.folderUid)
+            u = utils.base64_url_encode(r.accessUid) if r.accessUid else 'unknown'
+            for i, (op, tf, tu, _, at) in enumerate(tracking):
+                # NOTE(review): tu is the caller-supplied identifier (email or
+                # team name) while u is a base64-encoded UID — confirm these
+                # can ever be equal, otherwise server results are never applied.
+                if tf == f and tu == u:
+                    results[i] = {
+                        'operation': op, 'folder_uid': f, 'user_uid': u,
+                        'access_type': 'AT_TEAM' if at else 'AT_USER',
+                        'status': folder_pb2.FolderModifyStatus.Name(r.status),
+                        'message': r.message,
+                        # Fix: was hard-coded False, marking successful server
+                        # results as failures; mirror manage_folder_records_batch_v3.
+                        'success': r.status == folder_pb2.SUCCESS}
+                    break
+    return results
+
+
+# ══════════════════════════════════════════════════════════════════════════
+# Internal UID → username resolver
+# ══════════════════════════════════════════════════════════════════════════
+
+def _resolve_uid_to_username(params, uid_b64: str) -> Optional[str]:
+    """Try to resolve a base64-url accessor UID to a username.
+
+    Checks the share-objects cache (``vault/get_share_objects``), which
+    includes every user the vault owner has ever interacted with and
+    carries both ``username`` and ``userAccountUid`` fields. The result
+    is a cached API call so repeated lookups are free.
+ """ + try: + from ..proto.record_pb2 import GetShareObjectsRequest, GetShareObjectsResponse + rq = GetShareObjectsRequest() + rs = api.communicate_rest(params, rq, 'vault/get_share_objects', + rs_type=GetShareObjectsResponse) + for user_list in (rs.shareRelationships, rs.shareFamilyUsers, + rs.shareEnterpriseUsers, rs.shareMCEnterpriseUsers): + for su in user_list: + if su.userAccountUid: + su_uid = utils.base64_url_encode(su.userAccountUid) + if su_uid == uid_b64: + return su.username + except Exception: + pass + return None + + +# ══════════════════════════════════════════════════════════════════════════ +# High-level: get_folder_access_v3 +# ══════════════════════════════════════════════════════════════════════════ + +def get_folder_access_v3(params, folder_uids, continuation_token=None, + page_size=None, resolve_usernames=True): + if not folder_uids or len(folder_uids) > 100: + raise ValueError("Provide 1..100 folder UIDs") + from ..proto import folder_access_pb2 + + rq = folder_access_pb2.GetFolderAccessRequest() + for fi in folder_uids: + resolved = resolve_folder_identifier(params, fi) + if not resolved: + raise ValueError(f"Folder '{fi}' not found") + rq.folderUid.append(utils.base64_url_decode(resolved)) + if continuation_token is not None: + tok = folder_access_pb2.ContinuationToken() + tok.lastModified = continuation_token + rq.continuationToken.CopyFrom(tok) + if page_size is not None: + if page_size > 1000: + raise ValueError("Maximum page size is 1000") + rq.pageSize = page_size + + rs = api.communicate_rest(params, rq, 'vault/folders/v3/access', + rs_type=folder_access_pb2.GetFolderAccessResponse) + results = [] + for fr in rs.folderAccessResults: + fuid = utils.base64_url_encode(fr.folderUid) + if fr.HasField('error'): + err = fr.error + results.append({ + 'folder_uid': fuid, + 'error': {'status': folder_pb2.FolderModifyStatus.Name(err.status), + 'message': err.message}, + 'success': False}) + else: + accessors = [] + for a in fr.accessors: + auid 
= utils.base64_url_encode(a.accessTypeUid) + at = folder_pb2.AccessType.Name(a.accessType) + rt = folder_pb2.AccessRoleType.Name(a.accessRoleType) + username = None + if resolve_usernames and at == 'AT_USER': + username = getattr(params, 'user_cache', {}).get(auid) + if not username and hasattr(params, 'enterprise') and params.enterprise: + for u in params.enterprise.get('users', []): + if u.get('user_account_uid') == auid: + username = u.get('username') + break + if not username: + username = _resolve_uid_to_username(params, auid) + if username: + if not hasattr(params, 'user_cache'): + params.user_cache = {} + params.user_cache[auid] = username + ai = { + 'accessor_uid': auid, 'access_type': at, 'role': rt, + 'inherited': bool(a.inherited), 'hidden': bool(a.hidden), + 'username': username, + 'date_created': a.dateCreated or None, + 'last_modified': a.lastModified or None, + } + if a.HasField('permissions'): + p = a.permissions + ai['permissions'] = { + 'can_add': bool(p.canAdd), 'can_remove': bool(p.canRemove), + 'can_delete': bool(p.canDelete), + 'can_list_access': bool(p.canListAccess), + 'can_update_access': bool(p.canUpdateAccess), + 'can_change_ownership': bool(p.canChangeOwnership), + 'can_edit_records': bool(p.canEditRecords), + 'can_view_records': bool(p.canViewRecords), + 'can_approve_access': bool(p.canApproveAccess), + 'can_request_access': bool(p.canRequestAccess), + 'can_update_setting': bool(p.canUpdateSetting), + 'can_list_records': bool(p.canListRecords), + 'can_list_folders': bool(p.canListFolders), + } + accessors.append(ai) + results.append({'folder_uid': fuid, 'accessors': accessors, 'success': True}) + + rd = {'results': results, 'has_more': bool(rs.hasMore)} + if rs.HasField('continuationToken'): + rd['continuation_token'] = rs.continuationToken.lastModified + return rd diff --git a/keepercommander/keeper_drive/folder_record_api.py b/keepercommander/keeper_drive/folder_record_api.py new file mode 100644 index 000000000..ae9ebabd4 --- 
/dev/null +++ b/keepercommander/keeper_drive/folder_record_api.py @@ -0,0 +1,250 @@ +""" +KeeperDrive — folder-record linking, moving, and batch operations. +""" + +import logging +from typing import Optional, List, Dict, Any + +from .. import utils, api +from ..proto import folder_pb2 + +from .common import ( + get_folder_key, get_record_key, get_record_key_type, + encrypt_record_key_for_folder, +) +from .folder_api import resolve_folder_identifier + +logger = logging.getLogger(__name__) + + +# ══════════════════════════════════════════════════════════════════════════ +# Transport +# ══════════════════════════════════════════════════════════════════════════ + +def folder_record_update_v3(params, folder_uid, add_records=None, + update_records=None, remove_records=None): + for label, lst in [('add', add_records), ('update', update_records), + ('remove', remove_records)]: + if lst and len(lst) > 500: + raise ValueError(f"Maximum 500 records to {label}") + if not any([add_records, update_records, remove_records]): + raise ValueError("At least one operation required") + rq = folder_pb2.FolderRecordUpdateRequest() + rq.folderUid = utils.base64_url_decode(folder_uid) + if add_records: + rq.addRecords.extend(add_records) + if update_records: + rq.updateRecords.extend(update_records) + if remove_records: + rq.removeRecords.extend(remove_records) + return api.communicate_rest(params, rq, 'vault/folders/v3/record_update', + rs_type=folder_pb2.FolderRecordUpdateResponse) + + +# ══════════════════════════════════════════════════════════════════════════ +# Internal builders (DRY for add/update/move/batch) +# ══════════════════════════════════════════════════════════════════════════ + +def _build_record_metadata(params, folder_uid, record_uid, + expiration_timestamp=None): + """Build RecordMetadata with encrypted record key and optional TLA.""" + fk = get_folder_key(params, folder_uid) + rk = get_record_key(params, record_uid) + rkt = get_record_key_type(params, record_uid) + 
enc_rk, enc_rkt = encrypt_record_key_for_folder(rk, fk, rkt) + + rm = folder_pb2.RecordMetadata() + rm.recordUid = utils.base64_url_decode(record_uid) + rm.encryptedRecordKey = enc_rk + rm.encryptedRecordKeyType = enc_rkt + if expiration_timestamp is not None: + rm.tlaProperties.expiration = expiration_timestamp + return rm + + +def _build_removal_metadata(record_uid): + """Build minimal RecordMetadata for removal (only UID needed).""" + rm = folder_pb2.RecordMetadata() + rm.recordUid = utils.base64_url_decode(record_uid) + rm.encryptedRecordKey = b'' + rm.encryptedRecordKeyType = folder_pb2.no_key + return rm + + +def _parse_folder_record_response(response, folder_uid, record_uid, success_msg): + if response.folderRecordUpdateResult: + r = response.folderRecordUpdateResult[0] + return { + 'folder_uid': folder_uid, 'record_uid': record_uid, + 'status': folder_pb2.FolderModifyStatus.Name(r.status), + 'message': r.message, 'success': r.status == folder_pb2.SUCCESS, + } + return { + 'folder_uid': folder_uid, 'record_uid': record_uid, + 'status': 'SUCCESS', 'message': success_msg, 'success': True, + } + + +# ══════════════════════════════════════════════════════════════════════════ +# High-level: add / update / remove record in folder +# ══════════════════════════════════════════════════════════════════════════ + +def add_record_to_folder_v3(params, folder_uid, record_uid, + expiration_timestamp=None): + resolved = resolve_folder_identifier(params, folder_uid) + if not resolved: + raise ValueError(f"Folder '{folder_uid}' not found") + rm = _build_record_metadata(params, resolved, record_uid, expiration_timestamp) + rs = folder_record_update_v3(params, resolved, add_records=[rm]) + return _parse_folder_record_response(rs, resolved, record_uid, + 'Record added to folder successfully') + + +def update_record_in_folder_v3(params, folder_uid, record_uid, + expiration_timestamp=None): + resolved = resolve_folder_identifier(params, folder_uid) + if not resolved: + raise 
ValueError(f"Folder '{folder_uid}' not found") + rm = _build_record_metadata(params, resolved, record_uid, expiration_timestamp) + rs = folder_record_update_v3(params, resolved, update_records=[rm]) + return _parse_folder_record_response(rs, resolved, record_uid, + 'Record updated in folder successfully') + + +def remove_record_from_folder_v3(params, folder_uid, record_uid): + resolved = resolve_folder_identifier(params, folder_uid) + if not resolved: + raise ValueError(f"Folder '{folder_uid}' not found") + rm = _build_removal_metadata(record_uid) + rs = folder_record_update_v3(params, resolved, remove_records=[rm]) + return _parse_folder_record_response(rs, resolved, record_uid, + 'Record removed from folder successfully') + + +# ══════════════════════════════════════════════════════════════════════════ +# High-level: move record between folders +# ══════════════════════════════════════════════════════════════════════════ + +def move_record_v3(params, record_uid, from_folder_uid=None, to_folder_uid=None): + from .. 
import sync_down
+    sync_down.sync_down(params)
+
+    if not from_folder_uid and not to_folder_uid:
+        raise ValueError("Cannot move from root to root")
+
+    # Fix: validate/resolve the *destination* before the destructive remove
+    # below. Previously a bad to_folder_uid raised ValueError only after the
+    # record had already been unlinked from the source folder, orphaning it.
+    if to_folder_uid:
+        resolved_to = resolve_folder_identifier(params, to_folder_uid)
+        if not resolved_to:
+            raise ValueError(f"Destination folder '{to_folder_uid}' not found")
+        to_folder_uid = resolved_to
+
+    if from_folder_uid:
+        resolved_from = resolve_folder_identifier(params, from_folder_uid)
+        if not resolved_from:
+            raise ValueError(f"Source folder '{from_folder_uid}' not found")
+        from_folder_uid = resolved_from
+        try:
+            rm_meta = _build_removal_metadata(record_uid)
+            rs = folder_record_update_v3(params, from_folder_uid, remove_records=[rm_meta])
+            if rs.folderRecordUpdateResult:
+                r = rs.folderRecordUpdateResult[0]
+                if r.status != folder_pb2.SUCCESS:
+                    return _move_failure(record_uid, from_folder_uid, to_folder_uid,
+                                         f"Remove failed: {r.message}")
+        except Exception as e:
+            return _move_failure(record_uid, from_folder_uid, to_folder_uid,
+                                 f"Remove error: {e}")
+
+    rk = get_record_key(params, record_uid)
+    rkt = get_record_key_type(params, record_uid)
+
+    # Re-encrypt the record key for the destination: folder key when moving
+    # into a folder, the vault data key when moving to the root ('' target).
+    if to_folder_uid:
+        fk = get_folder_key(params, to_folder_uid)
+        enc_rk, enc_rkt = encrypt_record_key_for_folder(rk, fk, rkt)
+        target_uid = to_folder_uid
+    else:
+        enc_rk, enc_rkt = encrypt_record_key_for_folder(rk, params.data_key, rkt)
+        target_uid = ''
+
+    add_meta = folder_pb2.RecordMetadata()
+    add_meta.recordUid = utils.base64_url_decode(record_uid)
+    add_meta.encryptedRecordKey = enc_rk
+    add_meta.encryptedRecordKeyType = enc_rkt
+
+    try:
+        rs = folder_record_update_v3(params, target_uid, add_records=[add_meta])
+        if rs.folderRecordUpdateResult:
+            r = rs.folderRecordUpdateResult[0]
+            if r.status != folder_pb2.SUCCESS:
+                return _move_failure(record_uid, from_folder_uid, to_folder_uid,
+                                     f"Add failed: {r.message}")
+    except Exception as e:
+        return _move_failure(record_uid, from_folder_uid, to_folder_uid,
+                             f"Add error: {e}")
+
+    return {
+        'record_uid': record_uid,
+        
'from_folder': from_folder_uid or 'root', + 'to_folder': to_folder_uid or 'root', + 'success': True, 'message': 'Record moved successfully', + } + + +def _move_failure(record_uid, from_folder, to_folder, msg): + return { + 'record_uid': record_uid, + 'from_folder': from_folder or 'root', + 'to_folder': to_folder or 'root', + 'success': False, 'message': msg, + } + + +# ══════════════════════════════════════════════════════════════════════════ +# High-level: batch add/remove records +# ══════════════════════════════════════════════════════════════════════════ + +def manage_folder_records_batch_v3(params, folder_uid, records_to_add=None, + records_to_remove=None): + resolved = resolve_folder_identifier(params, folder_uid) + if not resolved: + raise ValueError(f"Folder '{folder_uid}' not found") + folder_uid = resolved + fk = get_folder_key(params, folder_uid) + + adds, removes, tracking = [], [], [] + + for ruid in (records_to_add or []): + rk = get_record_key(params, ruid) + rkt = get_record_key_type(params, ruid) + enc_rk, enc_rkt = encrypt_record_key_for_folder(rk, fk, rkt) + rm = folder_pb2.RecordMetadata() + rm.recordUid = utils.base64_url_decode(ruid) + rm.encryptedRecordKey = enc_rk + rm.encryptedRecordKeyType = enc_rkt + adds.append(rm) + tracking.append(('add', ruid)) + + for ruid in (records_to_remove or []): + removes.append(_build_removal_metadata(ruid)) + tracking.append(('remove', ruid)) + + rs = folder_record_update_v3( + params, folder_uid, + add_records=adds or None, remove_records=removes or None) + + results = [{'operation': op, 'folder_uid': folder_uid, 'record_uid': ruid, + 'status': 'SUCCESS', 'message': f'{op.capitalize()} completed', + 'success': True} for op, ruid in tracking] + + if rs.folderRecordUpdateResult: + for r in rs.folderRecordUpdateResult: + ruid = utils.base64_url_encode(r.recordUid) + for i, (op, tracked) in enumerate(tracking): + if tracked == ruid: + results[i] = { + 'operation': op, 'folder_uid': folder_uid, + 'record_uid': 
ruid, + 'status': folder_pb2.FolderModifyStatus.Name(r.status), + 'message': r.message, + 'success': r.status == folder_pb2.SUCCESS, + } + break + return results diff --git a/keepercommander/keeper_drive/permissions.py b/keepercommander/keeper_drive/permissions.py new file mode 100644 index 000000000..f50cff65a --- /dev/null +++ b/keepercommander/keeper_drive/permissions.py @@ -0,0 +1,156 @@ +""" +KeeperDrive — role matrices, permission helpers, enum wrappers. + +Open/Closed: new roles can be added to the matrices without changing +any consumer code. +""" + +from typing import Dict + +from ..proto import folder_pb2 + + +class FolderUsageType: + NORMAL = folder_pb2.UT_NORMAL + + +class SetBooleanValue: + BOOLEAN_NO_CHANGE = folder_pb2.BOOLEAN_NO_CHANGE + BOOLEAN_TRUE = folder_pb2.BOOLEAN_TRUE + BOOLEAN_FALSE = folder_pb2.BOOLEAN_FALSE + + +_FOLDER_ROLE_PERMISSIONS: Dict[int, Dict[str, bool]] = { + 0: { # NAVIGATOR + 'canAdd': False, 'canRemove': False, 'canDelete': False, + 'canListAccess': False, 'canUpdateAccess': False, 'canChangeOwnership': False, + 'canEditRecords': False, 'canViewRecords': False, + 'canApproveAccess': False, 'canRequestAccess': False, + 'canUpdateSetting': False, 'canListRecords': False, 'canListFolders': True, + }, + 1: { # REQUESTOR + 'canAdd': False, 'canRemove': False, 'canDelete': False, + 'canListAccess': False, 'canUpdateAccess': False, 'canChangeOwnership': False, + 'canEditRecords': False, 'canViewRecords': False, + 'canApproveAccess': False, 'canRequestAccess': True, + 'canUpdateSetting': False, 'canListRecords': True, 'canListFolders': True, + }, + 2: { # VIEWER + 'canAdd': False, 'canRemove': False, 'canDelete': False, + 'canListAccess': True, 'canUpdateAccess': False, 'canChangeOwnership': False, + 'canEditRecords': False, 'canViewRecords': True, + 'canApproveAccess': False, 'canRequestAccess': False, + 'canUpdateSetting': False, 'canListRecords': True, 'canListFolders': True, + }, + 3: { # SHARED_MANAGER + 'canAdd': False, 
'canRemove': False, 'canDelete': False, + 'canListAccess': True, 'canUpdateAccess': True, 'canChangeOwnership': False, + 'canEditRecords': False, 'canViewRecords': True, + 'canApproveAccess': True, 'canRequestAccess': False, + 'canUpdateSetting': False, 'canListRecords': True, 'canListFolders': True, + }, + 4: { # CONTENT_MANAGER + 'canAdd': True, 'canRemove': False, 'canDelete': False, + 'canListAccess': True, 'canUpdateAccess': False, 'canChangeOwnership': False, + 'canEditRecords': True, 'canViewRecords': True, + 'canApproveAccess': False, 'canRequestAccess': False, + 'canUpdateSetting': False, 'canListRecords': True, 'canListFolders': True, + }, + 5: { # CONTENT_SHARE_MANAGER + 'canAdd': True, 'canRemove': True, 'canDelete': False, + 'canListAccess': True, 'canUpdateAccess': True, 'canChangeOwnership': False, + 'canEditRecords': True, 'canViewRecords': True, + 'canApproveAccess': True, 'canRequestAccess': False, + 'canUpdateSetting': True, 'canListRecords': True, 'canListFolders': True, + }, + 6: { # MANAGER + 'canAdd': True, 'canRemove': True, 'canDelete': True, + 'canListAccess': True, 'canUpdateAccess': True, 'canChangeOwnership': True, + 'canEditRecords': True, 'canViewRecords': True, + 'canApproveAccess': True, 'canRequestAccess': False, + 'canUpdateSetting': True, 'canListRecords': True, 'canListFolders': True, + }, +} + +_RECORD_ROLE_PERMISSIONS: Dict[int, Dict[str, bool]] = { + 0: { + 'can_view_title': False, 'can_edit': False, 'can_view': False, + 'can_list_access': False, 'can_update_access': False, 'can_delete': False, + 'can_change_ownership': False, 'can_request_access': False, 'can_approve_access': False, + }, + 1: { + 'can_view_title': True, 'can_edit': False, 'can_view': False, + 'can_list_access': False, 'can_update_access': False, 'can_delete': False, + 'can_change_ownership': False, 'can_request_access': True, 'can_approve_access': False, + }, + 2: { + 'can_view_title': True, 'can_edit': False, 'can_view': True, + 'can_list_access': True, 
'can_update_access': False, 'can_delete': False, + 'can_change_ownership': False, 'can_request_access': False, 'can_approve_access': False, + }, + 3: { + 'can_view_title': True, 'can_edit': False, 'can_view': True, + 'can_list_access': True, 'can_update_access': True, 'can_delete': False, + 'can_change_ownership': False, 'can_request_access': False, 'can_approve_access': True, + }, + 4: { + 'can_view_title': True, 'can_edit': True, 'can_view': True, + 'can_list_access': True, 'can_update_access': False, 'can_delete': False, + 'can_change_ownership': False, 'can_request_access': False, 'can_approve_access': False, + }, + 5: { + 'can_view_title': True, 'can_edit': True, 'can_view': True, + 'can_list_access': True, 'can_update_access': True, 'can_delete': False, + 'can_change_ownership': False, 'can_request_access': False, 'can_approve_access': True, + }, + 6: { + 'can_view_title': True, 'can_edit': True, 'can_view': True, + 'can_list_access': True, 'can_update_access': True, 'can_delete': True, + 'can_change_ownership': True, 'can_request_access': False, 'can_approve_access': True, + }, +} + +ROLE_NAME_MAP: Dict[str, int] = { + 'contributor': 1, + 'requestor': 1, + 'viewer': 2, + 'shared_manager': 3, + 'shared-manager': 3, + 'content_manager': 4, + 'content-manager': 4, + 'content_share_manager': 5, + 'content-share-manager': 5, + 'full-manager': 6, + 'full_manager': 6, +} + + +def get_folder_permissions_for_role(role_type: int) -> 'folder_pb2.FolderPermissions': + perms_dict = _FOLDER_ROLE_PERMISSIONS.get(role_type) + if perms_dict is None: + raise ValueError( + f"Unknown AccessRoleType {role_type}. 
" + f"Expected one of {list(_FOLDER_ROLE_PERMISSIONS.keys())}.") + perms = folder_pb2.FolderPermissions() + for field, value in perms_dict.items(): + setattr(perms, field, value) + return perms + + +def get_record_permissions_for_role(role_type: int) -> Dict[str, bool]: + perms_dict = _RECORD_ROLE_PERMISSIONS.get(role_type) + if perms_dict is None: + raise ValueError( + f"Unknown AccessRoleType {role_type}. " + f"Expected one of {list(_RECORD_ROLE_PERMISSIONS.keys())}.") + return dict(perms_dict) + + +def resolve_role_name(role: str) -> int: + normalised = role.strip().lower() + value = ROLE_NAME_MAP.get(normalised) + if value is None: + raise ValueError( + f"Invalid role '{role}'. " + f"Accepted values: {', '.join(sorted(set(ROLE_NAME_MAP.keys())))}.") + return value diff --git a/keepercommander/keeper_drive/record_api.py b/keepercommander/keeper_drive/record_api.py new file mode 100644 index 000000000..d2e0e96b0 --- /dev/null +++ b/keepercommander/keeper_drive/record_api.py @@ -0,0 +1,682 @@ +""" +KeeperDrive — record CRUD, sharing, and ownership transfer. +""" + +import json +import logging +import os +from typing import Optional, List, Dict, Any + +from .. 
import utils, crypto, api +from ..params import KeeperParams +from ..proto import record_pb2, folder_pb2, record_endpoints_pb2, record_details_pb2, record_sharing_pb2 +from ..error import KeeperApiError +from ..api import pad_aes_gcm + +from .common import ( + get_folder_key, get_record_key, get_record_from_cache, + get_user_public_key, encrypt_for_recipient, handle_share_invite, + parse_sharing_status, +) + +logger = logging.getLogger(__name__) + + +# ══════════════════════════════════════════════════════════════════════════ +# Low-level protobuf builder +# ══════════════════════════════════════════════════════════════════════════ + +def create_record_data_v3(record_uid, record_key, data, + non_shared_data=None, folder_uid=None, + folder_key=None, record_key_type=None, + client_modified_time=None, data_key=None): + ra = record_endpoints_pb2.RecordAdd() + ra.recordUid = utils.base64_url_decode(record_uid) + + if folder_uid and folder_key: + ra.recordKey = crypto.encrypt_aes_v2(record_key, folder_key) + ra.folderUid = utils.base64_url_decode(folder_uid) + ra.recordKeyEncryptedBy = folder_pb2.ENCRYPTED_BY_PARENT_KEY + elif folder_uid and not folder_key: + raise ValueError("folder_key required when folder_uid is provided") + else: + if data_key is None: + raise ValueError("data_key required when creating at vault root") + ra.recordKey = crypto.encrypt_aes_v2(record_key, data_key) + + ra.recordKeyType = (record_key_type if record_key_type is not None + else folder_pb2.encrypted_by_data_key_gcm) + + data_json = pad_aes_gcm(json.dumps(data)) + data_bytes = data_json.encode() if isinstance(data_json, str) else data_json + ra.data = crypto.encrypt_aes_v2(data_bytes, record_key) + + if non_shared_data: + ns = pad_aes_gcm(json.dumps(non_shared_data)) + ns_bytes = ns.encode() if isinstance(ns, str) else ns + ra.nonSharedData = crypto.encrypt_aes_v2(ns_bytes, record_key) + if client_modified_time: + ra.clientModifiedTime = client_modified_time + return ra + + +# 
══════════════════════════════════════════════════════════════════════════ +# Transport: record_add_v3 / record_update_v3 +# ══════════════════════════════════════════════════════════════════════════ + +def record_add_v3(params, records, client_time=None, security_data_key_type=None): + if not records or len(records) > 1000: + raise ValueError("Provide 1..1000 records") + rq = record_endpoints_pb2.RecordsAddRequest() + rq.records.extend(records) + if client_time: + rq.clientTime = client_time + if security_data_key_type: + rq.securityDataKeyType = security_data_key_type + return api.communicate_rest(params, rq, 'vault/records/v3/add', + rs_type=record_pb2.RecordsModifyResponse) + + +def record_update_v3(params, records, client_time=None, security_data_key_type=None): + if not records or len(records) > 1000: + raise ValueError("Provide 1..1000 records") + rq = record_pb2.RecordsUpdateRequest() + rq.records.extend(records) + if client_time: + rq.client_time = client_time + if security_data_key_type: + rq.security_data_key_type = security_data_key_type + return api.communicate_rest(params, rq, 'vault/records/v3/update', + rs_type=record_pb2.RecordsModifyResponse) + + +# ══════════════════════════════════════════════════════════════════════════ +# High-level: create / update / batch +# ══════════════════════════════════════════════════════════════════════════ + +def create_record_v3(params, record_type='', title='', fields=None, + folder_uid=None, notes=None, custom_fields=None, + record_data=None): + uid = utils.generate_uid() + rk = os.urandom(32) + + if record_data is not None: + data = record_data + else: + data = {'type': record_type, 'title': title, 'fields': []} + if fields: + for ft, fv in fields.items(): + data['fields'].append({'type': ft, 'value': fv if isinstance(fv, list) else [fv]}) + if notes is not None: + data['notes'] = notes + if custom_fields: + data['fields'].extend(custom_fields) + + fk = get_folder_key(params, folder_uid, raise_on_missing=True) 
if folder_uid else None + + ra = create_record_data_v3( + record_uid=uid, record_key=rk, data=data, + folder_uid=folder_uid, folder_key=fk, + data_key=params.data_key, client_modified_time=utils.current_milli_time()) + response = record_add_v3(params, [ra]) + + if response.records: + r = response.records[0] + return { + 'record_uid': uid, + 'status': record_pb2.RecordModifyResult.Name(r.status), + 'message': r.message, + 'success': r.status == record_pb2.RS_SUCCESS, + 'revision': getattr(response, 'revision', 0), + } + raise KeeperApiError('no_results', 'No results from record creation') + + +def update_record_v3(params, record_uid, data=None, title=None, + record_type=None, fields=None, notes=None, + non_shared_data=None, revision=None): + if record_uid not in params.record_cache: + from .. import sync_down + sync_down.sync_down(params) + if record_uid not in params.record_cache: + raise ValueError(f"Record {record_uid} not found") + + rec = params.record_cache[record_uid] + rk = rec.get('record_key_unencrypted') + if not rk: + raise ValueError(f"Record key not available for {record_uid}") + + if data is None: + existing = None + if 'data_unencrypted' in rec: + raw = rec['data_unencrypted'] + if isinstance(raw, bytes): + existing = json.loads(raw.decode()) + data = existing.copy() if existing else {'fields': []} + if title is not None: + data['title'] = title + if record_type is not None: + data['type'] = record_type + if fields is not None: + by_type = {} + for ef in data.get('fields', []): + by_type.setdefault(ef.get('type'), []).append(ef) + for ft, fv in fields.items(): + fv = fv if isinstance(fv, list) else [fv] + if ft in by_type and by_type[ft]: + by_type[ft][0]['value'] = fv + else: + data.setdefault('fields', []).append({'type': ft, 'value': fv}) + if notes is not None: + data['notes'] = notes + + ru = record_pb2.RecordUpdate() + ru.record_uid = utils.base64_url_decode(record_uid) + ru.client_modified_time = utils.current_milli_time() + ru.revision = 
revision if revision is not None else rec.get('revision', 0) + + dj = pad_aes_gcm(json.dumps(data)) + db = dj.encode() if isinstance(dj, str) else dj + ru.data = crypto.encrypt_aes_v2(db, rk) + + if non_shared_data: + nsj = pad_aes_gcm(json.dumps(non_shared_data)) + nsb = nsj.encode() if isinstance(nsj, str) else nsj + ru.non_shared_data = crypto.encrypt_aes_v2(nsb, rk) + + response = record_update_v3(params, [ru]) + if response.records: + r = response.records[0] + return { + 'record_uid': record_uid, + 'status': record_pb2.RecordModifyResult.Name(r.status), + 'message': r.message, + 'success': r.status == record_pb2.RS_SUCCESS, + 'revision': getattr(response, 'revision', 0), + } + raise KeeperApiError('no_results', 'No results from record update') + + +def create_records_batch_v3(params, record_specs): + if len(record_specs) > 1000: + raise ValueError("Maximum 1000 records at a time") + adds, uid_map = [], {} + for idx, spec in enumerate(record_specs): + uid = utils.generate_uid() + uid_map[idx] = uid + rk = os.urandom(32) + data = {'type': spec['type'], 'title': spec['title'], 'fields': []} + for ft, fv in spec.get('fields', {}).items(): + data['fields'].append({'type': ft, 'value': fv if isinstance(fv, list) else [fv]}) + if spec.get('notes') is not None: + data['notes'] = spec['notes'] + if spec.get('custom_fields'): + data['fields'].extend(spec['custom_fields']) + + fuid = spec.get('folder_uid') + fk = get_folder_key(params, fuid) if fuid else None + adds.append(create_record_data_v3( + record_uid=uid, record_key=rk, data=data, + folder_uid=fuid, folder_key=fk, + data_key=params.data_key, client_modified_time=utils.current_milli_time())) + + response = record_add_v3(params, adds) + return [{ + 'record_uid': uid_map.get(i, utils.base64_url_encode(r.record_uid)), + 'title': record_specs[i].get('title'), + 'status': record_pb2.RecordModifyResult.Name(r.status), + 'message': r.message, + 'success': r.status == record_pb2.RS_SUCCESS, + 'revision': getattr(response, 
'revision', 0), + } for i, r in enumerate(response.records)] + + +# ══════════════════════════════════════════════════════════════════════════ +# Record details / access +# ══════════════════════════════════════════════════════════════════════════ + +def get_record_details_v3(params, record_uids, client_time=None): + if not record_uids: + raise ValueError("At least one record UID required") + rq = record_details_pb2.RecordDataRequest() + for uid in record_uids: + rq.recordUids.append(utils.base64_url_decode(uid)) + rq.clientTime = client_time or utils.current_milli_time() + + rs = api.communicate_rest(params, rq, 'vault/records/v3/details/data', + rs_type=record_details_pb2.RecordDataResponse) + result = {'data': [], 'forbidden_records': []} + + for rd in rs.data: + uid = utils.base64_url_encode(getattr(rd, 'recordUid', getattr(rd, 'record_uid', b''))) + title, rtype = 'Unknown', 'Unknown' + try: + title, rtype = _decrypt_record_data(params, rd, uid) + except Exception: + pass + result['data'].append({ + 'record_uid': uid, 'title': title, 'type': rtype, + 'revision': getattr(rd, 'revision', 0), + 'version': getattr(rd, 'version', 0), + }) + for fu in rs.forbiddenRecords: + result['forbidden_records'].append(utils.base64_url_encode(fu)) + return result + + +def _decrypt_record_data(params, rd, uid): + rk_val = getattr(rd, 'recordKey', getattr(rd, 'record_key', None)) + if not rk_val: + raise ValueError("No record key") + enc_rk = utils.base64_url_decode(rk_val) if isinstance(rk_val, str) else rk_val + rk_type = getattr(rd, 'recordKeyType', getattr(rd, 'record_key_type', None)) + + drk = _try_decrypt_record_key(params, enc_rk, rk_type, uid) + enc_data = getattr(rd, 'encryptedRecordData', getattr(rd, 'encrypted_record_data', None)) + if drk and enc_data: + raw = utils.base64_url_decode(enc_data) if isinstance(enc_data, str) else enc_data + dec = crypto.decrypt_aes_v2(raw, drk) + dj = json.loads(dec.decode().rstrip(' ')) + return dj.get('title', 'Unknown'), 
dj.get('type', 'Unknown') + return 'Unknown', 'Unknown' + + +def _try_decrypt_record_key(params, enc_rk, rk_type, uid): + drk = None + if rk_type in (record_pb2.ENCRYPTED_BY_DATA_KEY, 'ENCRYPTED_BY_DATA_KEY'): + try: drk = crypto.decrypt_aes_v1(enc_rk, params.data_key) + except Exception: pass + elif rk_type in (record_pb2.ENCRYPTED_BY_DATA_KEY_GCM, 'ENCRYPTED_BY_DATA_KEY_GCM', None): + try: drk = crypto.decrypt_aes_v2(enc_rk, params.data_key) + except Exception: pass + elif rk_type in (record_pb2.ENCRYPTED_BY_PUBLIC_KEY, 'ENCRYPTED_BY_PUBLIC_KEY'): + drk = crypto.decrypt_rsa(enc_rk, params.rsa_key2) + elif rk_type in (record_pb2.ENCRYPTED_BY_PUBLIC_KEY_ECC, 'ENCRYPTED_BY_PUBLIC_KEY_ECC'): + drk = crypto.decrypt_ec(enc_rk, params.ecc_key) + + if not drk and uid in getattr(params, 'keeper_drive_record_keys', {}): + for rk_entry in params.keeper_drive_record_keys[uid]: + fuid = rk_entry.get('folder_uid') + if fuid: + fobj = getattr(params, 'keeper_drive_folders', {}).get(fuid, {}) + fk = fobj.get('folder_key_unencrypted') + if fk: + try: drk = crypto.decrypt_aes_v2(enc_rk, fk); break + except Exception: + try: drk = crypto.decrypt_aes_v1(enc_rk, fk); break + except Exception: pass + return drk + + +def get_record_accesses_v3(params, record_uids): + if not record_uids: + raise ValueError("At least one record UID required") + rq = record_details_pb2.RecordAccessRequest() + for uid in record_uids: + rq.recordUids.append(utils.base64_url_decode(uid)) + + rs = api.communicate_rest(params, rq, 'vault/records/v3/details/access', + rs_type=record_details_pb2.RecordAccessResponse) + result = {'record_accesses': [], 'forbidden_records': []} + for ra in rs.recordAccesses: + d = ra.data + ai = ra.accessorInfo + ao = { + 'record_uid': utils.base64_url_encode(d.recordUid), + 'accessor_name': ai.name, + 'access_type': folder_pb2.AccessType.Name(d.accessType) if hasattr(d, 'accessType') else 'UNKNOWN', + 'access_type_uid': utils.base64_url_encode(d.accessTypeUid), + 'owner': 
getattr(d, 'owner', False), + 'inherited': bool(getattr(d, 'inherited', False)), + 'access_role_type': int(getattr(d, 'accessRoleType', 0) or 0), + } + for flag in ('can_view_title', 'can_edit', 'can_view', 'can_list_access', + 'can_update_access', 'can_delete', 'can_change_ownership', + 'can_request_access', 'can_approve_access'): + ao[flag] = getattr(d, flag, False) + result['record_accesses'].append(ao) + for fu in rs.forbiddenRecords: + result['forbidden_records'].append(utils.base64_url_encode(fu)) + return result + + +# ══════════════════════════════════════════════════════════════════════════ +# Record sharing (Strategy: share / update / revoke) +# ══════════════════════════════════════════════════════════════════════════ + +def _build_share_permissions(params, record_uid, recipient_email, access_role_type, + expiration_timestamp, include_role): + """Build a Permissions protobuf for share/update — single source of truth.""" + from .. import sync_down as sd + sd.sync_down(params) + + rec = get_record_from_cache(params, record_uid) + if not rec: + raise ValueError(f"Record {record_uid} not found in cache") + rk = rec.get('record_key_unencrypted') + if not rk: + raise ValueError(f"Record {record_uid} has no decrypted key") + + pub_key, use_ecc, uid_bytes, needs_invite = get_user_public_key(params, recipient_email) + if not pub_key: + handle_share_invite(params, recipient_email, needs_invite) + raise ValueError(f"User {recipient_email} has no public key") + if not uid_bytes: + raise ValueError(f"User {recipient_email} not found") + + enc_rk = encrypt_for_recipient(rk, pub_key, use_ecc) + uid_b = utils.base64_url_decode(record_uid) + + perm = record_sharing_pb2.Permissions() + perm.recipientUid = uid_bytes + perm.recordUid = uid_b + perm.recordKey = enc_rk + perm.useEccKey = use_ecc + + perm.rules.accessTypeUid = uid_bytes + perm.rules.accessType = folder_pb2.AT_USER + perm.rules.recordUid = uid_b + perm.rules.owner = False + if include_role and access_role_type 
is not None: + perm.rules.accessRoleType = access_role_type + if expiration_timestamp: + perm.rules.tlaProperties.expiration = expiration_timestamp + return perm + + +def share_record_v3(params, record_uid, recipient_email, access_role_type, + expiration_timestamp=None): + perm = _build_share_permissions(params, record_uid, recipient_email, + access_role_type, expiration_timestamp, + include_role=True) + rq = record_sharing_pb2.Request() + rq.createSharingPermissions.append(perm) + rs = api.communicate_rest(params, rq, 'vault/records/v3/share', + rs_type=record_sharing_pb2.Response) + results = [parse_sharing_status(s) for s in rs.createdSharingStatus] + return {'results': results, 'success': all(r['success'] for r in results)} + + +def update_record_share_v3(params, record_uid, recipient_email, + access_role_type=None, expiration_timestamp=None): + perm = _build_share_permissions(params, record_uid, recipient_email, + access_role_type, expiration_timestamp, + include_role=True) + rq = record_sharing_pb2.Request() + rq.updateSharingPermissions.append(perm) + rs = api.communicate_rest(params, rq, 'vault/records/v3/share', + rs_type=record_sharing_pb2.Response) + results = [parse_sharing_status(s) for s in rs.updatedSharingStatus] + return {'results': results, 'success': all(r['success'] for r in results)} + + +def unshare_record_v3(params, record_uid, recipient_email): + from .. 
import sync_down as sd + sd.sync_down(params) + + rec = get_record_from_cache(params, record_uid) + if not rec: + raise ValueError(f"Record {record_uid} not found in cache") + _, _, uid_bytes, _ = get_user_public_key(params, recipient_email) + if not uid_bytes: + raise ValueError(f"User {recipient_email} not found") + + uid_b = utils.base64_url_decode(record_uid) + perm = record_sharing_pb2.Permissions() + perm.recipientUid = uid_bytes + perm.recordUid = uid_b + perm.rules.accessTypeUid = uid_bytes + perm.rules.accessType = folder_pb2.AT_USER + perm.rules.recordUid = uid_b + + rq = record_sharing_pb2.Request() + rq.revokeSharingPermissions.append(perm) + rs = api.communicate_rest(params, rq, 'vault/records/v3/share', + rs_type=record_sharing_pb2.Response) + results = [parse_sharing_status(s) for s in rs.revokedSharingStatus] + return {'results': results, 'success': all(r['success'] for r in results)} + + +# ══════════════════════════════════════════════════════════════════════════ +# Batch record sharing (bulk update / revoke in a single REST call per chunk) +# ══════════════════════════════════════════════════════════════════════════ + +_SHARE_BATCH_SIZE = 200 +"""Maximum number of permissions per vault/records/v3/share request.""" + + +def batch_update_record_shares_v3(params, updates, expiration_timestamp=None, chunk_size=_SHARE_BATCH_SIZE): + """Send multiple updateSharingPermissions in as few REST calls as possible. + + *updates* is a list of dicts with keys: + ``record_uid``, ``email``, ``access_role_type``, + ``cur_role`` (for logging), ``new_role`` (for logging). + + Returns a list of ``(item_dict, result_dict)`` pairs, one per permission + in input order (skipped items carry ``result['skipped'] = True``). + """ + from .. 
import sync_down as sd + sd.sync_down(params) + + outcomes = [] + for i in range(0, len(updates), chunk_size): + chunk = updates[i:i + chunk_size] + rq = record_sharing_pb2.Request() + built = [] + for u in chunk: + try: + perm = _build_share_permissions( + params, u['record_uid'], u['email'], + u['access_role_type'], expiration_timestamp, + include_role=True, + ) + rq.updateSharingPermissions.append(perm) + built.append(u) + except Exception as exc: + outcomes.append((u, {'success': False, 'skipped': True, + 'message': str(exc)})) + + if not built: + continue + + try: + rs = api.communicate_rest(params, rq, 'vault/records/v3/share', + rs_type=record_sharing_pb2.Response) + statuses = [parse_sharing_status(s) for s in rs.updatedSharingStatus] + status_by_uid = {s['record_uid']: s for s in statuses} + for u in built: + outcomes.append((u, status_by_uid.get(u['record_uid'], + {'success': False, + 'message': 'No status returned'}))) + except Exception as exc: + for u in built: + outcomes.append((u, {'success': False, 'message': str(exc)})) + + return outcomes + + +def batch_create_record_shares_v3(params, creates, expiration_timestamp=None, chunk_size=_SHARE_BATCH_SIZE): + """Send multiple createSharingPermissions in as few REST calls as possible. + + Used when granting a role to a recipient who currently only has an + *inherited* (folder-level) permission on the record. The + ``vault/records/v3/share`` endpoint cannot ``update`` an inherited row + because no direct sharing entry exists; a ``create`` adds a new direct + permission that overrides the inherited one. + + *creates* is a list of dicts with keys: + ``record_uid``, ``email``, ``access_role_type``, + ``cur_role`` (for logging), ``new_role`` (for logging). + + Returns a list of ``(item_dict, result_dict)`` pairs in input order + (skipped items carry ``result['skipped'] = True``). + """ + from .. 
import sync_down as sd + sd.sync_down(params) + + outcomes = [] + for i in range(0, len(creates), chunk_size): + chunk = creates[i:i + chunk_size] + rq = record_sharing_pb2.Request() + built = [] + for c in chunk: + try: + perm = _build_share_permissions( + params, c['record_uid'], c['email'], + c['access_role_type'], expiration_timestamp, + include_role=True, + ) + rq.createSharingPermissions.append(perm) + built.append(c) + except Exception as exc: + outcomes.append((c, {'success': False, 'skipped': True, + 'message': str(exc)})) + + if not built: + continue + + try: + rs = api.communicate_rest(params, rq, 'vault/records/v3/share', + rs_type=record_sharing_pb2.Response) + statuses = [parse_sharing_status(s) for s in rs.createdSharingStatus] + status_by_uid = {s['record_uid']: s for s in statuses} + for c in built: + outcomes.append((c, status_by_uid.get(c['record_uid'], + {'success': False, + 'message': 'No status returned'}))) + except Exception as exc: + for c in built: + outcomes.append((c, {'success': False, 'message': str(exc)})) + + return outcomes + + +def batch_unshare_records_v3(params, revokes, chunk_size=_SHARE_BATCH_SIZE): + """Send multiple revokeSharingPermissions in as few REST calls as possible. + + *revokes* is a list of dicts with keys: + ``record_uid``, ``email``, ``cur_role`` (for logging). + + Returns a list of ``(item_dict, result_dict)`` pairs. + """ + from .. 
import sync_down as sd + sd.sync_down(params) + + outcomes = [] + for i in range(0, len(revokes), chunk_size): + chunk = revokes[i:i + chunk_size] + rq = record_sharing_pb2.Request() + built = [] + for r in chunk: + try: + rec = get_record_from_cache(params, r['record_uid']) + if not rec: + raise ValueError(f"Record {r['record_uid']} not found in cache") + _, _, uid_bytes, _ = get_user_public_key(params, r['email']) + if not uid_bytes: + raise ValueError(f"User {r['email']} not found") + + uid_b = utils.base64_url_decode(r['record_uid']) + perm = record_sharing_pb2.Permissions() + perm.recipientUid = uid_bytes + perm.recordUid = uid_b + perm.rules.accessTypeUid = uid_bytes + perm.rules.accessType = folder_pb2.AT_USER + perm.rules.recordUid = uid_b + + rq.revokeSharingPermissions.append(perm) + built.append(r) + except Exception as exc: + outcomes.append((r, {'success': False, 'skipped': True, + 'message': str(exc)})) + + if not built: + continue + + try: + rs = api.communicate_rest(params, rq, 'vault/records/v3/share', + rs_type=record_sharing_pb2.Response) + statuses = [parse_sharing_status(s) for s in rs.revokedSharingStatus] + status_by_uid = {s['record_uid']: s for s in statuses} + for r in built: + outcomes.append((r, status_by_uid.get(r['record_uid'], + {'success': False, + 'message': 'No status returned'}))) + except Exception as exc: + for r in built: + outcomes.append((r, {'success': False, 'message': str(exc)})) + + return outcomes + + +# ══════════════════════════════════════════════════════════════════════════ +# Ownership transfer +# ══════════════════════════════════════════════════════════════════════════ + +def _build_transfer_record(params, record_uid, new_owner_email, require_uid=False): + rec = get_record_from_cache(params, record_uid) or params.record_cache.get(record_uid) + if not rec: + return None + rk = rec.get('record_key_unencrypted') + if not rk: + return None + + pub_key, use_ecc, _, _ = get_user_public_key(params, new_owner_email, 
require_uid=require_uid) + if not pub_key: + return None + enc_rk = encrypt_for_recipient(rk, pub_key, use_ecc) + + tr = record_pb2.TransferRecord() + tr.username = new_owner_email + tr.recordUid = utils.base64_url_decode(record_uid) + tr.recordKey = enc_rk + tr.useEccKey = use_ecc + return tr + + +def transfer_record_ownership_v3(params, record_uid, new_owner_email): + from .. import sync_down as sd + sd.sync_down(params) + + tr = _build_transfer_record(params, record_uid, new_owner_email, require_uid=False) + if not tr: + raise ValueError(f"Cannot prepare transfer for record {record_uid}") + + rq = record_pb2.RecordsOnwershipTransferRequest() + rq.transferRecords.append(tr) + rs = api.communicate_rest(params, rq, 'vault/records/v3/transfer', + rs_type=record_pb2.RecordsOnwershipTransferResponse) + results = [{ + 'record_uid': utils.base64_url_encode(s.recordUid), + 'username': s.username, 'status': s.status, 'message': s.message, + 'success': 'success' in s.status.lower(), + } for s in rs.transferRecordStatus] + params.sync_data = True + return {'results': results, 'success': all(r['success'] for r in results)} + + +def transfer_records_ownership_batch_v3(params, transfers): + from .. 
import sync_down as sd + sd.sync_down(params) + + trs = [] + for spec in transfers: + tr = _build_transfer_record(params, spec['record_uid'], spec['new_owner_email']) + if tr: + trs.append(tr) + if not trs: + raise ValueError("No valid transfer records to process") + + rq = record_pb2.RecordsOnwershipTransferRequest() + rq.transferRecords.extend(trs) + rs = api.communicate_rest(params, rq, 'vault/records/v3/transfer', + rs_type=record_pb2.RecordsOnwershipTransferResponse) + results = [{ + 'record_uid': utils.base64_url_encode(s.recordUid), + 'username': s.username, 'status': s.status, 'message': s.message, + 'success': 'success' in s.status.lower(), + } for s in rs.transferRecordStatus] + params.sync_data = True + ok = sum(1 for r in results if r['success']) + return {'results': results, 'success': all(r['success'] for r in results), + 'total': len(results), 'successful': ok, 'failed': len(results) - ok} diff --git a/keepercommander/keeper_drive/removal_api.py b/keepercommander/keeper_drive/removal_api.py new file mode 100644 index 000000000..336c00c4c --- /dev/null +++ b/keepercommander/keeper_drive/removal_api.py @@ -0,0 +1,254 @@ +""" +KeeperDrive — record and folder removal (preview/confirm pattern). + +The preview → confirm two-step is shared via ``_execute_removal``. +""" + +import logging +from typing import Optional, List, Dict, Any + +from .. 
import utils, api +from ..proto import remove_pb2 + +logger = logging.getLogger(__name__) + + +# ══════════════════════════════════════════════════════════════════════════ +# Operation type mappings +# ══════════════════════════════════════════════════════════════════════════ + +_RECORD_OP_MAP = { + 'unlink': remove_pb2.UNLINK_FROM_FOLDER, + 'folder-trash': remove_pb2.MOVE_TO_FOLDER_TRASH, + 'owner-trash': remove_pb2.MOVE_TO_OWNER_TRASH, +} + +_FOLDER_OP_MAP = { + 'folder-trash': remove_pb2.FOLDER_MOVE_TO_FOLDER_TRASH, + 'delete-permanent': remove_pb2.FOLDER_DELETE_PERMANENT, +} + + +# ══════════════════════════════════════════════════════════════════════════ +# Shared preview/confirm engine (DRY — used by both record & folder) +# ══════════════════════════════════════════════════════════════════════════ + +def _parse_impact(res): + """Parse a RemoveResult's impact field into a dict — shared logic.""" + if not res.HasField('impact'): + return None + imp = res.impact + return { + 'folders_count': imp.folders_count, + 'records_count': imp.records_count, + 'affected_users_count': imp.affected_users_count, + 'affected_teams_count': imp.affected_teams_count, + 'record_info': [{'record_uid': utils.base64_url_encode(ri.record_uid), + 'locations_count': ri.locations_count} + for ri in imp.record_info], + 'warnings': list(imp.warnings), + } + + +def _parse_error(res): + if not res.HasField('error'): + return None + return { + 'code': remove_pb2.RemoveErrorCode.Name(res.error.code), + 'message': res.error.message, + } + + +def _build_preview_results(preview_rs, uid_field='item_uid'): + """Parse preview results — works for both record and folder removal.""" + results = [] + for res in preview_rs.results: + item_uid = utils.base64_url_encode(getattr(res, uid_field, b'')) + entry = { + 'status': remove_pb2.RemoveStatus.Name(res.status), + 'impact': _parse_impact(res), + 'error': _parse_error(res), + } + if hasattr(res, 'folder_uid') and res.folder_uid: + entry['folder_uid'] = 
utils.base64_url_encode(res.folder_uid) + else: + entry['folder_uid'] = '' + entry['record_uid'] = item_uid + entry['folder_uid'] = entry.get('folder_uid', '') + results.append(entry) + return results + + +# ══════════════════════════════════════════════════════════════════════════ +# Resolution helpers +# ══════════════════════════════════════════════════════════════════════════ + +def find_kd_folders_for_record(params, record_uid): + """Return KeeperDrive folder UIDs that contain *record_uid*.""" + folders = [] + kd_fr = getattr(params, 'keeper_drive_folder_records', {}) + for fuid, rec_set in kd_fr.items(): + if record_uid in rec_set: + folders.append(fuid) + return folders + + +def resolve_kd_record_uid(params, identifier): + """Resolve a record identifier (UID, title, or name) to a record UID. + + Lookup order: + 1. Direct UID match in keeper_drive_records + 2. Direct UID match in record_cache (vault records) + 3. Title match from decrypted keeper_drive_record_data + 4. Title match from record_cache (data_unencrypted) + """ + import json + + kd = getattr(params, 'keeper_drive_records', {}) + if identifier in kd: + return identifier + + rc = getattr(params, 'record_cache', {}) + if identifier in rc: + return identifier + + lower = identifier.casefold() + + kd_data = getattr(params, 'keeper_drive_record_data', {}) + for uid in kd: + rd = kd_data.get(uid, {}) + dj = rd.get('data_json', {}) + if isinstance(dj, dict): + t = dj.get('title', '') + if isinstance(t, str) and t.casefold() == lower: + return uid + + for uid, rec in rc.items(): + if uid in kd: + continue + data = rec.get('data_unencrypted') + if data: + try: + dj = json.loads(data.decode('utf-8') if isinstance(data, bytes) else data) + t = dj.get('title', '') + if isinstance(t, str) and t.casefold() == lower: + return uid + except Exception: + pass + + return None + + +def resolve_kd_folder_uid(params, identifier): + """Resolve a folder identifier (UID or name) to a KeeperDrive folder UID.""" + kd = 
getattr(params, 'keeper_drive_folders', {}) + if identifier in kd: + return identifier + fc = getattr(params, 'folder_cache', {}) + if identifier in fc: + return identifier + lower = identifier.casefold() + for uid, f in kd.items(): + name = f.get('name', '') + if isinstance(name, str) and name.casefold() == lower: + return uid + for uid, f in getattr(params, 'subfolder_cache', {}).items(): + name = f.get('name', '') + if isinstance(name, str) and name.casefold() == lower: + return uid + return None + + +# ══════════════════════════════════════════════════════════════════════════ +# Record removal +# ══════════════════════════════════════════════════════════════════════════ + +def remove_record_v3(params, removals, dry_run=False): + if not removals: + raise ValueError("At least one record required") + if len(removals) > 500: + raise ValueError("Maximum 500 records per request") + + preview_rq = remove_pb2.RemoveRecordRequest() + preview_rq.action = remove_pb2.REMOVE_ACTION_PREVIEW + for item in removals: + op = item.get('operation_type', 'owner-trash') + if op not in _RECORD_OP_MAP: + raise ValueError(f"Invalid operation_type '{op}'. 
Use: {', '.join(_RECORD_OP_MAP)}") + rr = remove_pb2.RecordRemoval() + rr.record_uid = utils.base64_url_decode(item['record_uid']) + fuid = item.get('folder_uid') + rr.folder_uid = utils.base64_url_decode(fuid) if fuid else b'' + rr.operation_type = _RECORD_OP_MAP[op] + preview_rq.records.append(rr) + + preview_rs = api.communicate_rest(params, preview_rq, + 'vault/folders/v3/remove_record', + rs_type=remove_pb2.RemoveResponse) + + preview_results = _build_preview_results(preview_rs) + token_expires = preview_rs.token_expires_at or None + + if dry_run or not preview_rs.confirmation_token: + return {'preview_results': preview_results, 'confirmed': False, + 'confirmation_token_expires_at': token_expires} + + confirm_rq = remove_pb2.RemoveRecordRequest() + confirm_rq.action = remove_pb2.REMOVE_ACTION_CONFIRM + confirm_rq.confirmation_token = preview_rs.confirmation_token + confirm_rq.records.extend(preview_rq.records) + api.communicate_rest(params, confirm_rq, + 'vault/folders/v3/remove_record', + rs_type=remove_pb2.RemoveResponse) + return {'preview_results': preview_results, 'confirmed': True, + 'confirmation_token_expires_at': token_expires} + + +# ══════════════════════════════════════════════════════════════════════════ +# Folder removal +# ══════════════════════════════════════════════════════════════════════════ + +def remove_folder_v3(params, removals, dry_run=False): + if not removals: + raise ValueError("At least one folder required") + if len(removals) > 100: + raise ValueError("Maximum 100 folders per request") + + preview_rq = remove_pb2.RemoveFolderRequest() + preview_rq.action = remove_pb2.REMOVE_ACTION_PREVIEW + for item in removals: + op = item.get('operation_type', 'folder-trash') + if op not in _FOLDER_OP_MAP: + raise ValueError(f"Invalid operation_type '{op}'. 
Use: {', '.join(_FOLDER_OP_MAP)}") + fr = remove_pb2.FolderRemoval() + fr.folder_uid = utils.base64_url_decode(item['folder_uid']) + fr.operation_type = _FOLDER_OP_MAP[op] + preview_rq.folders.append(fr) + + preview_rs = api.communicate_rest(params, preview_rq, + 'vault/folders/v3/remove_folder', + rs_type=remove_pb2.RemoveResponse) + + preview_results = [] + for res in preview_rs.results: + preview_results.append({ + 'folder_uid': utils.base64_url_encode(res.item_uid), + 'status': remove_pb2.RemoveStatus.Name(res.status), + 'impact': _parse_impact(res), + 'error': _parse_error(res), + }) + token_expires = preview_rs.token_expires_at or None + + if dry_run or not preview_rs.confirmation_token: + return {'preview_results': preview_results, 'confirmed': False, + 'confirmation_token_expires_at': token_expires} + + confirm_rq = remove_pb2.RemoveFolderRequest() + confirm_rq.action = remove_pb2.REMOVE_ACTION_CONFIRM + confirm_rq.confirmation_token = preview_rs.confirmation_token + confirm_rq.folders.extend(preview_rq.folders) + api.communicate_rest(params, confirm_rq, + 'vault/folders/v3/remove_folder', + rs_type=remove_pb2.RemoveResponse) + return {'preview_results': preview_results, 'confirmed': True, + 'confirmation_token_expires_at': token_expires} diff --git a/keepercommander/keeper_drive/sync.py b/keepercommander/keeper_drive/sync.py new file mode 100644 index 000000000..18f4db7ab --- /dev/null +++ b/keepercommander/keeper_drive/sync.py @@ -0,0 +1,1059 @@ +import json +import logging +from typing import List, Dict + +import google + +from .. 
import utils, crypto +from ..params import RecordOwner +from ..proto import folder_pb2, record_pb2 + + +def _ensure_keeper_drive_attrs(params): + """Ensure Keeper Drive caches exist on params, even for older sessions.""" + if params is None: + return + if not hasattr(params, 'keeper_drive_folders'): + params.keeper_drive_folders = {} + if not hasattr(params, 'keeper_drive_folder_keys'): + params.keeper_drive_folder_keys = {} + if not hasattr(params, 'keeper_drive_folder_accesses'): + params.keeper_drive_folder_accesses = {} + if not hasattr(params, 'keeper_drive_records'): + params.keeper_drive_records = {} + if not hasattr(params, 'keeper_drive_record_data'): + params.keeper_drive_record_data = {} + if not hasattr(params, 'keeper_drive_record_keys'): + params.keeper_drive_record_keys = {} + if not hasattr(params, 'keeper_drive_record_accesses'): + params.keeper_drive_record_accesses = {} + if not hasattr(params, 'keeper_drive_folder_records'): + params.keeper_drive_folder_records = {} + if not hasattr(params, 'keeper_drive_folder_sharing_states'): + params.keeper_drive_folder_sharing_states = {} + if not hasattr(params, 'keeper_drive_record_sharing_states'): + params.keeper_drive_record_sharing_states = {} + if not hasattr(params, 'keeper_drive_record_links'): + params.keeper_drive_record_links = {} + if not hasattr(params, 'keeper_drive_raw_dag_data'): + params.keeper_drive_raw_dag_data = [] + + +def create_accumulator(): + return { + 'folders': [], + 'folder_keys': [], + 'folder_accesses': [], + 'revoked_folder_accesses': [], + 'denied_folder_accesses': [], + 'record_data': [], + 'record_keys': [], + 'record_accesses': [], + 'revoked_record_accesses': [], + 'records': [], + 'folder_records': [], + 'removed_folders': [], + 'removed_folder_records': [], + 'users': [], + 'folder_sharing_states': [], + 'record_sharing_states': [], + 'record_links': [], + 'removed_record_links': [], + 'record_rotations': [], + 'raw_dag_data': [], + } + + +def clear_caches(params): + 
_ensure_keeper_drive_attrs(params) + params.keeper_drive_folders.clear() + params.keeper_drive_folder_keys.clear() + params.keeper_drive_folder_accesses.clear() + params.keeper_drive_records.clear() + params.keeper_drive_record_data.clear() + params.keeper_drive_record_keys.clear() + params.keeper_drive_record_accesses.clear() + params.keeper_drive_folder_records.clear() + params.keeper_drive_folder_sharing_states.clear() + params.keeper_drive_record_sharing_states.clear() + params.keeper_drive_record_links.clear() + params.keeper_drive_raw_dag_data.clear() + # keeper_drive_trashed_folders is intentionally NOT cleared here. + # The server keeps sending trashed folders in every sync_down response + # (including full/CLEAR syncs), so the trashed-UID filter must survive + # cache clears. The set is persisted to disk and reloaded on session start. + + +def collect_from_response(acc, response, resp_bw_recs, resp_sec_data_recs, resp_sec_scores, record_rotation_items): + if not response.HasField('keeperDriveData'): + return + kd_data = response.keeperDriveData + if len(kd_data.folders) > 0: + acc['folders'].extend(kd_data.folders) + if len(kd_data.folderKeys) > 0: + acc['folder_keys'].extend(kd_data.folderKeys) + if len(kd_data.folderAccesses) > 0: + acc['folder_accesses'].extend(kd_data.folderAccesses) + if len(kd_data.revokedFolderAccesses) > 0: + acc['revoked_folder_accesses'].extend(kd_data.revokedFolderAccesses) + dfa_attr = getattr(kd_data, 'deniedFolderAccesses', None) + if dfa_attr: + acc['denied_folder_accesses'].extend(dfa_attr) + if len(kd_data.recordData) > 0: + acc['record_data'].extend(kd_data.recordData) + # recordKeys does not exist as a top-level field in KeeperDriveData; + # record keys are embedded in folderRecords[].recordMetadata.encryptedRecordKey + # and are extracted during _process_keeper_drive_sync. Use getattr defensively + # so that if the field is ever added to the proto it is collected automatically. 
+ rk_attr = getattr(kd_data, 'recordKeys', None) + if rk_attr: + acc['record_keys'].extend(rk_attr) + if len(kd_data.recordAccesses) > 0: + acc['record_accesses'].extend(kd_data.recordAccesses) + if len(kd_data.revokedRecordAccesses) > 0: + acc['revoked_record_accesses'].extend(kd_data.revokedRecordAccesses) + if len(kd_data.records) > 0: + acc['records'].extend(kd_data.records) + if len(kd_data.folderRecords) > 0: + acc['folder_records'].extend(kd_data.folderRecords) + if len(kd_data.removedFolders) > 0: + acc['removed_folders'].extend(kd_data.removedFolders) + if len(kd_data.removedFolderRecords) > 0: + acc['removed_folder_records'].extend(kd_data.removedFolderRecords) + + users_attr = getattr(kd_data, 'users', None) + if users_attr: + acc['users'].extend(users_attr) + fss_attr = getattr(kd_data, 'folderSharingState', None) + if fss_attr: + acc['folder_sharing_states'].extend(fss_attr) + rss_attr = getattr(kd_data, 'recordSharingStates', None) + if rss_attr: + acc['record_sharing_states'].extend(rss_attr) + rl_attr = getattr(kd_data, 'recordLinks', None) + if rl_attr: + acc['record_links'].extend(rl_attr) + rrl_attr = getattr(kd_data, 'removedRecordLinks', None) + if rrl_attr: + acc['removed_record_links'].extend(rrl_attr) + rrd_attr = getattr(kd_data, 'recordRotationData', None) + if rrd_attr: + acc['record_rotations'].extend(rrd_attr) + dag_attr = getattr(kd_data, 'rawDagData', None) + if dag_attr: + acc['raw_dag_data'].extend(dag_attr) + bw_attr = getattr(kd_data, 'breachWatchRecords', None) + if bw_attr: + resp_bw_recs.extend(bw_attr) + bws_attr = getattr(kd_data, 'breachWatchSecurityData', None) + if bws_attr: + resp_sec_data_recs.extend(bws_attr) + ssd_attr = getattr(kd_data, 'securityScoreData', None) + if ssd_attr: + resp_sec_scores.extend(ssd_attr) + if acc['record_rotations']: + record_rotation_items.extend(acc['record_rotations']) + + +def has_data(acc): + return any(len(v) > 0 for v in acc.values()) + + +def process(params, acc): + if not 
def _process_keeper_drive_sync(params, acc):
    """Process Keeper Drive atomic sync objects and store in caches.

    Stages, in order: users (so later stages can resolve usernames from
    ``params.user_cache``), folders/keys/accesses, records, links and
    folder-record joins, removals, orphan purge, raw DAG data, and finally
    the decrypt + entity-reconstruction passes over the merged caches.
    """
    folders = acc.get('folders') or []
    folder_keys = acc.get('folder_keys') or []
    folder_accesses = acc.get('folder_accesses') or []
    revoked_folder_accesses = acc.get('revoked_folder_accesses') or []
    denied_folder_accesses = acc.get('denied_folder_accesses') or []
    records = acc.get('records') or []
    record_data_list = acc.get('record_data') or []
    record_keys = acc.get('record_keys') or []
    record_accesses = acc.get('record_accesses') or []
    revoked_record_accesses = acc.get('revoked_record_accesses') or []
    folder_records = acc.get('folder_records') or []
    removed_folders = acc.get('removed_folders') or []
    removed_folder_records = acc.get('removed_folder_records') or []
    users = acc.get('users') or []
    folder_sharing_states = acc.get('folder_sharing_states') or []
    record_sharing_states = acc.get('record_sharing_states') or []
    record_links = acc.get('record_links') or []
    removed_record_links = acc.get('removed_record_links') or []
    raw_dag_data = acc.get('raw_dag_data') or []

    _process_users(params, users)
    _process_folders(params, folders)
    _process_folder_keys(params, folder_keys)
    _process_folder_accesses(params, folder_accesses)
    _process_revoked_folder_accesses(params, revoked_folder_accesses)
    _process_denied_folder_accesses(params, denied_folder_accesses)
    _process_folder_sharing_states(params, folder_sharing_states)

    _process_records(params, records)
    _process_record_data(params, record_data_list)
    _process_record_keys(params, record_keys)
    _process_record_accesses(params, record_accesses)
    _process_revoked_record_accesses(params, revoked_record_accesses)
    _process_record_sharing_states(params, record_sharing_states)

    _process_record_links(params, record_links)
    _process_removed_record_links(params, removed_record_links)

    _process_folder_records(params, folder_records)
    _process_removed_folder_records(params, removed_folder_records)

    # Removals are applied after all additions so that removals always win.
    _process_removed_folders(params, removed_folders)
    _purge_orphaned_records(params)
    _process_raw_dag_data(params, raw_dag_data)

    _decrypt_keeper_drive_keys(params)
    _reconstruct_keeper_drive_entities(params)


def _process_users(params, users):
    """Populate ``params.user_cache`` from KeeperDrive ``Users`` records.

    Maps base64-encoded account UID -> username; later stages use this to
    attach usernames to access entries.
    """
    for user in users:
        account_uid = utils.base64_url_encode(user.accountUid)
        params.user_cache[account_uid] = user.username


def _process_folders(params, folders):
    """Store base folder objects (encrypted; keys/data decrypted later).

    Optional proto fields (folderKey, timestamps, ownerInfo) are copied only
    when present/truthy so cache entries stay sparse.
    """
    for folder_data in folders:
        folder_uid = utils.base64_url_encode(folder_data.folderUid)
        folder_obj = {
            'folder_uid': folder_uid,
            'folder_type': folder_data.type if folder_data.type else 0,
            'parent_uid': utils.base64_url_encode(folder_data.parentUid) if folder_data.parentUid else None,
            'data': folder_data.data,  # still encrypted at this point
            'inherit_user_permissions': folder_data.inheritUserPermissions if folder_data.inheritUserPermissions else 0,
        }
        if folder_data.folderKey:
            folder_obj['folder_key'] = folder_data.folderKey
        if folder_data.dateCreated:
            folder_obj['date_created'] = folder_data.dateCreated
        if folder_data.lastModified:
            folder_obj['last_modified'] = folder_data.lastModified
        if folder_data.HasField('ownerInfo'):
            folder_obj['owner_account_uid'] = utils.base64_url_encode(folder_data.ownerInfo.accountUid)
            folder_obj['owner_username'] = folder_data.ownerInfo.username
        params.keeper_drive_folders[folder_uid] = folder_obj


def _process_folder_keys(params, folder_keys):
    """Store encrypted folder keys grouped by folder UID.

    Multiple keys per folder are possible (e.g. user-key and parent-key
    encrypted variants); all are kept for the decrypt pass to try.
    """
    for fk in folder_keys:
        folder_uid = utils.base64_url_encode(fk.folderUid)
        if folder_uid not in params.keeper_drive_folder_keys:
            params.keeper_drive_folder_keys[folder_uid] = []
        params.keeper_drive_folder_keys[folder_uid].append({
            'folder_uid': folder_uid,
            'parent_uid': utils.base64_url_encode(fk.parentUid) if fk.parentUid else None,
            'encrypted_key': fk.folderKey,
            'key_type': fk.encryptedBy,  # key SOURCE (user vs parent), not algorithm
        })
def _process_folder_accesses(params, folder_accesses):
    """Store folder access entries grouped by folder UID.

    Each entry records who can access the folder, their role, flags, and —
    when present — a per-user encrypted folder key (with an explicit
    algorithm tag) that the decrypt pass can use as a fallback.
    """
    for fa in folder_accesses:
        folder_uid = utils.base64_url_encode(fa.folderUid)
        if folder_uid not in params.keeper_drive_folder_accesses:
            params.keeper_drive_folder_accesses[folder_uid] = []
        access_uid = utils.base64_url_encode(fa.accessTypeUid)
        # Resolve the accessor's username if _process_users already cached it.
        username = params.user_cache.get(access_uid) if hasattr(params, 'user_cache') else None
        fa_obj = {
            'folder_uid': folder_uid,
            'access_type_uid': access_uid,
            'access_type': fa.accessType,
            'access_role_type': fa.accessRoleType if fa.accessRoleType else 0,
            'inherited': fa.inherited if fa.inherited else False,
            'hidden': fa.hidden if fa.hidden else False,
            'date_created': fa.dateCreated if fa.dateCreated else 0,
            'last_modified': fa.lastModified if fa.lastModified else 0,
        }
        if username:
            fa_obj['username'] = username
        if fa.HasField('folderKey'):
            fa_obj['folder_key'] = {
                'encrypted_key': fa.folderKey.encryptedKey,
                'encrypted_key_type': fa.folderKey.encryptedKeyType,
            }
        if fa.HasField('tlaProperties'):
            # NOTE(review): this relies on `google.protobuf.json_format` being
            # reachable through a bare `import google` (namespace package);
            # confirm something imports the submodule, otherwise this can
            # raise AttributeError at runtime.
            fa_obj['tla_properties'] = google.protobuf.json_format.MessageToDict(
                fa.tlaProperties, preserving_proto_field_name=True
            )
        if fa.HasField('permissions'):
            p = fa.permissions
            fa_obj['permissions'] = {
                'can_add': p.canAdd,
                'can_remove': p.canRemove,
                'can_delete': p.canDelete,
                'can_list_access': p.canListAccess,
                'can_update_access': p.canUpdateAccess,
                'can_change_ownership': p.canChangeOwnership,
                'can_edit_records': p.canEditRecords,
                'can_view_records': p.canViewRecords,
                'can_approve_access': p.canApproveAccess,
                'can_request_access': p.canRequestAccess,
                'can_update_setting': p.canUpdateSetting,
                'can_list_records': p.canListRecords,
                'can_list_folders': p.canListFolders,
            }
        params.keeper_drive_folder_accesses[folder_uid].append(fa_obj)


def _process_folder_sharing_states(params, folder_sharing_states):
    """Store the per-folder sharing state from sync-down.

    Each ``FolderSharingState`` carries ``shared`` (bool) and ``count``
    (number of accessors aside from the current user). This is the only
    reliable signal in the sync response that tells us whether a folder
    the current user owns has been shared with someone else, because the
    ``folderAccesses`` list only contains the current user's own entry.
    """
    for fss in folder_sharing_states:
        try:
            folder_uid = utils.base64_url_encode(fss.folderUid)
        except Exception:
            # Skip malformed entries rather than failing the whole sync.
            continue
        params.keeper_drive_folder_sharing_states[folder_uid] = {
            'shared': bool(fss.shared),
            'count': int(fss.count) if fss.count else 0,
        }


def _process_revoked_folder_accesses(params, revoked_folder_accesses):
    """Drop folder access entries that the server explicitly revoked."""
    for rfa in revoked_folder_accesses:
        folder_uid = utils.base64_url_encode(rfa.folderUid)
        actor_uid = utils.base64_url_encode(rfa.actorUid)
        if folder_uid in params.keeper_drive_folder_accesses:
            params.keeper_drive_folder_accesses[folder_uid] = [
                fa for fa in params.keeper_drive_folder_accesses[folder_uid]
                if fa['access_type_uid'] != actor_uid
            ]


def _process_denied_folder_accesses(params, denied_folder_accesses):
    """Treat denied folder accesses as inaccessible: clear access + cached key."""
    for dfa in denied_folder_accesses:
        try:
            folder_uid = utils.base64_url_encode(dfa.folderUid)
            actor_uid = utils.base64_url_encode(dfa.actorUid)
            if folder_uid in params.keeper_drive_folder_accesses:
                params.keeper_drive_folder_accesses[folder_uid] = [
                    fa for fa in params.keeper_drive_folder_accesses[folder_uid]
                    if fa['access_type_uid'] != actor_uid
                ]
            if folder_uid in params.keeper_drive_folders:
                folder_obj = params.keeper_drive_folders[folder_uid]
                # Drop any previously decrypted key and flag the folder.
                folder_obj.pop('folder_key_unencrypted', None)
                folder_obj['denied'] = True
                logging.debug('Folder %s access denied for actor %s', folder_uid, actor_uid)
        except Exception as e:
            logging.debug('Failed to process denied folder access: %s', e)
def _process_records(params, records):
    """Store DriveRecord metadata (no encrypted content)."""
    for record in records:
        record_uid = utils.base64_url_encode(record.recordUid)
        record_obj = {
            'record_uid': record_uid,
            'revision': record.revision,
            'version': record.version,
            'shared': record.shared if record.shared else False,
            'client_modified_time': record.clientModifiedTime if record.clientModifiedTime else 0,
        }
        if record.fileSize:
            record_obj['file_size'] = record.fileSize
        if record.thumbnailSize:
            record_obj['thumbnail_size'] = record.thumbnailSize
        params.keeper_drive_records[record_uid] = record_obj


def _process_record_data(params, record_data_list):
    """Store record data blobs (decrypted later in ``_decrypt_*``)."""
    for rd in record_data_list:
        record_uid = utils.base64_url_encode(rd.recordUid)
        rd_obj = {
            'record_uid': record_uid,
            'data': rd.data,  # still encrypted at this point
        }
        if rd.HasField('user'):
            rd_obj['user_account_uid'] = utils.base64_url_encode(rd.user.accountUid)
            rd_obj['user_username'] = rd.user.username
        params.keeper_drive_record_data[record_uid] = rd_obj


def _process_record_keys(params, record_keys):
    """Store standalone encrypted record keys grouped by record UID.

    NOTE(review): field access here is snake_case (``record_uid``,
    ``user_uid``, ``record_key``) while the rest of this module reads
    camelCase proto fields — verify against the generated proto. This path
    only runs if a top-level ``recordKeys`` field ever exists in
    ``KeeperDriveData`` (see collect_from_response).
    """
    for rk in record_keys:
        record_uid = utils.base64_url_encode(rk.record_uid)
        if record_uid not in params.keeper_drive_record_keys:
            params.keeper_drive_record_keys[record_uid] = []
        params.keeper_drive_record_keys[record_uid].append({
            'record_uid': record_uid,
            'user_uid': utils.base64_url_encode(rk.user_uid),
            'record_key': rk.record_key,
            'encrypted_key_type': rk.encrypted_key_type,
        })


def _process_record_accesses(params, record_accesses):
    """Store record access entries grouped by record UID.

    ``hasattr`` guards are used because different proto revisions expose
    different optional flag fields; missing flags default to False/0.
    """
    for ra in record_accesses:
        record_uid = utils.base64_url_encode(ra.recordUid)
        if record_uid not in params.keeper_drive_record_accesses:
            params.keeper_drive_record_accesses[record_uid] = []
        access_uid = utils.base64_url_encode(ra.accessTypeUid)
        # Resolve the accessor's username if _process_users already cached it.
        username = params.user_cache.get(access_uid) if hasattr(params, 'user_cache') else None
        ra_obj = {
            'record_uid': record_uid,
            'access_uid': access_uid,
            'access_type': ra.accessType,
            'access_role_type': ra.accessRoleType,
            'owner': ra.owner if hasattr(ra, 'owner') else False,
            'inherited': ra.inherited if hasattr(ra, 'inherited') else False,
            'hidden': ra.hidden if hasattr(ra, 'hidden') else False,
            'denied_access': ra.deniedAccess if hasattr(ra, 'deniedAccess') else False,
            'can_view_title': ra.can_view_title if hasattr(ra, 'can_view_title') and ra.can_view_title else False,
            'can_edit': ra.can_edit if hasattr(ra, 'can_edit') and ra.can_edit else False,
            'can_view': ra.can_view if hasattr(ra, 'can_view') and ra.can_view else False,
            'can_list_access': ra.can_list_access if hasattr(ra, 'can_list_access') and ra.can_list_access else False,
            'can_update_access': ra.can_update_access if hasattr(ra, 'can_update_access') and ra.can_update_access else False,
            'can_delete': ra.can_delete if hasattr(ra, 'can_delete') and ra.can_delete else False,
            'can_change_ownership': ra.can_change_ownership if hasattr(ra, 'can_change_ownership') and ra.can_change_ownership else False,
            'can_request_access': ra.can_request_access if hasattr(ra, 'can_request_access') and ra.can_request_access else False,
            'can_approve_access': ra.can_approve_access if hasattr(ra, 'can_approve_access') and ra.can_approve_access else False,
            'date_created': ra.dateCreated if hasattr(ra, 'dateCreated') else 0,
            'last_modified': ra.lastModified if hasattr(ra, 'lastModified') else 0,
        }
        if username:
            ra_obj['username'] = username
        if hasattr(ra, 'tlaProperties') and ra.HasField('tlaProperties'):
            # NOTE(review): relies on `google.protobuf.json_format` being
            # reachable through a bare `import google` — confirm the submodule
            # is imported somewhere, otherwise AttributeError at runtime.
            ra_obj['tla_properties'] = google.protobuf.json_format.MessageToDict(
                ra.tlaProperties, preserving_proto_field_name=True
            )
        params.keeper_drive_record_accesses[record_uid].append(ra_obj)
def _process_revoked_record_accesses(params, revoked_record_accesses):
    """Drop record access entries that the server explicitly revoked."""
    for rra in revoked_record_accesses:
        record_uid = utils.base64_url_encode(rra.recordUid)
        actor_uid = utils.base64_url_encode(rra.actorUid)
        if record_uid in params.keeper_drive_record_accesses:
            params.keeper_drive_record_accesses[record_uid] = [
                ra for ra in params.keeper_drive_record_accesses[record_uid]
                if ra['access_uid'] != actor_uid
            ]


def _process_record_sharing_states(params, record_sharing_states):
    """Update each record's effective ``shared`` flag from sharing state.

    The record's cached 'shared' flag is only ever promoted to True here
    (OR-ed with the state), never demoted.
    """
    for rss in record_sharing_states:
        record_uid = utils.base64_url_encode(rss.recordUid)
        state_obj = {
            'record_uid': record_uid,
            'is_directly_shared': rss.isDirectlyShared,
            'is_indirectly_shared': rss.isIndirectlyShared,
            'is_shared': rss.isShared,
        }
        params.keeper_drive_record_sharing_states[record_uid] = state_obj
        if record_uid in params.keeper_drive_records:
            record_obj = params.keeper_drive_records[record_uid]
            record_obj['shared'] = record_obj.get('shared', False) or state_obj['is_shared']


def _process_record_links(params, record_links):
    """Store parent/child record link relationships and surface their keys.

    Links are de-duplicated per child by their encrypted key bytes.
    """
    for rl in record_links:
        child_uid = utils.base64_url_encode(rl.childRecordUid) if rl.childRecordUid else None
        parent_uid = utils.base64_url_encode(rl.parentRecordUid) if rl.parentRecordUid else None
        if not child_uid:
            continue
        link_obj = {
            'record_uid': child_uid,
            'parent_uid': parent_uid,
            'record_key': rl.recordKey,
        }
        if child_uid not in params.keeper_drive_record_links:
            params.keeper_drive_record_links[child_uid] = []
        existing_keys = [lk.get('record_key') for lk in params.keeper_drive_record_links[child_uid]]
        if rl.recordKey not in existing_keys:
            params.keeper_drive_record_links[child_uid].append(link_obj)

        # Record links carry encrypted record keys — feed them into record_keys
        # so the decrypt pass can pick them up.
        if rl.recordKey:
            if child_uid not in params.keeper_drive_record_keys:
                params.keeper_drive_record_keys[child_uid] = []
            params.keeper_drive_record_keys[child_uid].append({
                'record_uid': child_uid,
                'parent_uid': parent_uid,
                'record_key': rl.recordKey,
                'encrypted_key_type': folder_pb2.encrypted_by_data_key_gcm,
                'source': 'record_link',
            })


def _process_removed_record_links(params, removed_record_links):
    """Remove link entries that the server marked deleted.

    When the removal carries a specific key, only the matching link is
    dropped; otherwise every link for that child record is removed.
    """
    for rrl in removed_record_links:
        child_uid = utils.base64_url_encode(rrl.childRecordUid) if rrl.childRecordUid else None
        if not child_uid:
            continue
        if child_uid in params.keeper_drive_record_links:
            if rrl.recordKey:
                params.keeper_drive_record_links[child_uid] = [
                    lk for lk in params.keeper_drive_record_links[child_uid]
                    if lk.get('record_key') != rrl.recordKey
                ]
            else:
                del params.keeper_drive_record_links[child_uid]


def _process_folder_records(params, folder_records):
    """Store folder ↔ record associations and per-folder record keys.

    This is the primary channel for record keys: they are embedded in
    ``recordMetadata.encryptedRecordKey`` of each folder-record join.
    """
    for fr in folder_records:
        folder_uid = utils.base64_url_encode(fr.folderUid)
        record_uid = utils.base64_url_encode(fr.recordMetadata.recordUid)
        if folder_uid not in params.keeper_drive_folder_records:
            params.keeper_drive_folder_records[folder_uid] = set()
        params.keeper_drive_folder_records[folder_uid].add(record_uid)

        has_key = (hasattr(fr.recordMetadata, 'encryptedRecordKey')
                   and fr.recordMetadata.encryptedRecordKey)
        if not has_key:
            continue

        if record_uid not in params.keeper_drive_record_keys:
            params.keeper_drive_record_keys[record_uid] = []
        rk_obj = {
            'record_uid': record_uid,
            'folder_uid': folder_uid,
            'record_key': fr.recordMetadata.encryptedRecordKey,
            'encrypted_key_type': fr.recordMetadata.encryptedRecordKeyType,
            # ENCRYPTED_BY_USER_KEY (0) → record key encrypted with user data_key
            # ENCRYPTED_BY_PARENT_KEY (1) → record key encrypted with the folder key
            'folder_key_encryption_type': int(fr.folderKeyEncryptionType),
        }
        if fr.recordMetadata.HasField('tlaProperties'):
            # NOTE(review): relies on `google.protobuf.json_format` being
            # reachable through a bare `import google` — verify the submodule
            # import exists.
            rk_obj['tla_properties'] = google.protobuf.json_format.MessageToDict(
                fr.recordMetadata.tlaProperties, preserving_proto_field_name=True
            )
        params.keeper_drive_record_keys[record_uid].append(rk_obj)
folder_uid, + 'record_key': fr.recordMetadata.encryptedRecordKey, + 'encrypted_key_type': fr.recordMetadata.encryptedRecordKeyType, + # ENCRYPTED_BY_USER_KEY (0) → record key encrypted with user data_key + # ENCRYPTED_BY_PARENT_KEY (1) → record key encrypted with the folder key + 'folder_key_encryption_type': int(fr.folderKeyEncryptionType), + } + if fr.recordMetadata.HasField('tlaProperties'): + rk_obj['tla_properties'] = google.protobuf.json_format.MessageToDict( + fr.recordMetadata.tlaProperties, preserving_proto_field_name=True + ) + params.keeper_drive_record_keys[record_uid].append(rk_obj) + + +def _process_removed_folder_records(params, removed_folder_records): + """Remove folder ↔ record associations marked as deleted.""" + for rfr in removed_folder_records: + folder_uid = utils.base64_url_encode(rfr.folder_uid) + record_uid = utils.base64_url_encode(rfr.record_uid) + if folder_uid in params.keeper_drive_folder_records: + params.keeper_drive_folder_records[folder_uid].discard(record_uid) + + +def _process_removed_folders(params, removed_folders): + """Drop folders flagged as removed in this sync batch. + + Applied after all folder/record additions so removals always win. + """ + for rf in removed_folders: + folder_uid = utils.base64_url_encode(rf.folder_uid) + logging.debug('Removing KeeperDrive folder from cache: %s', folder_uid) + + params.keeper_drive_folders.pop(folder_uid, None) + params.keeper_drive_folder_keys.pop(folder_uid, None) + params.keeper_drive_folder_accesses.pop(folder_uid, None) + params.keeper_drive_folder_sharing_states.pop(folder_uid, None) + params.keeper_drive_folder_records.pop(folder_uid, None) + params.subfolder_cache.pop(folder_uid, None) + params.subfolder_record_cache.pop(folder_uid, None) + + +def _purge_orphaned_records(params): + """Drop records that no longer belong to any folder. + + Without this pass, records removed via folder deletion would still appear + in ``kd-list`` after a successful removal + sync_down. 
+ """ + all_folder_record_uids = { + uid + for rec_set in params.keeper_drive_folder_records.values() + for uid in rec_set + } + orphaned = [uid for uid in list(params.keeper_drive_records) + if uid not in all_folder_record_uids] + for uid in orphaned: + params.keeper_drive_records.pop(uid, None) + params.keeper_drive_record_data.pop(uid, None) + params.keeper_drive_record_keys.pop(uid, None) + params.keeper_drive_record_accesses.pop(uid, None) + params.keeper_drive_record_sharing_states.pop(uid, None) + params.keeper_drive_record_links.pop(uid, None) + params.record_cache.pop(uid, None) + params.meta_data_cache.pop(uid, None) + params.record_owner_cache.pop(uid, None) + logging.debug('Purged orphaned KeeperDrive record from cache: %s', uid) + + +def _process_raw_dag_data(params, raw_dag_data): + """Convert raw DAG protobuf entries to dicts and append to the cache.""" + if not raw_dag_data: + return + for dag_entry in raw_dag_data: + try: + dag_dict = google.protobuf.json_format.MessageToDict( + dag_entry, preserving_proto_field_name=True + ) + except Exception as e: + logging.debug(f"Failed to parse Keeper Drive DAG data: {e}") + dag_dict = {'error': str(e)} + params.keeper_drive_raw_dag_data.append(dag_dict) + + +def _try_decrypt_symmetric(enc_key, sym_key): + """Try AES-256-GCM then AES-256-CBC with *sym_key*. Returns plaintext or None.""" + for fn in (crypto.decrypt_aes_v2, crypto.decrypt_aes_v1): + try: + result = fn(enc_key, sym_key) + if result: + return result + except Exception: + pass + return None + + +def _try_decrypt_with_user_keys(enc_key, params): + """Try every available user key (symmetric then asymmetric). 
def _decrypt_keeper_drive_keys(params):
    """Decrypt Keeper Drive folder and record keys.

    Runs a fixed-point loop over the folder cache: decrypting one folder's
    key can unlock children whose keys are encrypted with the parent folder
    key, so passes repeat until nothing new decrypts. Record keys are
    decrypted afterwards via ``_decrypt_keeper_drive_record_keys``.
    """
    newly_decrypted = True

    while newly_decrypted:
        newly_decrypted = False

        for folder_uid, folder_obj in params.keeper_drive_folders.items():
            if 'folder_key_unencrypted' in folder_obj:
                continue

            folder_key = None

            # Primary source: folderKeys entries, tagged only with the key
            # SOURCE (user vs parent), not the algorithm.
            if folder_uid in params.keeper_drive_folder_keys:
                for fk in params.keeper_drive_folder_keys[folder_uid]:
                    enc_key = fk['encrypted_key']
                    try:
                        if fk['key_type'] == folder_pb2.ENCRYPTED_BY_USER_KEY:
                            # FolderKeyEncryptionType only tells us the KEY SOURCE (user vs parent),
                            # not the encryption algorithm. Try all algorithms in likelihood order:
                            #   AES-256-GCM (60 B) — modern default
                            #   AES-256-CBC (48 B) — legacy
                            #   RSA-2048 (256 B)   — shared folder re-encrypted for this user
                            #   ECC                — EC-based key wrap
                            folder_key = _try_decrypt_with_user_keys(enc_key, params)
                            if folder_key:
                                break
                        elif fk['key_type'] == folder_pb2.ENCRYPTED_BY_PARENT_KEY:
                            parent_uid = folder_obj.get('parent_uid')
                            if parent_uid and parent_uid in params.keeper_drive_folders:
                                parent_folder = params.keeper_drive_folders[parent_uid]
                                # Parent may decrypt on a later pass; skip for now.
                                if 'folder_key_unencrypted' in parent_folder:
                                    parent_key = parent_folder['folder_key_unencrypted']
                                    folder_key = _try_decrypt_symmetric(enc_key, parent_key)
                                    if folder_key:
                                        break
                    except Exception as e:
                        logging.debug(f"Failed to decrypt folder key for {folder_uid}: {e}")

            # Fallback: try from folder access data (EncryptedDataKey — has explicit algorithm)
            if not folder_key and folder_uid in params.keeper_drive_folder_accesses:
                for fa in params.keeper_drive_folder_accesses[folder_uid]:
                    if 'folder_key' not in fa:
                        continue

                    try:
                        encrypted_key = fa['folder_key']['encrypted_key']
                        key_type = fa['folder_key']['encrypted_key_type']

                        if key_type == folder_pb2.encrypted_by_data_key_gcm:
                            folder_key = crypto.decrypt_aes_v2(encrypted_key, params.data_key)
                        elif key_type == folder_pb2.encrypted_by_data_key:
                            folder_key = crypto.decrypt_aes_v1(encrypted_key, params.data_key)
                        elif key_type == folder_pb2.encrypted_by_public_key:
                            if params.rsa_key2:
                                folder_key = crypto.decrypt_rsa(encrypted_key, params.rsa_key2)
                        elif key_type == folder_pb2.encrypted_by_public_key_ecc:
                            if params.ecc_key:
                                folder_key = crypto.decrypt_ec(encrypted_key, params.ecc_key)
                        else:
                            # Unknown type — try all user keys as a last resort
                            folder_key = _try_decrypt_with_user_keys(encrypted_key, params)

                        if folder_key:
                            break
                    except Exception as e:
                        logging.debug(f"Failed to decrypt folder key for {folder_uid} from access data: {e}")

            if folder_key:
                folder_obj['folder_key_unencrypted'] = folder_key
                newly_decrypted = True

                # Decrypt the folder's data blob (name/color) with the new key.
                if 'data' in folder_obj and folder_obj['data']:
                    try:
                        data_bytes = crypto.decrypt_aes_v2(folder_obj['data'], folder_key)
                        data_json = json.loads(data_bytes.decode('utf-8'))
                        folder_obj['name'] = data_json.get('name', 'Unnamed Folder')
                        if 'color' in data_json:
                            folder_obj['color'] = data_json['color']
                    except Exception as e:
                        logging.debug(f"Failed to decrypt folder data for {folder_uid}: {e}")

    _decrypt_keeper_drive_record_keys(params)
def _try_decrypt_record_key(rk, params):
    """Try all applicable methods to decrypt a single record key entry.

    Order: explicit asymmetric types first, then (for record-link keys) the
    parent record key, then symmetric candidates ordered by the key-source
    hint in ``folder_key_encryption_type``.
    Returns decrypted key bytes or None.
    """
    encrypted_key = rk['record_key']
    key_type = rk.get('encrypted_key_type', 0)
    folder_uid = rk.get('folder_uid')
    parent_uid = rk.get('parent_uid')

    # 1. Try public key decryption (works regardless of source)
    if key_type == folder_pb2.encrypted_by_public_key:
        if params.rsa_key2:
            try:
                return crypto.decrypt_rsa(encrypted_key, params.rsa_key2)
            except Exception as e:
                logging.debug(f"RSA decrypt failed: {e}")
        return None
    if key_type == folder_pb2.encrypted_by_public_key_ecc:
        if params.ecc_key:
            try:
                return crypto.decrypt_ec(encrypted_key, params.ecc_key)
            except Exception as e:
                logging.debug(f"EC decrypt failed: {e}")
        return None

    # 2. For record-link keys, try parent record key then data key
    if rk.get('source') == 'record_link' and parent_uid:
        if parent_uid in params.keeper_drive_records:
            parent_obj = params.keeper_drive_records[parent_uid]
            if 'record_key_unencrypted' in parent_obj:
                try:
                    return crypto.decrypt_aes_v2(encrypted_key, parent_obj['record_key_unencrypted'])
                except Exception:
                    pass
                try:
                    return crypto.decrypt_aes_v1(encrypted_key, parent_obj['record_key_unencrypted'])
                except Exception:
                    pass

    # Build ordered list of keys to try.
    # Use folderKeyEncryptionType when available to prefer the correct key source:
    #   ENCRYPTED_BY_USER_KEY (0)   → data_key should come first
    #   ENCRYPTED_BY_PARENT_KEY (1) → folder key should come first (default / unknown)
    fket = rk.get('folder_key_encryption_type')
    folder_key_val = None
    if folder_uid and folder_uid in params.keeper_drive_folders:
        folder_obj = params.keeper_drive_folders[folder_uid]
        if 'folder_key_unencrypted' in folder_obj:
            folder_key_val = folder_obj['folder_key_unencrypted']

    keys_to_try = []
    if fket == int(folder_pb2.ENCRYPTED_BY_USER_KEY):
        # Record key was encrypted directly with the user's data key
        keys_to_try.append(('data', params.data_key))
        if folder_key_val is not None:
            keys_to_try.append(('folder', folder_key_val))
    else:
        # Record key was encrypted with the parent folder key (or unknown — try folder first)
        if folder_key_val is not None:
            keys_to_try.append(('folder', folder_key_val))
        keys_to_try.append(('data', params.data_key))

    # 3. Symmetric decryption with candidate keys
    for label, dec_key in keys_to_try:
        if key_type == folder_pb2.encrypted_by_data_key_gcm:
            try:
                return crypto.decrypt_aes_v2(encrypted_key, dec_key)
            except Exception:
                continue
        elif key_type == folder_pb2.encrypted_by_data_key:
            try:
                return crypto.decrypt_aes_v1(encrypted_key, dec_key)
            except Exception:
                continue
        else:
            # Unknown/unspecified algorithm — try GCM then CBC.
            try:
                return crypto.decrypt_aes_v2(encrypted_key, dec_key)
            except Exception:
                pass
            try:
                return crypto.decrypt_aes_v1(encrypted_key, dec_key)
            except Exception:
                continue

    return None


def _decrypt_record_data(record_uid, record_key, params):
    """Decrypt record data using the record key and store data_json.

    Idempotent: returns early when data is absent or already decrypted.
    Tries AES-GCM first, then legacy AES-CBC.
    """
    if record_uid not in params.keeper_drive_record_data:
        return
    rd_obj = params.keeper_drive_record_data[record_uid]
    if 'data_json' in rd_obj:
        return
    if 'data' not in rd_obj or not rd_obj['data']:
        return
    try:
        try:
            data_bytes = crypto.decrypt_aes_v2(rd_obj['data'], record_key)
        except Exception:
            data_bytes = crypto.decrypt_aes_v1(rd_obj['data'], record_key)
        data_json = json.loads(data_bytes.decode('utf-8'))
        rd_obj['data_json'] = data_json
    except Exception as e:
        logging.warning(f"Failed to decrypt record data for {record_uid}: {e}")
+ for record_uid, record_obj in params.keeper_drive_records.items(): + if 'record_key_unencrypted' in record_obj: + continue + # Check meta_data_cache (decrypted record metadata from regular sync) + if record_uid in params.meta_data_cache: + meta = params.meta_data_cache[record_uid] + if 'record_key_unencrypted' in meta: + record_obj['record_key_unencrypted'] = meta['record_key_unencrypted'] + _decrypt_record_data(record_uid, meta['record_key_unencrypted'], params) + logging.debug(f"Record {record_uid}: key obtained from meta_data_cache") + continue + # Check record_cache (records already processed by regular vault sync) + if record_uid in params.record_cache: + cached = params.record_cache[record_uid] + if 'record_key_unencrypted' in cached: + record_obj['record_key_unencrypted'] = cached['record_key_unencrypted'] + _decrypt_record_data(record_uid, cached['record_key_unencrypted'], params) + logging.debug(f"Record {record_uid}: key obtained from record_cache") + + # Pass 1: decrypt from keeper_drive_record_keys entries + for record_uid, record_keys_list in params.keeper_drive_record_keys.items(): + if record_uid not in params.keeper_drive_records: + continue + record_obj = params.keeper_drive_records[record_uid] + if 'record_key_unencrypted' in record_obj: + continue + + for rk in record_keys_list: + try: + record_key = _try_decrypt_record_key(rk, params) + if record_key: + record_obj['record_key_unencrypted'] = record_key + _decrypt_record_data(record_uid, record_key, params) + break + except Exception as e: + logging.debug(f"Failed to decrypt record key for {record_uid}: {e}") + + # Pass 2: for records still without keys, try all available decryption keys + # against the record data directly. This catches records whose keys weren't + # delivered through the expected folderRecords/recordKeys channels. 
+ undecrypted = [ + uid for uid, obj in params.keeper_drive_records.items() + if 'record_key_unencrypted' not in obj + and uid in params.keeper_drive_record_data + and params.keeper_drive_record_data[uid].get('data') + ] + if undecrypted: + logging.debug(f"Pass 2: {len(undecrypted)} record(s) still need decryption") + + for record_uid in undecrypted: + record_obj = params.keeper_drive_records[record_uid] + rd_obj = params.keeper_drive_record_data[record_uid] + record_key = None + + # Try record link keys with parent record key, then data key + if record_uid in params.keeper_drive_record_links: + for link in params.keeper_drive_record_links[record_uid]: + enc_key = link.get('record_key') + if not enc_key: + continue + parent = link.get('parent_uid') + if parent and parent in params.keeper_drive_records: + parent_obj = params.keeper_drive_records[parent] + if 'record_key_unencrypted' in parent_obj: + try: + record_key = crypto.decrypt_aes_v2(enc_key, parent_obj['record_key_unencrypted']) + except Exception: + try: + record_key = crypto.decrypt_aes_v1(enc_key, parent_obj['record_key_unencrypted']) + except Exception: + pass + if not record_key: + try: + record_key = crypto.decrypt_aes_v2(enc_key, params.data_key) + except Exception: + try: + record_key = crypto.decrypt_aes_v1(enc_key, params.data_key) + except Exception: + pass + if not record_key and params.rsa_key2: + try: + record_key = crypto.decrypt_rsa(enc_key, params.rsa_key2) + except Exception: + pass + if not record_key and params.ecc_key: + try: + record_key = crypto.decrypt_ec(enc_key, params.ecc_key) + except Exception: + pass + if record_key: + break + + # Try decrypting record data directly with folder keys as a last resort. + # If the record data was encrypted with a folder key (instead of a per-record key), + # this will succeed and we use the folder key as the effective record key. 
+ if not record_key: + candidate_folder_keys = set() + for folder_uid, rec_set in params.keeper_drive_folder_records.items(): + if record_uid in rec_set and folder_uid in params.keeper_drive_folders: + fobj = params.keeper_drive_folders[folder_uid] + if 'folder_key_unencrypted' in fobj: + candidate_folder_keys.add(id(fobj['folder_key_unencrypted'])) + try: + data_bytes = crypto.decrypt_aes_v2(rd_obj['data'], fobj['folder_key_unencrypted']) + data_json = json.loads(data_bytes.decode('utf-8')) + rd_obj['data_json'] = data_json + record_obj['record_key_unencrypted'] = fobj['folder_key_unencrypted'] + logging.debug(f"Record {record_uid}: decrypted data directly with folder key {folder_uid}") + record_key = fobj['folder_key_unencrypted'] + break + except Exception: + pass + + if record_key and 'record_key_unencrypted' not in record_obj: + record_obj['record_key_unencrypted'] = record_key + _decrypt_record_data(record_uid, record_key, params) + + # Log remaining undecrypted records + still_undecrypted = [ + uid for uid, obj in params.keeper_drive_records.items() + if 'record_key_unencrypted' not in obj + ] + if still_undecrypted: + logging.debug( + f"KeeperDrive: {len(still_undecrypted)} record(s) could not be decrypted: " + f"{still_undecrypted[:5]}{'...' 
if len(still_undecrypted) > 5 else ''}" + ) + + +def _reconstruct_keeper_drive_entities(params): + """Reconstruct complete Keeper Drive entities from atomic objects.""" + for folder_uid, folder_obj in params.keeper_drive_folders.items(): + if 'folder_key_unencrypted' not in folder_obj: + continue + + user_folder = { + 'folder_uid': folder_uid, + 'type': 'user_folder', + 'name': folder_obj.get('name', 'Unnamed Folder'), + 'folder_key_unencrypted': folder_obj['folder_key_unencrypted'], + 'source': 'keeper_drive', + } + + if 'parent_uid' in folder_obj and folder_obj['parent_uid']: + user_folder['parent_uid'] = folder_obj['parent_uid'] + + if 'color' in folder_obj: + user_folder['color'] = folder_obj['color'] + + params.subfolder_cache[folder_uid] = user_folder + + for folder_uid, record_uids in params.keeper_drive_folder_records.items(): + # Replace (not additive) so that records removed from a folder are + # evicted from the subfolder_record_cache on the very next sync. + params.subfolder_record_cache[folder_uid] = set(record_uids) + + for record_uid, record_obj in params.keeper_drive_records.items(): + if 'record_key_unencrypted' not in record_obj: + continue + + if record_uid not in params.keeper_drive_record_data: + continue + + rd_obj = params.keeper_drive_record_data[record_uid] + if 'data_json' not in rd_obj: + continue + + record_entry = { + 'record_uid': record_uid, + 'revision': record_obj.get('revision', 0), + 'version': record_obj.get('version', 0), + 'shared': record_obj.get('shared', False), + 'record_key_unencrypted': record_obj['record_key_unencrypted'], + 'data_unencrypted': json.dumps(rd_obj['data_json']).encode('utf-8'), + 'extra_unencrypted': None, + 'udata': {}, + 'source': 'keeper_drive', + } + + params.record_cache[record_uid] = record_entry + + if record_uid not in params.meta_data_cache: + meta_data = { + 'record_uid': record_uid, + 'record_key_unencrypted': record_obj['record_key_unencrypted'], + 'can_share': True, + 'can_edit': True, + } + 
if 'user_account_uid' in rd_obj: + meta_data['owner_account_uid'] = rd_obj['user_account_uid'] + if rd_obj['user_account_uid'] in params.user_cache: + meta_data['owner_username'] = params.user_cache[rd_obj['user_account_uid']] + params.meta_data_cache[record_uid] = meta_data + + if record_uid not in params.record_owner_cache: + if 'user_account_uid' in rd_obj: + is_owner = (rd_obj['user_account_uid'] == utils.base64_url_encode(params.account_uid_bytes)) + params.record_owner_cache[record_uid] = RecordOwner( + is_owner, + rd_obj['user_account_uid'] + ) \ No newline at end of file diff --git a/keepercommander/loginv3.py b/keepercommander/loginv3.py index b8ef29914..2985ad28f 100644 --- a/keepercommander/loginv3.py +++ b/keepercommander/loginv3.py @@ -565,6 +565,9 @@ def populateAccountSummary(params: KeeperParams): params.forbid_rsa = acct_summary_dict_snake_case.get('forbid_key_type2') is True params.is_enterprise_admin = acct_summary_dict_snake_case.get('is_enterprise_admin') is True + # disallowed features + params.disallowed_features = list(acct_summary.disallowedFeatures) if acct_summary.disallowedFeatures else [] + # settings params.settings = acct_summary_dict_snake_case['settings'] diff --git a/keepercommander/params.py b/keepercommander/params.py index 1eedcc9e2..234ed03f2 100644 --- a/keepercommander/params.py +++ b/keepercommander/params.py @@ -198,6 +198,7 @@ def __init__(self, config_filename='', config=None, server='keepersecurity.com') self.enterprise = None self.automators = None self.is_enterprise_admin = False + self.disallowed_features = [] # type: list[str] self.enterprise_loader = None self.enterprise_id = 0 self.msp_tree_key = None @@ -219,6 +220,19 @@ def __init__(self, config_filename='', config=None, server='keepersecurity.com') self.breach_watch_security_data = {} self.security_score_data = {} self.sso_login_info = None + # Keeper Drive caches for atomic sync objects + self.keeper_drive_folders = {} # folder_uid -> FolderData + 
self.keeper_drive_folder_keys = {} # folder_uid -> list of FolderKey + self.keeper_drive_folder_accesses = {} # folder_uid -> list of FolderAccessData + self.keeper_drive_records = {} # record_uid -> DriveRecord + self.keeper_drive_record_data = {} # record_uid -> RecordData + self.keeper_drive_record_keys = {} # record_uid -> list of RecordKey + self.keeper_drive_record_accesses = {} # record_uid -> list of RecordAccessData + self.keeper_drive_folder_records = {} # folder_uid -> set of record_uids + self.keeper_drive_folder_sharing_states = {} # folder_uid -> {shared, count} + self.keeper_drive_record_sharing_states = {} # record_uid -> sharing state dict + self.keeper_drive_record_links = {} # record_uid -> list of record link dicts + self.keeper_drive_raw_dag_data = [] # list of raw DAG entry dicts self.__proxy = None self.ssh_agent = None self.unmask_all = False @@ -274,6 +288,7 @@ def clear_session(self): self.settings = None self.enforcements = None self.is_enterprise_admin = False + self.disallowed_features = [] self.enterprise = None self.automators = None self.enterprise_loader = None @@ -291,6 +306,19 @@ def clear_session(self): self.breach_watch_security_data = {} self.security_score_data.clear() self.sso_login_info = None + # Clear Keeper Drive caches + self.keeper_drive_folders = {} + self.keeper_drive_folder_keys = {} + self.keeper_drive_folder_accesses = {} + self.keeper_drive_records = {} + self.keeper_drive_record_data = {} + self.keeper_drive_record_keys = {} + self.keeper_drive_record_accesses = {} + self.keeper_drive_folder_records = {} + self.keeper_drive_folder_sharing_states = {} + self.keeper_drive_record_sharing_states = {} + self.keeper_drive_record_links = {} + self.keeper_drive_raw_dag_data = [] self.ws = None if self.ssh_agent: self.ssh_agent.close() @@ -335,6 +363,9 @@ def queue_audit_event(self, name, **kwargs): server = property(__get_server, __set_server) rest_context = property(__get_rest_context) + def is_feature_disallowed(self, 
feature_name): # type: (str) -> bool + return isinstance(self.disallowed_features, list) and feature_name in self.disallowed_features + def get_share_account_timestamp(self): if isinstance(self.settings, dict): share_account_to = self.settings.get('share_account_to') diff --git a/keepercommander/proto/AccountSummary_pb2.py b/keepercommander/proto/AccountSummary_pb2.py index 2cb5b117b..b6e830a35 100644 --- a/keepercommander/proto/AccountSummary_pb2.py +++ b/keepercommander/proto/AccountSummary_pb2.py @@ -14,50 +14,50 @@ from . import APIRequest_pb2 as APIRequest__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x41\x63\x63ountSummary.proto\x12\x0e\x41\x63\x63ountSummary\x1a\x10\x41PIRequest.proto\"N\n\x15\x41\x63\x63ountSummaryRequest\x12\x16\n\x0esummaryVersion\x18\x01 \x01(\x05\x12\x1d\n\x15includeRecentActivity\x18\x02 \x01(\x08\"\x98\x05\n\x16\x41\x63\x63ountSummaryElements\x12\x11\n\tclientKey\x18\x01 \x01(\x0c\x12*\n\x08settings\x18\x02 \x01(\x0b\x32\x18.AccountSummary.Settings\x12*\n\x08keysInfo\x18\x03 \x01(\x0b\x32\x18.AccountSummary.KeysInfo\x12)\n\x08syncLogs\x18\x04 \x03(\x0b\x32\x17.AccountSummary.SyncLog\x12\x19\n\x11isEnterpriseAdmin\x18\x05 \x01(\x08\x12(\n\x07license\x18\x06 \x01(\x0b\x32\x17.AccountSummary.License\x12$\n\x05group\x18\x07 \x01(\x0b\x32\x15.AccountSummary.Group\x12\x32\n\x0c\x45nforcements\x18\x08 \x01(\x0b\x32\x1c.AccountSummary.Enforcements\x12(\n\x06Images\x18\t \x03(\x0b\x32\x18.AccountSummary.KeyValue\x12\x30\n\x0fpersonalLicense\x18\n \x01(\x0b\x32\x17.AccountSummary.License\x12\x1e\n\x16\x66ixSharedFolderRecords\x18\x0b \x01(\x08\x12\x11\n\tusernames\x18\x0c \x03(\t\x12+\n\x07\x64\x65vices\x18\r \x03(\x0b\x32\x1a.AccountSummary.DeviceInfo\x12\x14\n\x0cisShareAdmin\x18\x0e \x01(\x08\x12\x17\n\x0f\x61\x63\x63ountRecovery\x18\x0f \x01(\x08\x12\x1d\n\x15\x61\x63\x63ountRecoveryPrompt\x18\x10 \x01(\x08\x12\'\n\x1fminMasterPasswordLengthNoPrompt\x18\x11 \x01(\x05\x12\x16\n\x0e\x66orbidKeyType2\x18\x12 
\x01(\x08\"\x8b\x03\n\nDeviceInfo\x12\x1c\n\x14\x65ncryptedDeviceToken\x18\x01 \x01(\x0c\x12\x12\n\ndeviceName\x18\x02 \x01(\t\x12\x32\n\x0c\x64\x65viceStatus\x18\x03 \x01(\x0e\x32\x1c.Authentication.DeviceStatus\x12\x17\n\x0f\x64\x65vicePublicKey\x18\x04 \x01(\x0c\x12 \n\x18\x65ncryptedDataKeyDoNotUse\x18\x05 \x01(\x0c\x12\x15\n\rclientVersion\x18\x06 \x01(\t\x12\x10\n\x08username\x18\x07 \x01(\t\x12\x11\n\tipAddress\x18\x08 \x01(\t\x12\x1a\n\x12\x61pproveRequestTime\x18\t \x01(\x03\x12\x1f\n\x17\x65ncryptedDataKeyPresent\x18\n \x01(\x08\x12\x0f\n\x07groupId\x18\x0b \x01(\x03\x12\x16\n\x0e\x64\x65vicePlatform\x18\x0c \x01(\t\x12:\n\x10\x63lientFormFactor\x18\r \x01(\x0e\x32 .Authentication.ClientFormFactor\"\xc1\x01\n\x08KeysInfo\x12\x18\n\x10\x65ncryptionParams\x18\x01 \x01(\x0c\x12\x18\n\x10\x65ncryptedDataKey\x18\x02 \x01(\x0c\x12\x19\n\x11\x64\x61taKeyBackupDate\x18\x03 \x01(\x01\x12\x13\n\x0buserAuthUid\x18\x04 \x01(\x0c\x12\x1b\n\x13\x65ncryptedPrivateKey\x18\x05 \x01(\x0c\x12\x1e\n\x16\x65ncryptedEccPrivateKey\x18\x06 \x01(\x0c\x12\x14\n\x0c\x65\x63\x63PublicKey\x18\x07 \x01(\x0c\"\x81\x01\n\x07SyncLog\x12\x13\n\x0b\x63ountryName\x18\x01 \x01(\t\x12\x12\n\nsecondsAgo\x18\x02 \x01(\x03\x12\x12\n\ndeviceName\x18\x03 \x01(\t\x12\x13\n\x0b\x63ountryCode\x18\x04 \x01(\t\x12\x11\n\tdeviceUID\x18\x05 \x01(\x0c\x12\x11\n\tipAddress\x18\x06 \x01(\t\"\xe7\x06\n\x07License\x12\x18\n\x10subscriptionCode\x18\x01 \x01(\t\x12\x15\n\rproductTypeId\x18\x02 \x01(\x05\x12\x17\n\x0fproductTypeName\x18\x03 \x01(\t\x12\x16\n\x0e\x65xpirationDate\x18\x04 \x01(\t\x12\x1e\n\x16secondsUntilExpiration\x18\x05 \x01(\x03\x12\x12\n\nmaxDevices\x18\x06 \x01(\x05\x12\x14\n\x0c\x66ilePlanType\x18\x07 \x01(\x05\x12\x11\n\tbytesUsed\x18\x08 \x01(\x03\x12\x12\n\nbytesTotal\x18\t \x01(\x03\x12%\n\x1dsecondsUntilStorageExpiration\x18\n \x01(\x03\x12\x1d\n\x15storageExpirationDate\x18\x0b \x01(\t\x12,\n$hasAutoRenewableAppstoreSubscription\x18\x0c 
\x01(\x08\x12\x13\n\x0b\x61\x63\x63ountType\x18\r \x01(\x05\x12\x18\n\x10uploadsRemaining\x18\x0e \x01(\x05\x12\x14\n\x0c\x65nterpriseId\x18\x0f \x01(\x05\x12\x13\n\x0b\x63hatEnabled\x18\x10 \x01(\x08\x12 \n\x18\x61uditAndReportingEnabled\x18\x11 \x01(\x08\x12!\n\x19\x62reachWatchFeatureDisable\x18\x12 \x01(\x08\x12\x12\n\naccountUid\x18\x13 \x01(\x0c\x12\x1c\n\x14\x61llowPersonalLicense\x18\x14 \x01(\x08\x12\x12\n\nlicensedBy\x18\x15 \x01(\t\x12\r\n\x05\x65mail\x18\x16 \x01(\t\x12\x1a\n\x12\x62reachWatchEnabled\x18\x17 \x01(\x08\x12\x1a\n\x12\x62reachWatchScanned\x18\x18 \x01(\x08\x12\x1d\n\x15\x62reachWatchExpiration\x18\x19 \x01(\x03\x12\x1e\n\x16\x62reachWatchDateCreated\x18\x1a \x01(\x03\x12%\n\x05\x65rror\x18\x1b \x01(\x0b\x32\x16.AccountSummary.Result\x12\x12\n\nexpiration\x18\x1d \x01(\x03\x12\x19\n\x11storageExpiration\x18\x1e \x01(\x03\x12\x14\n\x0cuploadsCount\x18\x1f \x01(\x05\x12\r\n\x05units\x18 \x01(\x05\x12\x19\n\x11pendingEnterprise\x18! \x01(\x08\x12\x14\n\x0cisPamEnabled\x18\" \x01(\x08\"\xa3\x01\n\x05\x41\x64\x64On\x12\x14\n\x0clicenseKeyId\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x16\n\x0e\x65xpirationDate\x18\x03 \x01(\x03\x12\x13\n\x0b\x63reatedDate\x18\x04 \x01(\x03\x12\x0f\n\x07isTrial\x18\x05 \x01(\x08\x12\x0f\n\x07\x65nabled\x18\x06 \x01(\x08\x12\x0f\n\x07scanned\x18\x07 \x01(\x08\x12\x16\n\x0e\x66\x65\x61tureDisable\x18\x08 \x01(\x08\"\xbd\t\n\x08Settings\x12\r\n\x05\x61udit\x18\x01 \x01(\x08\x12!\n\x19mustPerformAccountShareBy\x18\x02 \x01(\x03\x12>\n\x0eshareAccountTo\x18\x03 \x03(\x0b\x32&.AccountSummary.MissingAccountShareKey\x12+\n\x05rules\x18\x04 \x03(\x0b\x32\x1c.AccountSummary.PasswordRule\x12\x1a\n\x12passwordRulesIntro\x18\x05 \x01(\t\x12\x16\n\x0e\x61utoBackupDays\x18\x06 \x01(\x05\x12\r\n\x05theme\x18\x07 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x08 \x01(\t\x12\x14\n\x0c\x63hannelValue\x18\t \x01(\t\x12\x15\n\rrsaConfigured\x18\n \x01(\x08\x12\x15\n\remailVerified\x18\x0b 
\x01(\x08\x12\"\n\x1amasterPasswordLastModified\x18\x0c \x01(\x01\x12\x18\n\x10\x61\x63\x63ountFolderKey\x18\r \x01(\x0c\x12\x31\n\x0csecurityKeys\x18\x0e \x03(\x0b\x32\x1b.AccountSummary.SecurityKey\x12+\n\tkeyValues\x18\x0f \x03(\x0b\x32\x18.AccountSummary.KeyValue\x12\x0f\n\x07ssoUser\x18\x10 \x01(\x08\x12\x18\n\x10onlineAccessOnly\x18\x11 \x01(\x08\x12\x1c\n\x14masterPasswordExpiry\x18\x12 \x01(\x05\x12\x19\n\x11twoFactorRequired\x18\x13 \x01(\x08\x12\x16\n\x0e\x64isallowExport\x18\x14 \x01(\x08\x12\x15\n\rrestrictFiles\x18\x15 \x01(\x08\x12\x1a\n\x12restrictAllSharing\x18\x16 \x01(\x08\x12\x17\n\x0frestrictSharing\x18\x17 \x01(\x08\x12\"\n\x1arestrictSharingIncomingAll\x18\x18 \x01(\x08\x12)\n!restrictSharingIncomingEnterprise\x18\x19 \x01(\x08\x12\x13\n\x0blogoutTimer\x18\x1a \x01(\x03\x12\x17\n\x0fpersistentLogin\x18\x1b \x01(\x08\x12\x1c\n\x14ipDisableAutoApprove\x18\x1c \x01(\x08\x12$\n\x1cshareDataKeyWithEccPublicKey\x18\x1d \x01(\x08\x12\'\n\x1fshareDataKeyWithDevicePublicKey\x18\x1e \x01(\x08\x12\x1a\n\x12RecordTypesCounter\x18\x1f \x01(\x05\x12$\n\x1cRecordTypesEnterpriseCounter\x18 \x01(\x05\x12\x1a\n\x12recordTypesEnabled\x18! 
\x01(\x08\x12\x1c\n\x14\x63\x61nManageRecordTypes\x18\" \x01(\x08\x12\x1d\n\x15recordTypesPAMCounter\x18# \x01(\x05\x12\x1a\n\x12logoutTimerMinutes\x18$ \x01(\x05\x12 \n\x18securityKeysNoUserVerify\x18% \x01(\x08\x12\x36\n\x08\x63hannels\x18& \x03(\x0e\x32$.Authentication.TwoFactorChannelType\x12\x19\n\x11personalUsernames\x18\' \x03(\t\"&\n\x08KeyValue\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"-\n\x0fKeyValueBoolean\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x08\"*\n\x0cKeyValueLong\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03\"=\n\x06Result\x12\x12\n\nresultCode\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x0e\n\x06result\x18\x03 \x01(\t\"\xc2\x01\n\x0c\x45nforcements\x12)\n\x07strings\x18\x01 \x03(\x0b\x32\x18.AccountSummary.KeyValue\x12\x31\n\x08\x62ooleans\x18\x02 \x03(\x0b\x32\x1f.AccountSummary.KeyValueBoolean\x12+\n\x05longs\x18\x03 \x03(\x0b\x32\x1c.AccountSummary.KeyValueLong\x12\'\n\x05jsons\x18\x04 \x03(\x0b\x32\x18.AccountSummary.KeyValue\"<\n\x16MissingAccountShareKey\x12\x0f\n\x07role_id\x18\x01 \x01(\x03\x12\x11\n\tpublicKey\x18\x02 \x01(\x0c\"u\n\x0cPasswordRule\x12\x10\n\x08ruleType\x18\x01 \x01(\t\x12\x0f\n\x07pattern\x18\x02 \x01(\t\x12\r\n\x05match\x18\x03 \x01(\x08\x12\x0f\n\x07minimum\x18\x04 \x01(\x05\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12\r\n\x05value\x18\x06 \x01(\t\"\x97\x01\n\x0bSecurityKey\x12\x10\n\x08\x64\x65viceId\x18\x01 \x01(\x03\x12\x12\n\ndeviceName\x18\x02 \x01(\t\x12\x11\n\tdateAdded\x18\x03 \x01(\x03\x12\x0f\n\x07isValid\x18\x04 \x01(\x08\x12>\n\x12\x64\x65viceRegistration\x18\x05 \x01(\x0b\x32\".AccountSummary.DeviceRegistration\"y\n\x12\x44\x65viceRegistration\x12\x11\n\tkeyHandle\x18\x01 \x01(\t\x12\x11\n\tpublicKey\x18\x02 \x01(\x0c\x12\x17\n\x0f\x61ttestationCert\x18\x03 \x01(\t\x12\x0f\n\x07\x63ounter\x18\x04 \x01(\x03\x12\x13\n\x0b\x63ompromised\x18\x05 \x01(\x08\"k\n\x05Group\x12\r\n\x05\x61\x64min\x18\x01 
\x01(\x08\x12\x1d\n\x15groupVerificationCode\x18\x02 \x01(\t\x12\x34\n\radministrator\x18\x04 \x01(\x0b\x32\x1d.AccountSummary.Administrator\"\xc0\x01\n\rAdministrator\x12\x11\n\tfirstName\x18\x01 \x01(\t\x12\x10\n\x08lastName\x18\x02 \x01(\t\x12\r\n\x05\x65mail\x18\x03 \x01(\t\x12\x1c\n\x14\x63urrentNumberOfUsers\x18\x04 \x01(\x05\x12\x15\n\rnumberOfUsers\x18\x05 \x01(\x05\x12\x18\n\x10subscriptionCode\x18\x07 \x01(\t\x12\x16\n\x0e\x65xpirationDate\x18\x08 \x01(\t\x12\x14\n\x0cpurchaseDate\x18\t \x01(\tB*\n\x18\x63om.keepersecurity.protoB\x0e\x41\x63\x63ountSummaryb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x41\x63\x63ountSummary.proto\x12\x0e\x41\x63\x63ountSummary\x1a\x10\x41PIRequest.proto\"N\n\x15\x41\x63\x63ountSummaryRequest\x12\x16\n\x0esummaryVersion\x18\x01 \x01(\x05\x12\x1d\n\x15includeRecentActivity\x18\x02 \x01(\x08\"\xcc\x05\n\x16\x41\x63\x63ountSummaryElements\x12\x11\n\tclientKey\x18\x01 \x01(\x0c\x12*\n\x08settings\x18\x02 \x01(\x0b\x32\x18.AccountSummary.Settings\x12*\n\x08keysInfo\x18\x03 \x01(\x0b\x32\x18.AccountSummary.KeysInfo\x12)\n\x08syncLogs\x18\x04 \x03(\x0b\x32\x17.AccountSummary.SyncLog\x12\x19\n\x11isEnterpriseAdmin\x18\x05 \x01(\x08\x12(\n\x07license\x18\x06 \x01(\x0b\x32\x17.AccountSummary.License\x12$\n\x05group\x18\x07 \x01(\x0b\x32\x15.AccountSummary.Group\x12\x32\n\x0c\x45nforcements\x18\x08 \x01(\x0b\x32\x1c.AccountSummary.Enforcements\x12(\n\x06Images\x18\t \x03(\x0b\x32\x18.AccountSummary.KeyValue\x12\x30\n\x0fpersonalLicense\x18\n \x01(\x0b\x32\x17.AccountSummary.License\x12\x1e\n\x16\x66ixSharedFolderRecords\x18\x0b \x01(\x08\x12\x11\n\tusernames\x18\x0c \x03(\t\x12+\n\x07\x64\x65vices\x18\r \x03(\x0b\x32\x1a.AccountSummary.DeviceInfo\x12\x14\n\x0cisShareAdmin\x18\x0e \x01(\x08\x12\x17\n\x0f\x61\x63\x63ountRecovery\x18\x0f \x01(\x08\x12\x1d\n\x15\x61\x63\x63ountRecoveryPrompt\x18\x10 \x01(\x08\x12\'\n\x1fminMasterPasswordLengthNoPrompt\x18\x11 
\x01(\x05\x12\x16\n\x0e\x66orbidKeyType2\x18\x12 \x01(\x08\x12\x16\n\x0e\x66orbidKeyType1\x18\x13 \x01(\x08\x12\x1a\n\x12\x64isallowedFeatures\x18\x14 \x03(\t\"\x8b\x03\n\nDeviceInfo\x12\x1c\n\x14\x65ncryptedDeviceToken\x18\x01 \x01(\x0c\x12\x12\n\ndeviceName\x18\x02 \x01(\t\x12\x32\n\x0c\x64\x65viceStatus\x18\x03 \x01(\x0e\x32\x1c.Authentication.DeviceStatus\x12\x17\n\x0f\x64\x65vicePublicKey\x18\x04 \x01(\x0c\x12 \n\x18\x65ncryptedDataKeyDoNotUse\x18\x05 \x01(\x0c\x12\x15\n\rclientVersion\x18\x06 \x01(\t\x12\x10\n\x08username\x18\x07 \x01(\t\x12\x11\n\tipAddress\x18\x08 \x01(\t\x12\x1a\n\x12\x61pproveRequestTime\x18\t \x01(\x03\x12\x1f\n\x17\x65ncryptedDataKeyPresent\x18\n \x01(\x08\x12\x0f\n\x07groupId\x18\x0b \x01(\x03\x12\x16\n\x0e\x64\x65vicePlatform\x18\x0c \x01(\t\x12:\n\x10\x63lientFormFactor\x18\r \x01(\x0e\x32 .Authentication.ClientFormFactor\"\xc1\x01\n\x08KeysInfo\x12\x18\n\x10\x65ncryptionParams\x18\x01 \x01(\x0c\x12\x18\n\x10\x65ncryptedDataKey\x18\x02 \x01(\x0c\x12\x19\n\x11\x64\x61taKeyBackupDate\x18\x03 \x01(\x01\x12\x13\n\x0buserAuthUid\x18\x04 \x01(\x0c\x12\x1b\n\x13\x65ncryptedPrivateKey\x18\x05 \x01(\x0c\x12\x1e\n\x16\x65ncryptedEccPrivateKey\x18\x06 \x01(\x0c\x12\x14\n\x0c\x65\x63\x63PublicKey\x18\x07 \x01(\x0c\"\x81\x01\n\x07SyncLog\x12\x13\n\x0b\x63ountryName\x18\x01 \x01(\t\x12\x12\n\nsecondsAgo\x18\x02 \x01(\x03\x12\x12\n\ndeviceName\x18\x03 \x01(\t\x12\x13\n\x0b\x63ountryCode\x18\x04 \x01(\t\x12\x11\n\tdeviceUID\x18\x05 \x01(\x0c\x12\x11\n\tipAddress\x18\x06 \x01(\t\"\xfd\x06\n\x07License\x12\x18\n\x10subscriptionCode\x18\x01 \x01(\t\x12\x15\n\rproductTypeId\x18\x02 \x01(\x05\x12\x17\n\x0fproductTypeName\x18\x03 \x01(\t\x12\x16\n\x0e\x65xpirationDate\x18\x04 \x01(\t\x12\x1e\n\x16secondsUntilExpiration\x18\x05 \x01(\x03\x12\x12\n\nmaxDevices\x18\x06 \x01(\x05\x12\x14\n\x0c\x66ilePlanType\x18\x07 \x01(\x05\x12\x11\n\tbytesUsed\x18\x08 \x01(\x03\x12\x12\n\nbytesTotal\x18\t \x01(\x03\x12%\n\x1dsecondsUntilStorageExpiration\x18\n 
\x01(\x03\x12\x1d\n\x15storageExpirationDate\x18\x0b \x01(\t\x12,\n$hasAutoRenewableAppstoreSubscription\x18\x0c \x01(\x08\x12\x13\n\x0b\x61\x63\x63ountType\x18\r \x01(\x05\x12\x18\n\x10uploadsRemaining\x18\x0e \x01(\x05\x12\x14\n\x0c\x65nterpriseId\x18\x0f \x01(\x05\x12\x13\n\x0b\x63hatEnabled\x18\x10 \x01(\x08\x12 \n\x18\x61uditAndReportingEnabled\x18\x11 \x01(\x08\x12!\n\x19\x62reachWatchFeatureDisable\x18\x12 \x01(\x08\x12\x12\n\naccountUid\x18\x13 \x01(\x0c\x12\x1c\n\x14\x61llowPersonalLicense\x18\x14 \x01(\x08\x12\x12\n\nlicensedBy\x18\x15 \x01(\t\x12\r\n\x05\x65mail\x18\x16 \x01(\t\x12\x1a\n\x12\x62reachWatchEnabled\x18\x17 \x01(\x08\x12\x1a\n\x12\x62reachWatchScanned\x18\x18 \x01(\x08\x12\x1d\n\x15\x62reachWatchExpiration\x18\x19 \x01(\x03\x12\x1e\n\x16\x62reachWatchDateCreated\x18\x1a \x01(\x03\x12%\n\x05\x65rror\x18\x1b \x01(\x0b\x32\x16.AccountSummary.Result\x12\x12\n\nexpiration\x18\x1d \x01(\x03\x12\x19\n\x11storageExpiration\x18\x1e \x01(\x03\x12\x14\n\x0cuploadsCount\x18\x1f \x01(\x05\x12\r\n\x05units\x18 \x01(\x05\x12\x19\n\x11pendingEnterprise\x18! 
\x01(\x08\x12\x14\n\x0cisPamEnabled\x18\" \x01(\x08\x12\x14\n\x0cisKsmEnabled\x18# \x01(\x08\"\xa3\x01\n\x05\x41\x64\x64On\x12\x14\n\x0clicenseKeyId\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x16\n\x0e\x65xpirationDate\x18\x03 \x01(\x03\x12\x13\n\x0b\x63reatedDate\x18\x04 \x01(\x03\x12\x0f\n\x07isTrial\x18\x05 \x01(\x08\x12\x0f\n\x07\x65nabled\x18\x06 \x01(\x08\x12\x0f\n\x07scanned\x18\x07 \x01(\x08\x12\x16\n\x0e\x66\x65\x61tureDisable\x18\x08 \x01(\x08\"\xf4\t\n\x08Settings\x12\r\n\x05\x61udit\x18\x01 \x01(\x08\x12!\n\x19mustPerformAccountShareBy\x18\x02 \x01(\x03\x12>\n\x0eshareAccountTo\x18\x03 \x03(\x0b\x32&.AccountSummary.MissingAccountShareKey\x12+\n\x05rules\x18\x04 \x03(\x0b\x32\x1c.AccountSummary.PasswordRule\x12\x1a\n\x12passwordRulesIntro\x18\x05 \x01(\t\x12\x16\n\x0e\x61utoBackupDays\x18\x06 \x01(\x05\x12\r\n\x05theme\x18\x07 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x08 \x01(\t\x12\x14\n\x0c\x63hannelValue\x18\t \x01(\t\x12\x15\n\rrsaConfigured\x18\n \x01(\x08\x12\x15\n\remailVerified\x18\x0b \x01(\x08\x12\"\n\x1amasterPasswordLastModified\x18\x0c \x01(\x01\x12\x18\n\x10\x61\x63\x63ountFolderKey\x18\r \x01(\x0c\x12\x31\n\x0csecurityKeys\x18\x0e \x03(\x0b\x32\x1b.AccountSummary.SecurityKey\x12+\n\tkeyValues\x18\x0f \x03(\x0b\x32\x18.AccountSummary.KeyValue\x12\x0f\n\x07ssoUser\x18\x10 \x01(\x08\x12\x18\n\x10onlineAccessOnly\x18\x11 \x01(\x08\x12\x1c\n\x14masterPasswordExpiry\x18\x12 \x01(\x05\x12\x19\n\x11twoFactorRequired\x18\x13 \x01(\x08\x12\x16\n\x0e\x64isallowExport\x18\x14 \x01(\x08\x12\x15\n\rrestrictFiles\x18\x15 \x01(\x08\x12\x1a\n\x12restrictAllSharing\x18\x16 \x01(\x08\x12\x17\n\x0frestrictSharing\x18\x17 \x01(\x08\x12\"\n\x1arestrictSharingIncomingAll\x18\x18 \x01(\x08\x12)\n!restrictSharingIncomingEnterprise\x18\x19 \x01(\x08\x12\x13\n\x0blogoutTimer\x18\x1a \x01(\x03\x12\x17\n\x0fpersistentLogin\x18\x1b \x01(\x08\x12\x1c\n\x14ipDisableAutoApprove\x18\x1c \x01(\x08\x12$\n\x1cshareDataKeyWithEccPublicKey\x18\x1d 
\x01(\x08\x12\'\n\x1fshareDataKeyWithDevicePublicKey\x18\x1e \x01(\x08\x12\x1a\n\x12RecordTypesCounter\x18\x1f \x01(\x05\x12$\n\x1cRecordTypesEnterpriseCounter\x18 \x01(\x05\x12\x1a\n\x12recordTypesEnabled\x18! \x01(\x08\x12\x1c\n\x14\x63\x61nManageRecordTypes\x18\" \x01(\x08\x12\x1d\n\x15recordTypesPAMCounter\x18# \x01(\x05\x12\x1a\n\x12logoutTimerMinutes\x18$ \x01(\x05\x12 \n\x18securityKeysNoUserVerify\x18% \x01(\x08\x12\x36\n\x08\x63hannels\x18& \x03(\x0e\x32$.Authentication.TwoFactorChannelType\x12\x19\n\x11personalUsernames\x18\' \x03(\t\x12\x15\n\rmaxIpDistance\x18( \x01(\x05\x12\x1e\n\x16maxIpDistanceEffective\x18) \x01(\x05\"&\n\x08KeyValue\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"-\n\x0fKeyValueBoolean\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x08\"*\n\x0cKeyValueLong\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03\"=\n\x06Result\x12\x12\n\nresultCode\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x0e\n\x06result\x18\x03 \x01(\t\"\xc2\x01\n\x0c\x45nforcements\x12)\n\x07strings\x18\x01 \x03(\x0b\x32\x18.AccountSummary.KeyValue\x12\x31\n\x08\x62ooleans\x18\x02 \x03(\x0b\x32\x1f.AccountSummary.KeyValueBoolean\x12+\n\x05longs\x18\x03 \x03(\x0b\x32\x1c.AccountSummary.KeyValueLong\x12\'\n\x05jsons\x18\x04 \x03(\x0b\x32\x18.AccountSummary.KeyValue\"<\n\x16MissingAccountShareKey\x12\x0f\n\x07role_id\x18\x01 \x01(\x03\x12\x11\n\tpublicKey\x18\x02 \x01(\x0c\"u\n\x0cPasswordRule\x12\x10\n\x08ruleType\x18\x01 \x01(\t\x12\x0f\n\x07pattern\x18\x02 \x01(\t\x12\r\n\x05match\x18\x03 \x01(\x08\x12\x0f\n\x07minimum\x18\x04 \x01(\x05\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12\r\n\x05value\x18\x06 \x01(\t\"\x97\x01\n\x0bSecurityKey\x12\x10\n\x08\x64\x65viceId\x18\x01 \x01(\x03\x12\x12\n\ndeviceName\x18\x02 \x01(\t\x12\x11\n\tdateAdded\x18\x03 \x01(\x03\x12\x0f\n\x07isValid\x18\x04 \x01(\x08\x12>\n\x12\x64\x65viceRegistration\x18\x05 
\x01(\x0b\x32\".AccountSummary.DeviceRegistration\"y\n\x12\x44\x65viceRegistration\x12\x11\n\tkeyHandle\x18\x01 \x01(\t\x12\x11\n\tpublicKey\x18\x02 \x01(\x0c\x12\x17\n\x0f\x61ttestationCert\x18\x03 \x01(\t\x12\x0f\n\x07\x63ounter\x18\x04 \x01(\x03\x12\x13\n\x0b\x63ompromised\x18\x05 \x01(\x08\"k\n\x05Group\x12\r\n\x05\x61\x64min\x18\x01 \x01(\x08\x12\x1d\n\x15groupVerificationCode\x18\x02 \x01(\t\x12\x34\n\radministrator\x18\x04 \x01(\x0b\x32\x1d.AccountSummary.Administrator\"\xc0\x01\n\rAdministrator\x12\x11\n\tfirstName\x18\x01 \x01(\t\x12\x10\n\x08lastName\x18\x02 \x01(\t\x12\r\n\x05\x65mail\x18\x03 \x01(\t\x12\x1c\n\x14\x63urrentNumberOfUsers\x18\x04 \x01(\x05\x12\x15\n\rnumberOfUsers\x18\x05 \x01(\x05\x12\x18\n\x10subscriptionCode\x18\x07 \x01(\t\x12\x16\n\x0e\x65xpirationDate\x18\x08 \x01(\t\x12\x14\n\x0cpurchaseDate\x18\t \x01(\tB*\n\x18\x63om.keepersecurity.protoB\x0e\x41\x63\x63ountSummaryb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'AccountSummary_pb2', _globals) -if _descriptor._USE_C_DESCRIPTORS == False: - _globals['DESCRIPTOR']._options = None +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'\n\030com.keepersecurity.protoB\016AccountSummary' _globals['_ACCOUNTSUMMARYREQUEST']._serialized_start=58 _globals['_ACCOUNTSUMMARYREQUEST']._serialized_end=136 _globals['_ACCOUNTSUMMARYELEMENTS']._serialized_start=139 - _globals['_ACCOUNTSUMMARYELEMENTS']._serialized_end=803 - _globals['_DEVICEINFO']._serialized_start=806 - _globals['_DEVICEINFO']._serialized_end=1201 - _globals['_KEYSINFO']._serialized_start=1204 - _globals['_KEYSINFO']._serialized_end=1397 - _globals['_SYNCLOG']._serialized_start=1400 - _globals['_SYNCLOG']._serialized_end=1529 - _globals['_LICENSE']._serialized_start=1532 - _globals['_LICENSE']._serialized_end=2403 - 
_globals['_ADDON']._serialized_start=2406 - _globals['_ADDON']._serialized_end=2569 - _globals['_SETTINGS']._serialized_start=2572 - _globals['_SETTINGS']._serialized_end=3785 - _globals['_KEYVALUE']._serialized_start=3787 - _globals['_KEYVALUE']._serialized_end=3825 - _globals['_KEYVALUEBOOLEAN']._serialized_start=3827 - _globals['_KEYVALUEBOOLEAN']._serialized_end=3872 - _globals['_KEYVALUELONG']._serialized_start=3874 - _globals['_KEYVALUELONG']._serialized_end=3916 - _globals['_RESULT']._serialized_start=3918 - _globals['_RESULT']._serialized_end=3979 - _globals['_ENFORCEMENTS']._serialized_start=3982 - _globals['_ENFORCEMENTS']._serialized_end=4176 - _globals['_MISSINGACCOUNTSHAREKEY']._serialized_start=4178 - _globals['_MISSINGACCOUNTSHAREKEY']._serialized_end=4238 - _globals['_PASSWORDRULE']._serialized_start=4240 - _globals['_PASSWORDRULE']._serialized_end=4357 - _globals['_SECURITYKEY']._serialized_start=4360 - _globals['_SECURITYKEY']._serialized_end=4511 - _globals['_DEVICEREGISTRATION']._serialized_start=4513 - _globals['_DEVICEREGISTRATION']._serialized_end=4634 - _globals['_GROUP']._serialized_start=4636 - _globals['_GROUP']._serialized_end=4743 - _globals['_ADMINISTRATOR']._serialized_start=4746 - _globals['_ADMINISTRATOR']._serialized_end=4938 + _globals['_ACCOUNTSUMMARYELEMENTS']._serialized_end=855 + _globals['_DEVICEINFO']._serialized_start=858 + _globals['_DEVICEINFO']._serialized_end=1253 + _globals['_KEYSINFO']._serialized_start=1256 + _globals['_KEYSINFO']._serialized_end=1449 + _globals['_SYNCLOG']._serialized_start=1452 + _globals['_SYNCLOG']._serialized_end=1581 + _globals['_LICENSE']._serialized_start=1584 + _globals['_LICENSE']._serialized_end=2477 + _globals['_ADDON']._serialized_start=2480 + _globals['_ADDON']._serialized_end=2643 + _globals['_SETTINGS']._serialized_start=2646 + _globals['_SETTINGS']._serialized_end=3914 + _globals['_KEYVALUE']._serialized_start=3916 + _globals['_KEYVALUE']._serialized_end=3954 + 
_globals['_KEYVALUEBOOLEAN']._serialized_start=3956 + _globals['_KEYVALUEBOOLEAN']._serialized_end=4001 + _globals['_KEYVALUELONG']._serialized_start=4003 + _globals['_KEYVALUELONG']._serialized_end=4045 + _globals['_RESULT']._serialized_start=4047 + _globals['_RESULT']._serialized_end=4108 + _globals['_ENFORCEMENTS']._serialized_start=4111 + _globals['_ENFORCEMENTS']._serialized_end=4305 + _globals['_MISSINGACCOUNTSHAREKEY']._serialized_start=4307 + _globals['_MISSINGACCOUNTSHAREKEY']._serialized_end=4367 + _globals['_PASSWORDRULE']._serialized_start=4369 + _globals['_PASSWORDRULE']._serialized_end=4486 + _globals['_SECURITYKEY']._serialized_start=4489 + _globals['_SECURITYKEY']._serialized_end=4640 + _globals['_DEVICEREGISTRATION']._serialized_start=4642 + _globals['_DEVICEREGISTRATION']._serialized_end=4763 + _globals['_GROUP']._serialized_start=4765 + _globals['_GROUP']._serialized_end=4872 + _globals['_ADMINISTRATOR']._serialized_start=4875 + _globals['_ADMINISTRATOR']._serialized_end=5067 # @@protoc_insertion_point(module_scope) diff --git a/keepercommander/proto/AccountSummary_pb2.pyi b/keepercommander/proto/AccountSummary_pb2.pyi index 16357184f..40127ad83 100644 --- a/keepercommander/proto/AccountSummary_pb2.pyi +++ b/keepercommander/proto/AccountSummary_pb2.pyi @@ -2,20 +2,21 @@ import APIRequest_pb2 as _APIRequest_pb2 from google.protobuf.internal import containers as _containers from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union +from collections.abc import Iterable as _Iterable, Mapping as _Mapping +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union DESCRIPTOR: _descriptor.FileDescriptor class AccountSummaryRequest(_message.Message): - __slots__ = ["summaryVersion", "includeRecentActivity"] + __slots__ = ("summaryVersion", "includeRecentActivity") 
SUMMARYVERSION_FIELD_NUMBER: _ClassVar[int] INCLUDERECENTACTIVITY_FIELD_NUMBER: _ClassVar[int] summaryVersion: int includeRecentActivity: bool - def __init__(self, summaryVersion: _Optional[int] = ..., includeRecentActivity: bool = ...) -> None: ... + def __init__(self, summaryVersion: _Optional[int] = ..., includeRecentActivity: _Optional[bool] = ...) -> None: ... class AccountSummaryElements(_message.Message): - __slots__ = ["clientKey", "settings", "keysInfo", "syncLogs", "isEnterpriseAdmin", "license", "group", "Enforcements", "Images", "personalLicense", "fixSharedFolderRecords", "usernames", "devices", "isShareAdmin", "accountRecovery", "accountRecoveryPrompt", "minMasterPasswordLengthNoPrompt", "forbidKeyType2"] + __slots__ = ("clientKey", "settings", "keysInfo", "syncLogs", "isEnterpriseAdmin", "license", "group", "Enforcements", "Images", "personalLicense", "fixSharedFolderRecords", "usernames", "devices", "isShareAdmin", "accountRecovery", "accountRecoveryPrompt", "minMasterPasswordLengthNoPrompt", "forbidKeyType2", "forbidKeyType1", "disallowedFeatures") CLIENTKEY_FIELD_NUMBER: _ClassVar[int] SETTINGS_FIELD_NUMBER: _ClassVar[int] KEYSINFO_FIELD_NUMBER: _ClassVar[int] @@ -34,6 +35,8 @@ class AccountSummaryElements(_message.Message): ACCOUNTRECOVERYPROMPT_FIELD_NUMBER: _ClassVar[int] MINMASTERPASSWORDLENGTHNOPROMPT_FIELD_NUMBER: _ClassVar[int] FORBIDKEYTYPE2_FIELD_NUMBER: _ClassVar[int] + FORBIDKEYTYPE1_FIELD_NUMBER: _ClassVar[int] + DISALLOWEDFEATURES_FIELD_NUMBER: _ClassVar[int] clientKey: bytes settings: Settings keysInfo: KeysInfo @@ -52,10 +55,12 @@ class AccountSummaryElements(_message.Message): accountRecoveryPrompt: bool minMasterPasswordLengthNoPrompt: int forbidKeyType2: bool - def __init__(self, clientKey: _Optional[bytes] = ..., settings: _Optional[_Union[Settings, _Mapping]] = ..., keysInfo: _Optional[_Union[KeysInfo, _Mapping]] = ..., syncLogs: _Optional[_Iterable[_Union[SyncLog, _Mapping]]] = ..., isEnterpriseAdmin: bool = ..., license: 
_Optional[_Union[License, _Mapping]] = ..., group: _Optional[_Union[Group, _Mapping]] = ..., Enforcements: _Optional[_Union[Enforcements, _Mapping]] = ..., Images: _Optional[_Iterable[_Union[KeyValue, _Mapping]]] = ..., personalLicense: _Optional[_Union[License, _Mapping]] = ..., fixSharedFolderRecords: bool = ..., usernames: _Optional[_Iterable[str]] = ..., devices: _Optional[_Iterable[_Union[DeviceInfo, _Mapping]]] = ..., isShareAdmin: bool = ..., accountRecovery: bool = ..., accountRecoveryPrompt: bool = ..., minMasterPasswordLengthNoPrompt: _Optional[int] = ..., forbidKeyType2: bool = ...) -> None: ... + forbidKeyType1: bool + disallowedFeatures: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, clientKey: _Optional[bytes] = ..., settings: _Optional[_Union[Settings, _Mapping]] = ..., keysInfo: _Optional[_Union[KeysInfo, _Mapping]] = ..., syncLogs: _Optional[_Iterable[_Union[SyncLog, _Mapping]]] = ..., isEnterpriseAdmin: _Optional[bool] = ..., license: _Optional[_Union[License, _Mapping]] = ..., group: _Optional[_Union[Group, _Mapping]] = ..., Enforcements: _Optional[_Union[Enforcements, _Mapping]] = ..., Images: _Optional[_Iterable[_Union[KeyValue, _Mapping]]] = ..., personalLicense: _Optional[_Union[License, _Mapping]] = ..., fixSharedFolderRecords: _Optional[bool] = ..., usernames: _Optional[_Iterable[str]] = ..., devices: _Optional[_Iterable[_Union[DeviceInfo, _Mapping]]] = ..., isShareAdmin: _Optional[bool] = ..., accountRecovery: _Optional[bool] = ..., accountRecoveryPrompt: _Optional[bool] = ..., minMasterPasswordLengthNoPrompt: _Optional[int] = ..., forbidKeyType2: _Optional[bool] = ..., forbidKeyType1: _Optional[bool] = ..., disallowedFeatures: _Optional[_Iterable[str]] = ...) -> None: ... 
class DeviceInfo(_message.Message): - __slots__ = ["encryptedDeviceToken", "deviceName", "deviceStatus", "devicePublicKey", "encryptedDataKeyDoNotUse", "clientVersion", "username", "ipAddress", "approveRequestTime", "encryptedDataKeyPresent", "groupId", "devicePlatform", "clientFormFactor"] + __slots__ = ("encryptedDeviceToken", "deviceName", "deviceStatus", "devicePublicKey", "encryptedDataKeyDoNotUse", "clientVersion", "username", "ipAddress", "approveRequestTime", "encryptedDataKeyPresent", "groupId", "devicePlatform", "clientFormFactor") ENCRYPTEDDEVICETOKEN_FIELD_NUMBER: _ClassVar[int] DEVICENAME_FIELD_NUMBER: _ClassVar[int] DEVICESTATUS_FIELD_NUMBER: _ClassVar[int] @@ -82,10 +87,10 @@ class DeviceInfo(_message.Message): groupId: int devicePlatform: str clientFormFactor: _APIRequest_pb2.ClientFormFactor - def __init__(self, encryptedDeviceToken: _Optional[bytes] = ..., deviceName: _Optional[str] = ..., deviceStatus: _Optional[_Union[_APIRequest_pb2.DeviceStatus, str]] = ..., devicePublicKey: _Optional[bytes] = ..., encryptedDataKeyDoNotUse: _Optional[bytes] = ..., clientVersion: _Optional[str] = ..., username: _Optional[str] = ..., ipAddress: _Optional[str] = ..., approveRequestTime: _Optional[int] = ..., encryptedDataKeyPresent: bool = ..., groupId: _Optional[int] = ..., devicePlatform: _Optional[str] = ..., clientFormFactor: _Optional[_Union[_APIRequest_pb2.ClientFormFactor, str]] = ...) -> None: ... 
+ def __init__(self, encryptedDeviceToken: _Optional[bytes] = ..., deviceName: _Optional[str] = ..., deviceStatus: _Optional[_Union[_APIRequest_pb2.DeviceStatus, str]] = ..., devicePublicKey: _Optional[bytes] = ..., encryptedDataKeyDoNotUse: _Optional[bytes] = ..., clientVersion: _Optional[str] = ..., username: _Optional[str] = ..., ipAddress: _Optional[str] = ..., approveRequestTime: _Optional[int] = ..., encryptedDataKeyPresent: _Optional[bool] = ..., groupId: _Optional[int] = ..., devicePlatform: _Optional[str] = ..., clientFormFactor: _Optional[_Union[_APIRequest_pb2.ClientFormFactor, str]] = ...) -> None: ... class KeysInfo(_message.Message): - __slots__ = ["encryptionParams", "encryptedDataKey", "dataKeyBackupDate", "userAuthUid", "encryptedPrivateKey", "encryptedEccPrivateKey", "eccPublicKey"] + __slots__ = ("encryptionParams", "encryptedDataKey", "dataKeyBackupDate", "userAuthUid", "encryptedPrivateKey", "encryptedEccPrivateKey", "eccPublicKey") ENCRYPTIONPARAMS_FIELD_NUMBER: _ClassVar[int] ENCRYPTEDDATAKEY_FIELD_NUMBER: _ClassVar[int] DATAKEYBACKUPDATE_FIELD_NUMBER: _ClassVar[int] @@ -103,7 +108,7 @@ class KeysInfo(_message.Message): def __init__(self, encryptionParams: _Optional[bytes] = ..., encryptedDataKey: _Optional[bytes] = ..., dataKeyBackupDate: _Optional[float] = ..., userAuthUid: _Optional[bytes] = ..., encryptedPrivateKey: _Optional[bytes] = ..., encryptedEccPrivateKey: _Optional[bytes] = ..., eccPublicKey: _Optional[bytes] = ...) -> None: ... 
class SyncLog(_message.Message): - __slots__ = ["countryName", "secondsAgo", "deviceName", "countryCode", "deviceUID", "ipAddress"] + __slots__ = ("countryName", "secondsAgo", "deviceName", "countryCode", "deviceUID", "ipAddress") COUNTRYNAME_FIELD_NUMBER: _ClassVar[int] SECONDSAGO_FIELD_NUMBER: _ClassVar[int] DEVICENAME_FIELD_NUMBER: _ClassVar[int] @@ -119,7 +124,7 @@ class SyncLog(_message.Message): def __init__(self, countryName: _Optional[str] = ..., secondsAgo: _Optional[int] = ..., deviceName: _Optional[str] = ..., countryCode: _Optional[str] = ..., deviceUID: _Optional[bytes] = ..., ipAddress: _Optional[str] = ...) -> None: ... class License(_message.Message): - __slots__ = ["subscriptionCode", "productTypeId", "productTypeName", "expirationDate", "secondsUntilExpiration", "maxDevices", "filePlanType", "bytesUsed", "bytesTotal", "secondsUntilStorageExpiration", "storageExpirationDate", "hasAutoRenewableAppstoreSubscription", "accountType", "uploadsRemaining", "enterpriseId", "chatEnabled", "auditAndReportingEnabled", "breachWatchFeatureDisable", "accountUid", "allowPersonalLicense", "licensedBy", "email", "breachWatchEnabled", "breachWatchScanned", "breachWatchExpiration", "breachWatchDateCreated", "error", "expiration", "storageExpiration", "uploadsCount", "units", "pendingEnterprise", "isPamEnabled"] + __slots__ = ("subscriptionCode", "productTypeId", "productTypeName", "expirationDate", "secondsUntilExpiration", "maxDevices", "filePlanType", "bytesUsed", "bytesTotal", "secondsUntilStorageExpiration", "storageExpirationDate", "hasAutoRenewableAppstoreSubscription", "accountType", "uploadsRemaining", "enterpriseId", "chatEnabled", "auditAndReportingEnabled", "breachWatchFeatureDisable", "accountUid", "allowPersonalLicense", "licensedBy", "email", "breachWatchEnabled", "breachWatchScanned", "breachWatchExpiration", "breachWatchDateCreated", "error", "expiration", "storageExpiration", "uploadsCount", "units", "pendingEnterprise", "isPamEnabled", 
"isKsmEnabled") SUBSCRIPTIONCODE_FIELD_NUMBER: _ClassVar[int] PRODUCTTYPEID_FIELD_NUMBER: _ClassVar[int] PRODUCTTYPENAME_FIELD_NUMBER: _ClassVar[int] @@ -153,6 +158,7 @@ class License(_message.Message): UNITS_FIELD_NUMBER: _ClassVar[int] PENDINGENTERPRISE_FIELD_NUMBER: _ClassVar[int] ISPAMENABLED_FIELD_NUMBER: _ClassVar[int] + ISKSMENABLED_FIELD_NUMBER: _ClassVar[int] subscriptionCode: str productTypeId: int productTypeName: str @@ -186,10 +192,11 @@ class License(_message.Message): units: int pendingEnterprise: bool isPamEnabled: bool - def __init__(self, subscriptionCode: _Optional[str] = ..., productTypeId: _Optional[int] = ..., productTypeName: _Optional[str] = ..., expirationDate: _Optional[str] = ..., secondsUntilExpiration: _Optional[int] = ..., maxDevices: _Optional[int] = ..., filePlanType: _Optional[int] = ..., bytesUsed: _Optional[int] = ..., bytesTotal: _Optional[int] = ..., secondsUntilStorageExpiration: _Optional[int] = ..., storageExpirationDate: _Optional[str] = ..., hasAutoRenewableAppstoreSubscription: bool = ..., accountType: _Optional[int] = ..., uploadsRemaining: _Optional[int] = ..., enterpriseId: _Optional[int] = ..., chatEnabled: bool = ..., auditAndReportingEnabled: bool = ..., breachWatchFeatureDisable: bool = ..., accountUid: _Optional[bytes] = ..., allowPersonalLicense: bool = ..., licensedBy: _Optional[str] = ..., email: _Optional[str] = ..., breachWatchEnabled: bool = ..., breachWatchScanned: bool = ..., breachWatchExpiration: _Optional[int] = ..., breachWatchDateCreated: _Optional[int] = ..., error: _Optional[_Union[Result, _Mapping]] = ..., expiration: _Optional[int] = ..., storageExpiration: _Optional[int] = ..., uploadsCount: _Optional[int] = ..., units: _Optional[int] = ..., pendingEnterprise: bool = ..., isPamEnabled: bool = ...) -> None: ... 
+ isKsmEnabled: bool + def __init__(self, subscriptionCode: _Optional[str] = ..., productTypeId: _Optional[int] = ..., productTypeName: _Optional[str] = ..., expirationDate: _Optional[str] = ..., secondsUntilExpiration: _Optional[int] = ..., maxDevices: _Optional[int] = ..., filePlanType: _Optional[int] = ..., bytesUsed: _Optional[int] = ..., bytesTotal: _Optional[int] = ..., secondsUntilStorageExpiration: _Optional[int] = ..., storageExpirationDate: _Optional[str] = ..., hasAutoRenewableAppstoreSubscription: _Optional[bool] = ..., accountType: _Optional[int] = ..., uploadsRemaining: _Optional[int] = ..., enterpriseId: _Optional[int] = ..., chatEnabled: _Optional[bool] = ..., auditAndReportingEnabled: _Optional[bool] = ..., breachWatchFeatureDisable: _Optional[bool] = ..., accountUid: _Optional[bytes] = ..., allowPersonalLicense: _Optional[bool] = ..., licensedBy: _Optional[str] = ..., email: _Optional[str] = ..., breachWatchEnabled: _Optional[bool] = ..., breachWatchScanned: _Optional[bool] = ..., breachWatchExpiration: _Optional[int] = ..., breachWatchDateCreated: _Optional[int] = ..., error: _Optional[_Union[Result, _Mapping]] = ..., expiration: _Optional[int] = ..., storageExpiration: _Optional[int] = ..., uploadsCount: _Optional[int] = ..., units: _Optional[int] = ..., pendingEnterprise: _Optional[bool] = ..., isPamEnabled: _Optional[bool] = ..., isKsmEnabled: _Optional[bool] = ...) -> None: ... 
class AddOn(_message.Message): - __slots__ = ["licenseKeyId", "name", "expirationDate", "createdDate", "isTrial", "enabled", "scanned", "featureDisable"] + __slots__ = ("licenseKeyId", "name", "expirationDate", "createdDate", "isTrial", "enabled", "scanned", "featureDisable") LICENSEKEYID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] EXPIRATIONDATE_FIELD_NUMBER: _ClassVar[int] @@ -206,10 +213,10 @@ class AddOn(_message.Message): enabled: bool scanned: bool featureDisable: bool - def __init__(self, licenseKeyId: _Optional[int] = ..., name: _Optional[str] = ..., expirationDate: _Optional[int] = ..., createdDate: _Optional[int] = ..., isTrial: bool = ..., enabled: bool = ..., scanned: bool = ..., featureDisable: bool = ...) -> None: ... + def __init__(self, licenseKeyId: _Optional[int] = ..., name: _Optional[str] = ..., expirationDate: _Optional[int] = ..., createdDate: _Optional[int] = ..., isTrial: _Optional[bool] = ..., enabled: _Optional[bool] = ..., scanned: _Optional[bool] = ..., featureDisable: _Optional[bool] = ...) -> None: ... 
class Settings(_message.Message): - __slots__ = ["audit", "mustPerformAccountShareBy", "shareAccountTo", "rules", "passwordRulesIntro", "autoBackupDays", "theme", "channel", "channelValue", "rsaConfigured", "emailVerified", "masterPasswordLastModified", "accountFolderKey", "securityKeys", "keyValues", "ssoUser", "onlineAccessOnly", "masterPasswordExpiry", "twoFactorRequired", "disallowExport", "restrictFiles", "restrictAllSharing", "restrictSharing", "restrictSharingIncomingAll", "restrictSharingIncomingEnterprise", "logoutTimer", "persistentLogin", "ipDisableAutoApprove", "shareDataKeyWithEccPublicKey", "shareDataKeyWithDevicePublicKey", "RecordTypesCounter", "RecordTypesEnterpriseCounter", "recordTypesEnabled", "canManageRecordTypes", "recordTypesPAMCounter", "logoutTimerMinutes", "securityKeysNoUserVerify", "channels", "personalUsernames"] + __slots__ = ("audit", "mustPerformAccountShareBy", "shareAccountTo", "rules", "passwordRulesIntro", "autoBackupDays", "theme", "channel", "channelValue", "rsaConfigured", "emailVerified", "masterPasswordLastModified", "accountFolderKey", "securityKeys", "keyValues", "ssoUser", "onlineAccessOnly", "masterPasswordExpiry", "twoFactorRequired", "disallowExport", "restrictFiles", "restrictAllSharing", "restrictSharing", "restrictSharingIncomingAll", "restrictSharingIncomingEnterprise", "logoutTimer", "persistentLogin", "ipDisableAutoApprove", "shareDataKeyWithEccPublicKey", "shareDataKeyWithDevicePublicKey", "RecordTypesCounter", "RecordTypesEnterpriseCounter", "recordTypesEnabled", "canManageRecordTypes", "recordTypesPAMCounter", "logoutTimerMinutes", "securityKeysNoUserVerify", "channels", "personalUsernames", "maxIpDistance", "maxIpDistanceEffective") AUDIT_FIELD_NUMBER: _ClassVar[int] MUSTPERFORMACCOUNTSHAREBY_FIELD_NUMBER: _ClassVar[int] SHAREACCOUNTTO_FIELD_NUMBER: _ClassVar[int] @@ -249,6 +256,8 @@ class Settings(_message.Message): SECURITYKEYSNOUSERVERIFY_FIELD_NUMBER: _ClassVar[int] CHANNELS_FIELD_NUMBER: _ClassVar[int] 
PERSONALUSERNAMES_FIELD_NUMBER: _ClassVar[int] + MAXIPDISTANCE_FIELD_NUMBER: _ClassVar[int] + MAXIPDISTANCEEFFECTIVE_FIELD_NUMBER: _ClassVar[int] audit: bool mustPerformAccountShareBy: int shareAccountTo: _containers.RepeatedCompositeFieldContainer[MissingAccountShareKey] @@ -288,10 +297,12 @@ class Settings(_message.Message): securityKeysNoUserVerify: bool channels: _containers.RepeatedScalarFieldContainer[_APIRequest_pb2.TwoFactorChannelType] personalUsernames: _containers.RepeatedScalarFieldContainer[str] - def __init__(self, audit: bool = ..., mustPerformAccountShareBy: _Optional[int] = ..., shareAccountTo: _Optional[_Iterable[_Union[MissingAccountShareKey, _Mapping]]] = ..., rules: _Optional[_Iterable[_Union[PasswordRule, _Mapping]]] = ..., passwordRulesIntro: _Optional[str] = ..., autoBackupDays: _Optional[int] = ..., theme: _Optional[str] = ..., channel: _Optional[str] = ..., channelValue: _Optional[str] = ..., rsaConfigured: bool = ..., emailVerified: bool = ..., masterPasswordLastModified: _Optional[float] = ..., accountFolderKey: _Optional[bytes] = ..., securityKeys: _Optional[_Iterable[_Union[SecurityKey, _Mapping]]] = ..., keyValues: _Optional[_Iterable[_Union[KeyValue, _Mapping]]] = ..., ssoUser: bool = ..., onlineAccessOnly: bool = ..., masterPasswordExpiry: _Optional[int] = ..., twoFactorRequired: bool = ..., disallowExport: bool = ..., restrictFiles: bool = ..., restrictAllSharing: bool = ..., restrictSharing: bool = ..., restrictSharingIncomingAll: bool = ..., restrictSharingIncomingEnterprise: bool = ..., logoutTimer: _Optional[int] = ..., persistentLogin: bool = ..., ipDisableAutoApprove: bool = ..., shareDataKeyWithEccPublicKey: bool = ..., shareDataKeyWithDevicePublicKey: bool = ..., RecordTypesCounter: _Optional[int] = ..., RecordTypesEnterpriseCounter: _Optional[int] = ..., recordTypesEnabled: bool = ..., canManageRecordTypes: bool = ..., recordTypesPAMCounter: _Optional[int] = ..., logoutTimerMinutes: _Optional[int] = ..., 
securityKeysNoUserVerify: bool = ..., channels: _Optional[_Iterable[_Union[_APIRequest_pb2.TwoFactorChannelType, str]]] = ..., personalUsernames: _Optional[_Iterable[str]] = ...) -> None: ... + maxIpDistance: int + maxIpDistanceEffective: int + def __init__(self, audit: _Optional[bool] = ..., mustPerformAccountShareBy: _Optional[int] = ..., shareAccountTo: _Optional[_Iterable[_Union[MissingAccountShareKey, _Mapping]]] = ..., rules: _Optional[_Iterable[_Union[PasswordRule, _Mapping]]] = ..., passwordRulesIntro: _Optional[str] = ..., autoBackupDays: _Optional[int] = ..., theme: _Optional[str] = ..., channel: _Optional[str] = ..., channelValue: _Optional[str] = ..., rsaConfigured: _Optional[bool] = ..., emailVerified: _Optional[bool] = ..., masterPasswordLastModified: _Optional[float] = ..., accountFolderKey: _Optional[bytes] = ..., securityKeys: _Optional[_Iterable[_Union[SecurityKey, _Mapping]]] = ..., keyValues: _Optional[_Iterable[_Union[KeyValue, _Mapping]]] = ..., ssoUser: _Optional[bool] = ..., onlineAccessOnly: _Optional[bool] = ..., masterPasswordExpiry: _Optional[int] = ..., twoFactorRequired: _Optional[bool] = ..., disallowExport: _Optional[bool] = ..., restrictFiles: _Optional[bool] = ..., restrictAllSharing: _Optional[bool] = ..., restrictSharing: _Optional[bool] = ..., restrictSharingIncomingAll: _Optional[bool] = ..., restrictSharingIncomingEnterprise: _Optional[bool] = ..., logoutTimer: _Optional[int] = ..., persistentLogin: _Optional[bool] = ..., ipDisableAutoApprove: _Optional[bool] = ..., shareDataKeyWithEccPublicKey: _Optional[bool] = ..., shareDataKeyWithDevicePublicKey: _Optional[bool] = ..., RecordTypesCounter: _Optional[int] = ..., RecordTypesEnterpriseCounter: _Optional[int] = ..., recordTypesEnabled: _Optional[bool] = ..., canManageRecordTypes: _Optional[bool] = ..., recordTypesPAMCounter: _Optional[int] = ..., logoutTimerMinutes: _Optional[int] = ..., securityKeysNoUserVerify: _Optional[bool] = ..., channels: 
_Optional[_Iterable[_Union[_APIRequest_pb2.TwoFactorChannelType, str]]] = ..., personalUsernames: _Optional[_Iterable[str]] = ..., maxIpDistance: _Optional[int] = ..., maxIpDistanceEffective: _Optional[int] = ...) -> None: ... class KeyValue(_message.Message): - __slots__ = ["key", "value"] + __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str @@ -299,15 +310,15 @@ class KeyValue(_message.Message): def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... class KeyValueBoolean(_message.Message): - __slots__ = ["key", "value"] + __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str value: bool - def __init__(self, key: _Optional[str] = ..., value: bool = ...) -> None: ... + def __init__(self, key: _Optional[str] = ..., value: _Optional[bool] = ...) -> None: ... class KeyValueLong(_message.Message): - __slots__ = ["key", "value"] + __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str @@ -315,7 +326,7 @@ class KeyValueLong(_message.Message): def __init__(self, key: _Optional[str] = ..., value: _Optional[int] = ...) -> None: ... class Result(_message.Message): - __slots__ = ["resultCode", "message", "result"] + __slots__ = ("resultCode", "message", "result") RESULTCODE_FIELD_NUMBER: _ClassVar[int] MESSAGE_FIELD_NUMBER: _ClassVar[int] RESULT_FIELD_NUMBER: _ClassVar[int] @@ -325,7 +336,7 @@ class Result(_message.Message): def __init__(self, resultCode: _Optional[str] = ..., message: _Optional[str] = ..., result: _Optional[str] = ...) -> None: ... 
class Enforcements(_message.Message): - __slots__ = ["strings", "booleans", "longs", "jsons"] + __slots__ = ("strings", "booleans", "longs", "jsons") STRINGS_FIELD_NUMBER: _ClassVar[int] BOOLEANS_FIELD_NUMBER: _ClassVar[int] LONGS_FIELD_NUMBER: _ClassVar[int] @@ -337,7 +348,7 @@ class Enforcements(_message.Message): def __init__(self, strings: _Optional[_Iterable[_Union[KeyValue, _Mapping]]] = ..., booleans: _Optional[_Iterable[_Union[KeyValueBoolean, _Mapping]]] = ..., longs: _Optional[_Iterable[_Union[KeyValueLong, _Mapping]]] = ..., jsons: _Optional[_Iterable[_Union[KeyValue, _Mapping]]] = ...) -> None: ... class MissingAccountShareKey(_message.Message): - __slots__ = ["role_id", "publicKey"] + __slots__ = ("role_id", "publicKey") ROLE_ID_FIELD_NUMBER: _ClassVar[int] PUBLICKEY_FIELD_NUMBER: _ClassVar[int] role_id: int @@ -345,7 +356,7 @@ class MissingAccountShareKey(_message.Message): def __init__(self, role_id: _Optional[int] = ..., publicKey: _Optional[bytes] = ...) -> None: ... class PasswordRule(_message.Message): - __slots__ = ["ruleType", "pattern", "match", "minimum", "description", "value"] + __slots__ = ("ruleType", "pattern", "match", "minimum", "description", "value") RULETYPE_FIELD_NUMBER: _ClassVar[int] PATTERN_FIELD_NUMBER: _ClassVar[int] MATCH_FIELD_NUMBER: _ClassVar[int] @@ -358,10 +369,10 @@ class PasswordRule(_message.Message): minimum: int description: str value: str - def __init__(self, ruleType: _Optional[str] = ..., pattern: _Optional[str] = ..., match: bool = ..., minimum: _Optional[int] = ..., description: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + def __init__(self, ruleType: _Optional[str] = ..., pattern: _Optional[str] = ..., match: _Optional[bool] = ..., minimum: _Optional[int] = ..., description: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... 
class SecurityKey(_message.Message): - __slots__ = ["deviceId", "deviceName", "dateAdded", "isValid", "deviceRegistration"] + __slots__ = ("deviceId", "deviceName", "dateAdded", "isValid", "deviceRegistration") DEVICEID_FIELD_NUMBER: _ClassVar[int] DEVICENAME_FIELD_NUMBER: _ClassVar[int] DATEADDED_FIELD_NUMBER: _ClassVar[int] @@ -372,10 +383,10 @@ class SecurityKey(_message.Message): dateAdded: int isValid: bool deviceRegistration: DeviceRegistration - def __init__(self, deviceId: _Optional[int] = ..., deviceName: _Optional[str] = ..., dateAdded: _Optional[int] = ..., isValid: bool = ..., deviceRegistration: _Optional[_Union[DeviceRegistration, _Mapping]] = ...) -> None: ... + def __init__(self, deviceId: _Optional[int] = ..., deviceName: _Optional[str] = ..., dateAdded: _Optional[int] = ..., isValid: _Optional[bool] = ..., deviceRegistration: _Optional[_Union[DeviceRegistration, _Mapping]] = ...) -> None: ... class DeviceRegistration(_message.Message): - __slots__ = ["keyHandle", "publicKey", "attestationCert", "counter", "compromised"] + __slots__ = ("keyHandle", "publicKey", "attestationCert", "counter", "compromised") KEYHANDLE_FIELD_NUMBER: _ClassVar[int] PUBLICKEY_FIELD_NUMBER: _ClassVar[int] ATTESTATIONCERT_FIELD_NUMBER: _ClassVar[int] @@ -386,20 +397,20 @@ class DeviceRegistration(_message.Message): attestationCert: str counter: int compromised: bool - def __init__(self, keyHandle: _Optional[str] = ..., publicKey: _Optional[bytes] = ..., attestationCert: _Optional[str] = ..., counter: _Optional[int] = ..., compromised: bool = ...) -> None: ... + def __init__(self, keyHandle: _Optional[str] = ..., publicKey: _Optional[bytes] = ..., attestationCert: _Optional[str] = ..., counter: _Optional[int] = ..., compromised: _Optional[bool] = ...) -> None: ... 
class Group(_message.Message): - __slots__ = ["admin", "groupVerificationCode", "administrator"] + __slots__ = ("admin", "groupVerificationCode", "administrator") ADMIN_FIELD_NUMBER: _ClassVar[int] GROUPVERIFICATIONCODE_FIELD_NUMBER: _ClassVar[int] ADMINISTRATOR_FIELD_NUMBER: _ClassVar[int] admin: bool groupVerificationCode: str administrator: Administrator - def __init__(self, admin: bool = ..., groupVerificationCode: _Optional[str] = ..., administrator: _Optional[_Union[Administrator, _Mapping]] = ...) -> None: ... + def __init__(self, admin: _Optional[bool] = ..., groupVerificationCode: _Optional[str] = ..., administrator: _Optional[_Union[Administrator, _Mapping]] = ...) -> None: ... class Administrator(_message.Message): - __slots__ = ["firstName", "lastName", "email", "currentNumberOfUsers", "numberOfUsers", "subscriptionCode", "expirationDate", "purchaseDate"] + __slots__ = ("firstName", "lastName", "email", "currentNumberOfUsers", "numberOfUsers", "subscriptionCode", "expirationDate", "purchaseDate") FIRSTNAME_FIELD_NUMBER: _ClassVar[int] LASTNAME_FIELD_NUMBER: _ClassVar[int] EMAIL_FIELD_NUMBER: _ClassVar[int] diff --git a/keepercommander/proto/Summary.md b/keepercommander/proto/Summary.md new file mode 100644 index 000000000..f739f5c09 --- /dev/null +++ b/keepercommander/proto/Summary.md @@ -0,0 +1,170 @@ +Summary +The Commander codebase (CLI, not the Web Vault — same backend, different client) already has substantial KeeperDrive scaffolding. A full kd-* command surface is wired up via keepercommander/commands/keeper_drive/ and an API service layer in keepercommander/keeper_drive/, plus sync-down handling and the KEEPER_DRIVE feature flag. + +But several MRD requirements are unimplemented or only partially covered. Here is the requirement-by-requirement audit, then the concrete to-do list. 
+ +What's already in place +MRD ID Requirement Where it lives Status +KD1, KD2 +Create root / nested folders (canAdd) +kd-mkdir → keeper_drive.create_folder_v3 +Implemented +KD3, KD4 +Independent sharing & override of inherited permissions (canUpdateAccess) +kd-rndir --no-inherit / --inherit, _build_update_data sets inheritUserPermissions +Implemented +KD5 +Update folder metadata title/color (canUpdateSetting) +kd-rndir → update_folder_v3 +Implemented +KD6, KD7 +Remove / permanently delete folder +kd-rmdir -o folder-trash / delete-permanent → remove_folder_v3 +Implemented +KD8, KD9 +View folder structure / record titles +kd-list, sync caches keeper_drive_folders/records +Implemented +KD10 +View folder accessors (canListAccess) +get_folder_access_v3 (vault/folders/v3/access), kd-get -v +Implemented +KD11 +Update folder access +kd-share-folder → grant/update/revoke_folder_access_v3 +Implemented (users only — see gaps) +KD13, KD14, KD20–22 +View / edit record content +kd-get, kd-record-update, kd-record-add +Implemented +KD15, KD16 +Create record at root / inside folder +kd-record-add --folder → create_record_v3 +Implemented +KD17, KD18, KD19 +Remove record (folder / unlink / permanent) +kd-rm -o owner-trash / folder-trash / unlink → remove_record_v3 +Implemented +KD23 +View record accessors +get_record_accesses_v3 exposed via kd-get +Implemented +KD24 +Update record access +kd-share-record grant/revoke → share_record_v3 +Implemented (users only) +KD25 +Change record ownership +kd-share-record -a owner and kd-transfer-record → transfer_record_ownership_v3 +Implemented +KD32 +FeatureFlag.KEEPER_DRIVE gating +params.is_feature_disallowed('keeper_drive') used in cli.py, sync_down.py, autocomplete.py +Implemented +KD33 +KeeperDrive ↔ Legacy isolation in mv +commands/folder.py lines 859–911 +Partial (only mv) +KD34 +Inheritance vs independent sharing +Same as KD3/KD4 +Implemented +KD35 +Explicit deny overrides +Marked N/A for MVP +N/A +What's missing or incomplete +1. 
KSM application support (KD29, KD30, KD31) — fully missing +There are zero KeeperDrive↔KSM hooks anywhere under keepercommander/keeper_drive/ or commands/keeper_drive/. A grep for ksm/secrets_manager in those directories returns nothing. + +You need: + +A kd-ksm-app-add (KD29 / KD31) that creates a KSM Application either at the vault root or inside a KeeperDrive folder context (canAdd). +A kd-ksm-app-share (KD30) that wires a KSM application share into a KeeperDrive folder. +Probably a service-layer module keepercommander/keeper_drive/ksm_api.py plus exposure through the package __init__.py _SUBMODULE_MAP. +The existing legacy commands/ksm.py is the natural source to refactor against — it already speaks the KSM endpoints; you just need the v3 wiring + folder-context parameter. + +2. Team sharing (KD11, KD24) — only user-as-actor is supported +The MRD wording for both folder and record sharing is "users or teams", and the proto already has AT_TEAM. But the only place AT_TEAM is referenced in keeper_drive/ is removal_api.py (and only to count affected_teams_count). + +In folder_api.py the entire share path hard-codes accessType = folder_pb2.AT_USER (lines 327, 385, 412, 441, 461, 478) and kd-share-folder/kd-share-record parsers accept only --email. There's no --team/--team-uid switch and no team-key-encryption path. + +Add: + +--team / -T to kd-share-folder and kd-share-record parsers. +A resolve_team_uid_bytes helper paralleling resolve_user_uid_bytes in common.py. +A team branch in grant_folder_access_v3 / manage_folder_access_batch_v3 / record-share that sets accessType = AT_TEAM and encrypts the folder/record key with the team key. +3. Maximum nesting depth = 5 (KD2) — not enforced +Nothing in kd-mkdir, create_folder_v3, _prepare_folder_for_creation, or helpers.py checks the depth of the parent chain. A grep for MAX_DEPTH / depth returns no matches. 
+ +Add a check in KeeperDriveMkdirCommand.execute (or in create_folder_v3) that walks parent_uid → parent_uid through params.keeper_drive_folders and refuses with a friendly error when depth >= 5. + +4. Cross-model nesting prevention (KD28) — only mv is guarded +commands/folder.py FolderMoveCommand blocks moves between SharedFolder and KeeperDrive. But FolderMakeCommand (legacy mkdir) at line 482+ never inspects whether base_folder is a KeeperDriveFolderType. So mkdir -sf "X" while cd'd into a KD folder would try to create a Legacy SharedFolder inside KeeperDrive (server may reject, but client should fail fast). + +Add the symmetric guard to FolderMakeCommand.execute: + +if base_folder.type == BaseFolderNode.KeeperDriveFolderType: + raise CommandError('mkdir', + 'Legacy folders cannot be created inside a KeeperDrive folder. ' + 'Use kd-mkdir instead.') +The same guard belongs in legacy RecordAddCommand (legacy add/record-add) so that when the current folder is KD, the user is told to use kd-record-add (KD27 contextual create rule). + +5. kd-mkdir cannot target a parent (KD2 ergonomics) +KeeperDriveMkdirCommand discovers the parent only via params.current_folder. There is no --folder / --parent argument the way kd-record-add has --folder, so building hierarchies non-interactively requires cd between every call. + +Add --folder/--parent FOLDER to keeper_drive_mkdir_parser and pass it through create_folder_v3(parent_uid=…). + +6. Folder-permission–driven record actions (KD13, KD14) — client checks only the record-level grant +helpers._check_record_permission looks up keeper_drive_record_accesses only. The MRD says folder-level canViewRecords / canEditRecords should also authorize record reads/edits inside that folder. + +kd-record-update and kd-get therefore reject users who hold the right via the parent folder rather than the record itself. The server is probably permissive, but pre-flight checks are wrong. 

Update _check_record_permission to also walk find_kd_folders_for_record(params, record_uid) and accept when any containing folder grants can_view_records / can_edit_records.

7. Permission matrix vs MRD scope
keeper_drive/permissions.py includes NAVIGATOR=0 and REQUESTOR=1, and ROLE_NAME_MAP exposes 'contributor' / 'requestor' (both → 1). The MRD explicitly limits Phase 1 roles to VIEWER (2), SHARED_MANAGER (3), CONTENT_MANAGER (4), CONTENT_SHARE_MANAGER (5), MANAGER (6).

This is fine if the backend will silently accept those, but the kd-share-folder/kd-share-record parsers already restrict --role choices to MRD-allowed names, so the extras are dead options reachable only programmatically. Decide whether to:

Drop the NAVIGATOR/REQUESTOR/contributor mappings for V1 to avoid drift, or
Keep them but document them as internal.
Also: the helpers.role_label and infer_role functions still return 'contributor'/'requestor'/'navigator' on display — they will leak into kd-list -p and kd-get -v. Trim them for V1 to match the MRD's display surface.

8. KD12 — Change folder ownership
The MRD marks this as N/A for MVP, and there is no kd-chown-folder in the codebase. Confirmed correct — leave a stub TODO if desired.

9. Out-of-scope features that are actually present
The MRD explicitly puts these out of scope but the code partially supports them:

- TLA (time-limited access) — leaks in via --expire-at / --expire-in on kd-share-folder and kd-share-record, and via tlaProperties.expiration set in grant_folder_access_v3. Recommendation: either keep it (the server will reject it if disabled) or hide the switches behind a feature-flag check.
- TrashCan / restore — staged trashcan_sync_pb2 files appear in the original git status snapshot but are untracked-uncommitted; the keeper_drive_trashed_folders cache is referenced in sync.py clear_caches. Recommendation: don't ship the proto pieces in this PR; remove the cache code or feature-flag it.
- Move To / Drag-and-Drop — kd-ln and kd-shortcut are link operations, fine. But mv partially still talks about "Drive folders" — that path is correctly raising CommandError; leave as is.
10. current_folder for KD context (KD27 contextual create)
commands/folder.py FolderCdCommand.execute (line 363) already accepts a KD folder UID as current_folder. Good. But kd-record-add only consumes --folder and ignores params.current_folder. To match the MRD wording ("If a KeeperDrive folder is selected and Add/Create is clicked, the dialog shall create a record … within that KeeperDrive folder context"), have KeeperDriveAddRecordCommand.execute default folder_uid to params.current_folder when it is a KD folder UID.

11. KEEPER_DRIVE flag handling on every kd-* command (KD32)
The flag is checked in command-listing/help (cli.py line 387) and in sync ingestion (sync_down.py line 79), but the individual kd-* execute() methods don't re-check the flag. So a user for whom the flag is disabled who types kd-mkdir directly will still run it, bypassing the client-side check entirely. Recommend adding a guard in a base helper used by every kd-* command (e.g., in helpers.command_error_handler or a separate require_keeper_drive(params, cmd_name) decorator).

12. Minor
keeper_drive_share_folder_parser has no --team (see #2) and no JSON output mode, while every other kd-* listing/inspection command has --format json. Add for parity if needed.
_check_folder_permission (in helpers.py) silently returns on the first matching username; if no matching access entry is found, it never raises — letting actions pass when they shouldn't. Add a final raise CommandError(cmd_name, error_message) after the loop.
The kd-share-record parser sets --email required=True even for folder-bulk mode (-R). Reconsider.
Concrete to-do list, priority-ordered
Add KSM commands & service module (KD29/30/31). New work, needed for MRD section 8.
Add team-as-actor support to kd-share-folder / kd-share-record and the underlying *_v3 calls (KD11/KD24).
+Enforce max depth 5 in kd-mkdir (KD2). +Block legacy mkdir/add inside KD folders to satisfy KD28; mirror the mv guard in FolderMakeCommand and the legacy add command. +Add --folder/--parent to kd-mkdir and have kd-record-add honor params.current_folder (KD27 ergonomics). +Fix folder-derived record permission checks in _check_record_permission (KD13/14). +Add a global feature-flag guard at the top of every kd-* execute() (KD32). +Tighten _check_folder_permission so a missing access record raises instead of falling through. +Trim NAVIGATOR/REQUESTOR/contributor surface from display helpers and the role map (MRD V1 role list). +Either ship or revert the staged trashcan_sync_pb2* files — they're listed in the git snapshot but absent on disk; trash sync is out of scope per MRD. +Want me to start with any of these (KSM commands and team support are the two largest gaps)? \ No newline at end of file diff --git a/keepercommander/proto/SyncDown_pb2.py b/keepercommander/proto/SyncDown_pb2.py index cf16af278..17ac2d397 100644 --- a/keepercommander/proto/SyncDown_pb2.py +++ b/keepercommander/proto/SyncDown_pb2.py @@ -16,86 +16,95 @@ from . import APIRequest_pb2 as APIRequest__pb2 from . import enterprise_pb2 as enterprise__pb2 from . import NotificationCenter_pb2 as NotificationCenter__pb2 +from . import dag_pb2 as dag__pb2 +from . import folder_pb2 as folder__pb2 +from . 
import record_sharing_pb2 as record__sharing__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0eSyncDown.proto\x12\x05Vault\x1a\x0crecord.proto\x1a\x11\x62reachwatch.proto\x1a\x10\x41PIRequest.proto\x1a\x10\x65nterprise.proto\x1a\x18NotificationCenter.proto\"A\n\x0fSyncDownRequest\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x13\n\x0b\x64\x61taVersion\x18\x02 \x01(\x05\"\x81\x11\n\x10SyncDownResponse\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x0f\n\x07hasMore\x18\x02 \x01(\x08\x12\'\n\x0b\x63\x61\x63heStatus\x18\x03 \x01(\x0e\x32\x12.Vault.CacheStatus\x12&\n\x0buserFolders\x18\x04 \x03(\x0b\x32\x11.Vault.UserFolder\x12*\n\rsharedFolders\x18\x05 \x03(\x0b\x32\x13.Vault.SharedFolder\x12>\n\x17userFolderSharedFolders\x18\x06 \x03(\x0b\x32\x1d.Vault.UserFolderSharedFolder\x12\x36\n\x13sharedFolderFolders\x18\x07 \x03(\x0b\x32\x19.Vault.SharedFolderFolder\x12\x1e\n\x07records\x18\x08 \x03(\x0b\x32\r.Vault.Record\x12-\n\x0erecordMetaData\x18\t \x03(\x0b\x32\x15.Vault.RecordMetaData\x12+\n\rnonSharedData\x18\n \x03(\x0b\x32\x14.Vault.NonSharedData\x12&\n\x0brecordLinks\x18\x0b \x03(\x0b\x32\x11.Vault.RecordLink\x12\x32\n\x11userFolderRecords\x18\x0c \x03(\x0b\x32\x17.Vault.UserFolderRecord\x12\x36\n\x13sharedFolderRecords\x18\r \x03(\x0b\x32\x19.Vault.SharedFolderRecord\x12\x42\n\x19sharedFolderFolderRecords\x18\x0e \x03(\x0b\x32\x1f.Vault.SharedFolderFolderRecord\x12\x32\n\x11sharedFolderUsers\x18\x0f \x03(\x0b\x32\x17.Vault.SharedFolderUser\x12\x32\n\x11sharedFolderTeams\x18\x10 \x03(\x0b\x32\x17.Vault.SharedFolderTeam\x12\x1a\n\x12recordAddAuditData\x18\x11 \x03(\x0c\x12\x1a\n\x05teams\x18\x12 \x03(\x0b\x32\x0b.Vault.Team\x12,\n\x0esharingChanges\x18\x13 \x03(\x0b\x32\x14.Vault.SharingChange\x12\x1f\n\x07profile\x18\x14 \x01(\x0b\x32\x0e.Vault.Profile\x12%\n\nprofilePic\x18\x15 \x01(\x0b\x32\x11.Vault.ProfilePic\x12\x34\n\x12pendingTeamMembers\x18\x16 
\x03(\x0b\x32\x18.Vault.PendingTeamMember\x12\x34\n\x12\x62reachWatchRecords\x18\x17 \x03(\x0b\x32\x18.Vault.BreachWatchRecord\x12\"\n\tuserAuths\x18\x18 \x03(\x0b\x32\x0f.Vault.UserAuth\x12?\n\x17\x62reachWatchSecurityData\x18\x19 \x03(\x0b\x32\x1e.Vault.BreachWatchSecurityData\x12/\n\x0freusedPasswords\x18\x1a \x01(\x0b\x32\x16.Vault.ReusedPasswords\x12\x1a\n\x12removedUserFolders\x18\x1b \x03(\x0c\x12\x1c\n\x14removedSharedFolders\x18\x1c \x03(\x0c\x12\x45\n\x1eremovedUserFolderSharedFolders\x18\x1d \x03(\x0b\x32\x1d.Vault.UserFolderSharedFolder\x12=\n\x1aremovedSharedFolderFolders\x18\x1e \x03(\x0b\x32\x19.Vault.SharedFolderFolder\x12\x16\n\x0eremovedRecords\x18\x1f \x03(\x0c\x12-\n\x12removedRecordLinks\x18 \x03(\x0b\x32\x11.Vault.RecordLink\x12\x39\n\x18removedUserFolderRecords\x18! \x03(\x0b\x32\x17.Vault.UserFolderRecord\x12=\n\x1aremovedSharedFolderRecords\x18\" \x03(\x0b\x32\x19.Vault.SharedFolderRecord\x12I\n removedSharedFolderFolderRecords\x18# \x03(\x0b\x32\x1f.Vault.SharedFolderFolderRecord\x12\x39\n\x18removedSharedFolderUsers\x18$ \x03(\x0b\x32\x17.Vault.SharedFolderUser\x12\x39\n\x18removedSharedFolderTeams\x18% \x03(\x0b\x32\x17.Vault.SharedFolderTeam\x12\x14\n\x0cremovedTeams\x18& \x03(\x0c\x12&\n\x0cksmAppShares\x18\' \x03(\x0b\x32\x10.Vault.KsmChange\x12\'\n\rksmAppClients\x18( \x03(\x0b\x32\x10.Vault.KsmChange\x12\x30\n\x10shareInvitations\x18) \x03(\x0b\x32\x16.Vault.ShareInvitation\x12+\n\x0b\x64iagnostics\x18* \x01(\x0b\x32\x16.Vault.SyncDiagnostics\x12.\n\x0frecordRotations\x18+ \x03(\x0b\x32\x15.Vault.RecordRotation\x12\x1a\n\x05users\x18, \x03(\x0b\x32\x0b.Vault.User\x12\x14\n\x0cremovedUsers\x18- \x03(\x0c\x12\x33\n\x11securityScoreData\x18. 
\x03(\x0b\x32\x18.Vault.SecurityScoreData\x12\x41\n\x10notificationSync\x18/ \x03(\x0b\x32\'.NotificationCenter.NotificationWrapper\"\x92\x01\n\nUserFolder\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x11\n\tparentUid\x18\x02 \x01(\x0c\x12\x15\n\ruserFolderKey\x18\x03 \x01(\x0c\x12\'\n\x07keyType\x18\x04 \x01(\x0e\x32\x16.Records.RecordKeyType\x12\x10\n\x08revision\x18\x05 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\x0c\"\xd5\x02\n\x0cSharedFolder\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x17\n\x0fsharedFolderKey\x18\x03 \x01(\x0c\x12\'\n\x07keyType\x18\x04 \x01(\x0e\x32\x16.Records.RecordKeyType\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\x0c\x12\x1c\n\x14\x64\x65\x66\x61ultManageRecords\x18\x06 \x01(\x08\x12\x1a\n\x12\x64\x65\x66\x61ultManageUsers\x18\x07 \x01(\x08\x12\x16\n\x0e\x64\x65\x66\x61ultCanEdit\x18\x08 \x01(\x08\x12\x19\n\x11\x64\x65\x66\x61ultCanReshare\x18\t \x01(\x08\x12\'\n\x0b\x63\x61\x63heStatus\x18\n \x01(\x0e\x32\x12.Vault.CacheStatus\x12\r\n\x05owner\x18\x0b \x01(\t\x12\x17\n\x0fownerAccountUid\x18\x0c \x01(\x0c\x12\x0c\n\x04name\x18\r \x01(\x0c\"V\n\x16UserFolderSharedFolder\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x17\n\x0fsharedFolderUid\x18\x02 \x01(\x0c\x12\x10\n\x08revision\x18\x03 \x01(\x03\"\xbb\x01\n\x12SharedFolderFolder\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x11\n\tfolderUid\x18\x02 \x01(\x0c\x12\x11\n\tparentUid\x18\x03 \x01(\x0c\x12\x1d\n\x15sharedFolderFolderKey\x18\x04 \x01(\x0c\x12\'\n\x07keyType\x18\x05 \x01(\x0e\x32\x16.Records.RecordKeyType\x12\x10\n\x08revision\x18\x06 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x07 \x01(\x0c\"l\n\x0fSharedFolderKey\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x17\n\x0fsharedFolderKey\x18\x02 \x01(\x0c\x12\'\n\x07keyType\x18\x03 \x01(\x0e\x32\x16.Records.RecordKeyType\"\xc3\x02\n\x04Team\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07teamKey\x18\x03 \x01(\x0c\x12+\n\x0bteamKeyType\x18\x04 
\x01(\x0e\x32\x16.Records.RecordKeyType\x12\x16\n\x0eteamPrivateKey\x18\x05 \x01(\x0c\x12\x14\n\x0crestrictEdit\x18\x06 \x01(\x08\x12\x15\n\rrestrictShare\x18\x07 \x01(\x08\x12\x14\n\x0crestrictView\x18\x08 \x01(\x08\x12\x1c\n\x14removedSharedFolders\x18\t \x03(\x0c\x12\x30\n\x10sharedFolderKeys\x18\n \x03(\x0b\x32\x16.Vault.SharedFolderKey\x12\x19\n\x11teamEccPrivateKey\x18\x0b \x01(\x0c\x12\x18\n\x10teamEccPublicKey\x18\x0c \x01(\x0c\"\xbf\x01\n\x06Record\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x0f\n\x07version\x18\x03 \x01(\x05\x12\x0e\n\x06shared\x18\x04 \x01(\x08\x12\x1a\n\x12\x63lientModifiedTime\x18\x05 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\x0c\x12\r\n\x05\x65xtra\x18\x07 \x01(\x0c\x12\r\n\x05udata\x18\x08 \x01(\t\x12\x10\n\x08\x66ileSize\x18\t \x01(\x03\x12\x15\n\rthumbnailSize\x18\n \x01(\x03\"b\n\nRecordLink\x12\x17\n\x0fparentRecordUid\x18\x01 \x01(\x0c\x12\x16\n\x0e\x63hildRecordUid\x18\x02 \x01(\x0c\x12\x11\n\trecordKey\x18\x03 \x01(\x0c\x12\x10\n\x08revision\x18\x04 \x01(\x03\"J\n\x10UserFolderRecord\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x10\n\x08revision\x18\x03 \x01(\x03\"k\n\x18SharedFolderFolderRecord\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x11\n\tfolderUid\x18\x02 \x01(\x0c\x12\x11\n\trecordUid\x18\x03 \x01(\x0c\x12\x10\n\x08revision\x18\x04 \x01(\x03\"0\n\rNonSharedData\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\x9f\x02\n\x0eRecordMetaData\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\r\n\x05owner\x18\x02 \x01(\x08\x12\x11\n\trecordKey\x18\x03 \x01(\x0c\x12-\n\rrecordKeyType\x18\x04 \x01(\x0e\x32\x16.Records.RecordKeyType\x12\x10\n\x08\x63\x61nShare\x18\x05 \x01(\x08\x12\x0f\n\x07\x63\x61nEdit\x18\x06 \x01(\x08\x12\x17\n\x0fownerAccountUid\x18\x07 \x01(\x0c\x12\x12\n\nexpiration\x18\x08 \x01(\x03\x12\x42\n\x1a\x65xpirationNotificationType\x18\t 
\x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x15\n\rownerUsername\x18\n \x01(\t\"2\n\rSharingChange\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x0e\n\x06shared\x18\x02 \x01(\x08\">\n\x07Profile\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\x13\n\x0bprofileName\x18\x02 \x01(\t\x12\x10\n\x08revision\x18\x03 \x01(\x03\"+\n\nProfilePic\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\x10\n\x08revision\x18\x02 \x01(\x03\"p\n\x11PendingTeamMember\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x15\n\ruserPublicKey\x18\x02 \x01(\x0c\x12\x10\n\x08teamUids\x18\x03 \x03(\x0c\x12\x18\n\x10userEccPublicKey\x18\x04 \x01(\x0c\"\xa6\x01\n\x11\x42reachWatchRecord\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12.\n\x04type\x18\x03 \x01(\x0e\x32 .BreachWatch.BreachWatchInfoType\x12\x11\n\tscannedBy\x18\x04 \x01(\t\x12\x10\n\x08revision\x18\x05 \x01(\x03\x12\x1b\n\x13scannedByAccountUid\x18\x06 \x01(\x0c\"\xb4\x01\n\x08UserAuth\x12\x0b\n\x03uid\x18\x01 \x01(\x0c\x12,\n\tloginType\x18\x02 \x01(\x0e\x32\x19.Authentication.LoginType\x12\x0f\n\x07\x64\x65leted\x18\x03 \x01(\x08\x12\x12\n\niterations\x18\x04 \x01(\x05\x12\x0c\n\x04salt\x18\x05 \x01(\x0c\x12\x1a\n\x12\x65ncryptedClientKey\x18\x06 \x01(\x0c\x12\x10\n\x08revision\x18\x07 \x01(\x03\x12\x0c\n\x04name\x18\x08 \x01(\t\">\n\x17\x42reachWatchSecurityData\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08revision\x18\x02 \x01(\x03\"2\n\x0fReusedPasswords\x12\r\n\x05\x63ount\x18\x01 \x01(\x05\x12\x10\n\x08revision\x18\x02 \x01(\x03\"\xa9\x02\n\x12SharedFolderRecord\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x11\n\trecordKey\x18\x03 \x01(\x0c\x12\x10\n\x08\x63\x61nShare\x18\x04 \x01(\x08\x12\x0f\n\x07\x63\x61nEdit\x18\x05 \x01(\x08\x12\x17\n\x0fownerAccountUid\x18\x06 \x01(\x0c\x12\x12\n\nexpiration\x18\x07 \x01(\x03\x12\r\n\x05owner\x18\x08 \x01(\x08\x12\x42\n\x1a\x65xpirationNotificationType\x18\t 
\x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x15\n\rownerUsername\x18\n \x01(\t\x12\x1a\n\x12rotateOnExpiration\x18\x0b \x01(\x08\"\xf1\x01\n\x10SharedFolderUser\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x15\n\rmanageRecords\x18\x03 \x01(\x08\x12\x13\n\x0bmanageUsers\x18\x04 \x01(\x08\x12\x12\n\naccountUid\x18\x05 \x01(\x0c\x12\x12\n\nexpiration\x18\x06 \x01(\x03\x12\x42\n\x1a\x65xpirationNotificationType\x18\x07 \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x1a\n\x12rotateOnExpiration\x18\x08 \x01(\x08\"\xea\x01\n\x10SharedFolderTeam\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x0f\n\x07teamUid\x18\x02 \x01(\x0c\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x15\n\rmanageRecords\x18\x04 \x01(\x08\x12\x13\n\x0bmanageUsers\x18\x05 \x01(\x08\x12\x12\n\nexpiration\x18\x06 \x01(\x03\x12\x42\n\x1a\x65xpirationNotificationType\x18\x07 \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x1a\n\x12rotateOnExpiration\x18\x08 \x01(\x08\"\x8a\x01\n\tKsmChange\x12\x14\n\x0c\x61ppRecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08\x64\x65tailId\x18\x02 \x01(\x0c\x12\x0f\n\x07removed\x18\x03 \x01(\x08\x12\x30\n\rappClientType\x18\x04 \x01(\x0e\x32\x19.Enterprise.AppClientType\x12\x12\n\nexpiration\x18\x05 \x01(\x03\"#\n\x0fShareInvitation\x12\x10\n\x08username\x18\x01 \x01(\t\",\n\x04User\x12\x12\n\naccountUid\x18\x01 \x01(\x0c\x12\x10\n\x08username\x18\x02 \x01(\t\"{\n\x0fSyncDiagnostics\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x0e\n\x06userId\x18\x02 \x01(\x05\x12\x18\n\x10\x65nterpriseUserId\x18\x03 \x01(\x03\x12\x10\n\x08syncedTo\x18\x04 \x01(\x03\x12\x11\n\tsyncingTo\x18\x05 \x01(\x03\"\xee\x01\n\x0eRecordRotation\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x18\n\x10\x63onfigurationUid\x18\x03 \x01(\x0c\x12\x10\n\x08schedule\x18\x04 \x01(\t\x12\x15\n\rpwdComplexity\x18\x05 \x01(\x0c\x12\x10\n\x08\x64isabled\x18\x06 \x01(\x08\x12\x13\n\x0bresourceUid\x18\x07 
\x01(\x0c\x12\x14\n\x0clastRotation\x18\x08 \x01(\x03\x12\x37\n\x12lastRotationStatus\x18\t \x01(\x0e\x32\x1b.Vault.RecordRotationStatus\"F\n\x11SecurityScoreData\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x10\n\x08revision\x18\x03 \x01(\x03\"3\n\x1d\x42reachWatchGetSyncDataRequest\x12\x12\n\nrecordUids\x18\x01 \x03(\x0c\"\xb3\x01\n\x1e\x42reachWatchGetSyncDataResponse\x12\x34\n\x12\x62reachWatchRecords\x18\x01 \x03(\x0b\x32\x18.Vault.BreachWatchRecord\x12?\n\x17\x62reachWatchSecurityData\x18\x02 \x03(\x0b\x32\x1e.Vault.BreachWatchSecurityData\x12\x1a\n\x05users\x18\x03 \x03(\x0b\x32\x0b.Vault.User\"6\n\x18GetAccountUidMapResponse\x12\x1a\n\x05users\x18\x01 \x03(\x0b\x32\x0b.Vault.User*\"\n\x0b\x43\x61\x63heStatus\x12\x08\n\x04KEEP\x10\x00\x12\t\n\x05\x43LEAR\x10\x01*f\n\x14RecordRotationStatus\x12\x14\n\x10RRST_NOT_ROTATED\x10\x00\x12\x14\n\x10RRST_IN_PROGRESS\x10\x01\x12\x10\n\x0cRRST_SUCCESS\x10\x02\x12\x10\n\x0cRRST_FAILURE\x10\x03\x42!\n\x18\x63om.keepersecurity.protoB\x05Vaultb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0eSyncDown.proto\x12\x05Vault\x1a\x0crecord.proto\x1a\x11\x62reachwatch.proto\x1a\x10\x41PIRequest.proto\x1a\x10\x65nterprise.proto\x1a\x18NotificationCenter.proto\x1a\tdag.proto\x1a\x0c\x66older.proto\x1a\x14record_sharing.proto\"P\n\x0fSyncDownRequest\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x13\n\x0b\x64\x61taVersion\x18\x02 \x01(\x05\x12\r\n\x05\x64\x65\x62ug\x18\x03 \x01(\x08\"\xb2\x11\n\x10SyncDownResponse\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x0f\n\x07hasMore\x18\x02 \x01(\x08\x12\'\n\x0b\x63\x61\x63heStatus\x18\x03 \x01(\x0e\x32\x12.Vault.CacheStatus\x12&\n\x0buserFolders\x18\x04 \x03(\x0b\x32\x11.Vault.UserFolder\x12*\n\rsharedFolders\x18\x05 \x03(\x0b\x32\x13.Vault.SharedFolder\x12>\n\x17userFolderSharedFolders\x18\x06 \x03(\x0b\x32\x1d.Vault.UserFolderSharedFolder\x12\x36\n\x13sharedFolderFolders\x18\x07 
\x03(\x0b\x32\x19.Vault.SharedFolderFolder\x12\x1e\n\x07records\x18\x08 \x03(\x0b\x32\r.Vault.Record\x12-\n\x0erecordMetaData\x18\t \x03(\x0b\x32\x15.Vault.RecordMetaData\x12+\n\rnonSharedData\x18\n \x03(\x0b\x32\x14.Vault.NonSharedData\x12&\n\x0brecordLinks\x18\x0b \x03(\x0b\x32\x11.Vault.RecordLink\x12\x32\n\x11userFolderRecords\x18\x0c \x03(\x0b\x32\x17.Vault.UserFolderRecord\x12\x36\n\x13sharedFolderRecords\x18\r \x03(\x0b\x32\x19.Vault.SharedFolderRecord\x12\x42\n\x19sharedFolderFolderRecords\x18\x0e \x03(\x0b\x32\x1f.Vault.SharedFolderFolderRecord\x12\x32\n\x11sharedFolderUsers\x18\x0f \x03(\x0b\x32\x17.Vault.SharedFolderUser\x12\x32\n\x11sharedFolderTeams\x18\x10 \x03(\x0b\x32\x17.Vault.SharedFolderTeam\x12\x1a\n\x12recordAddAuditData\x18\x11 \x03(\x0c\x12\x1a\n\x05teams\x18\x12 \x03(\x0b\x32\x0b.Vault.Team\x12,\n\x0esharingChanges\x18\x13 \x03(\x0b\x32\x14.Vault.SharingChange\x12\x1f\n\x07profile\x18\x14 \x01(\x0b\x32\x0e.Vault.Profile\x12%\n\nprofilePic\x18\x15 \x01(\x0b\x32\x11.Vault.ProfilePic\x12\x34\n\x12pendingTeamMembers\x18\x16 \x03(\x0b\x32\x18.Vault.PendingTeamMember\x12\x34\n\x12\x62reachWatchRecords\x18\x17 \x03(\x0b\x32\x18.Vault.BreachWatchRecord\x12\"\n\tuserAuths\x18\x18 \x03(\x0b\x32\x0f.Vault.UserAuth\x12?\n\x17\x62reachWatchSecurityData\x18\x19 \x03(\x0b\x32\x1e.Vault.BreachWatchSecurityData\x12/\n\x0freusedPasswords\x18\x1a \x01(\x0b\x32\x16.Vault.ReusedPasswords\x12\x1a\n\x12removedUserFolders\x18\x1b \x03(\x0c\x12\x1c\n\x14removedSharedFolders\x18\x1c \x03(\x0c\x12\x45\n\x1eremovedUserFolderSharedFolders\x18\x1d \x03(\x0b\x32\x1d.Vault.UserFolderSharedFolder\x12=\n\x1aremovedSharedFolderFolders\x18\x1e \x03(\x0b\x32\x19.Vault.SharedFolderFolder\x12\x16\n\x0eremovedRecords\x18\x1f \x03(\x0c\x12-\n\x12removedRecordLinks\x18 \x03(\x0b\x32\x11.Vault.RecordLink\x12\x39\n\x18removedUserFolderRecords\x18! 
\x03(\x0b\x32\x17.Vault.UserFolderRecord\x12=\n\x1aremovedSharedFolderRecords\x18\" \x03(\x0b\x32\x19.Vault.SharedFolderRecord\x12I\n removedSharedFolderFolderRecords\x18# \x03(\x0b\x32\x1f.Vault.SharedFolderFolderRecord\x12\x39\n\x18removedSharedFolderUsers\x18$ \x03(\x0b\x32\x17.Vault.SharedFolderUser\x12\x39\n\x18removedSharedFolderTeams\x18% \x03(\x0b\x32\x17.Vault.SharedFolderTeam\x12\x14\n\x0cremovedTeams\x18& \x03(\x0c\x12&\n\x0cksmAppShares\x18\' \x03(\x0b\x32\x10.Vault.KsmChange\x12\'\n\rksmAppClients\x18( \x03(\x0b\x32\x10.Vault.KsmChange\x12\x30\n\x10shareInvitations\x18) \x03(\x0b\x32\x16.Vault.ShareInvitation\x12+\n\x0b\x64iagnostics\x18* \x01(\x0b\x32\x16.Vault.SyncDiagnostics\x12.\n\x0frecordRotations\x18+ \x03(\x0b\x32\x15.Vault.RecordRotation\x12\x1a\n\x05users\x18, \x03(\x0b\x32\x0b.Vault.User\x12\x14\n\x0cremovedUsers\x18- \x03(\x0c\x12\x33\n\x11securityScoreData\x18. \x03(\x0b\x32\x18.Vault.SecurityScoreData\x12\x41\n\x10notificationSync\x18/ \x03(\x0b\x32\'.NotificationCenter.NotificationWrapper\x12/\n\x0fkeeperDriveData\x18\x30 \x01(\x0b\x32\x16.Vault.KeeperDriveData\"\x98\x01\n\x0b\x44riveRecord\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x0f\n\x07version\x18\x03 \x01(\x05\x12\x0e\n\x06shared\x18\x04 \x01(\x08\x12\x1a\n\x12\x63lientModifiedTime\x18\x05 \x01(\x03\x12\x10\n\x08\x66ileSize\x18\x06 \x01(\x03\x12\x15\n\rthumbnailSize\x18\x07 \x01(\x03\"F\n\x12\x46olderSharingState\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x0e\n\x06shared\x18\x02 \x01(\x08\x12\r\n\x05\x63ount\x18\x03 \x01(\x05\"\x9b\x08\n\x0fKeeperDriveData\x12#\n\x07\x66olders\x18\n \x03(\x0b\x32\x12.Folder.FolderData\x12%\n\nfolderKeys\x18\r \x03(\x0b\x32\x11.Folder.FolderKey\x12\x30\n\x0e\x66olderAccesses\x18\x0f \x03(\x0b\x32\x18.Folder.FolderAccessData\x12\x34\n\x15revokedFolderAccesses\x18\x11 \x03(\x0b\x32\x15.Folder.RevokedAccess\x12&\n\nrecordData\x18\x14 \x03(\x0b\x32\x12.Folder.RecordData\x12+\n\rnonSharedData\x18\x15 
\x03(\x0b\x32\x14.Vault.NonSharedData\x12\x30\n\x0erecordAccesses\x18\x19 \x03(\x0b\x32\x18.Folder.RecordAccessData\x12?\n\x15revokedRecordAccesses\x18\x1b \x03(\x0b\x32 .record.v3.sharing.RevokedAccess\x12\x42\n\x13recordSharingStates\x18\x1c \x03(\x0b\x32%.record.v3.sharing.RecordSharingState\x12&\n\x0brecordLinks\x18\x1e \x03(\x0b\x32\x11.Vault.RecordLink\x12-\n\x12removedRecordLinks\x18 \x03(\x0b\x32\x11.Vault.RecordLink\x12\x34\n\x12\x62reachWatchRecords\x18( \x03(\x0b\x32\x18.Vault.BreachWatchRecord\x12\x33\n\x11securityScoreData\x18) \x03(\x0b\x32\x18.Vault.SecurityScoreData\x12?\n\x17\x62reachWatchSecurityData\x18* \x03(\x0b\x32\x1e.Vault.BreachWatchSecurityData\x12-\n\x0eremovedFolders\x18\x30 \x03(\x0b\x32\x15.Folder.FolderRemoved\x12\x36\n\x14removedFolderRecords\x18\x34 \x03(\x0b\x32\x18.Records.FolderRecordKey\x12+\n\rfolderRecords\x18\x36 \x03(\x0b\x32\x14.Folder.FolderRecord\x12\x31\n\x12recordRotationData\x18\x38 \x03(\x0b\x32\x15.Vault.RecordRotation\x12#\n\x07records\x18: \x03(\x0b\x32\x12.Vault.DriveRecord\x12\x35\n\x12\x66olderSharingState\x18< \x03(\x0b\x32\x19.Vault.FolderSharingState\x12\"\n\nrawDagData\x18\x65 \x03(\x0b\x32\x0e.Dag.DebugData\"\x92\x01\n\nUserFolder\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x11\n\tparentUid\x18\x02 \x01(\x0c\x12\x15\n\ruserFolderKey\x18\x03 \x01(\x0c\x12\'\n\x07keyType\x18\x04 \x01(\x0e\x32\x16.Records.RecordKeyType\x12\x10\n\x08revision\x18\x05 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\x0c\"\xd5\x02\n\x0cSharedFolder\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x17\n\x0fsharedFolderKey\x18\x03 \x01(\x0c\x12\'\n\x07keyType\x18\x04 \x01(\x0e\x32\x16.Records.RecordKeyType\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\x0c\x12\x1c\n\x14\x64\x65\x66\x61ultManageRecords\x18\x06 \x01(\x08\x12\x1a\n\x12\x64\x65\x66\x61ultManageUsers\x18\x07 \x01(\x08\x12\x16\n\x0e\x64\x65\x66\x61ultCanEdit\x18\x08 \x01(\x08\x12\x19\n\x11\x64\x65\x66\x61ultCanReshare\x18\t 
\x01(\x08\x12\'\n\x0b\x63\x61\x63heStatus\x18\n \x01(\x0e\x32\x12.Vault.CacheStatus\x12\r\n\x05owner\x18\x0b \x01(\t\x12\x17\n\x0fownerAccountUid\x18\x0c \x01(\x0c\x12\x0c\n\x04name\x18\r \x01(\x0c\"V\n\x16UserFolderSharedFolder\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x17\n\x0fsharedFolderUid\x18\x02 \x01(\x0c\x12\x10\n\x08revision\x18\x03 \x01(\x03\"\xbb\x01\n\x12SharedFolderFolder\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x11\n\tfolderUid\x18\x02 \x01(\x0c\x12\x11\n\tparentUid\x18\x03 \x01(\x0c\x12\x1d\n\x15sharedFolderFolderKey\x18\x04 \x01(\x0c\x12\'\n\x07keyType\x18\x05 \x01(\x0e\x32\x16.Records.RecordKeyType\x12\x10\n\x08revision\x18\x06 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x07 \x01(\x0c\"l\n\x0fSharedFolderKey\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x17\n\x0fsharedFolderKey\x18\x02 \x01(\x0c\x12\'\n\x07keyType\x18\x03 \x01(\x0e\x32\x16.Records.RecordKeyType\"\xc3\x02\n\x04Team\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07teamKey\x18\x03 \x01(\x0c\x12+\n\x0bteamKeyType\x18\x04 \x01(\x0e\x32\x16.Records.RecordKeyType\x12\x16\n\x0eteamPrivateKey\x18\x05 \x01(\x0c\x12\x14\n\x0crestrictEdit\x18\x06 \x01(\x08\x12\x15\n\rrestrictShare\x18\x07 \x01(\x08\x12\x14\n\x0crestrictView\x18\x08 \x01(\x08\x12\x1c\n\x14removedSharedFolders\x18\t \x03(\x0c\x12\x30\n\x10sharedFolderKeys\x18\n \x03(\x0b\x32\x16.Vault.SharedFolderKey\x12\x19\n\x11teamEccPrivateKey\x18\x0b \x01(\x0c\x12\x18\n\x10teamEccPublicKey\x18\x0c \x01(\x0c\"\xbf\x01\n\x06Record\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x0f\n\x07version\x18\x03 \x01(\x05\x12\x0e\n\x06shared\x18\x04 \x01(\x08\x12\x1a\n\x12\x63lientModifiedTime\x18\x05 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\x0c\x12\r\n\x05\x65xtra\x18\x07 \x01(\x0c\x12\r\n\x05udata\x18\x08 \x01(\t\x12\x10\n\x08\x66ileSize\x18\t \x01(\x03\x12\x15\n\rthumbnailSize\x18\n \x01(\x03\"b\n\nRecordLink\x12\x17\n\x0fparentRecordUid\x18\x01 
\x01(\x0c\x12\x16\n\x0e\x63hildRecordUid\x18\x02 \x01(\x0c\x12\x11\n\trecordKey\x18\x03 \x01(\x0c\x12\x10\n\x08revision\x18\x04 \x01(\x03\"J\n\x10UserFolderRecord\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x10\n\x08revision\x18\x03 \x01(\x03\"k\n\x18SharedFolderFolderRecord\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x11\n\tfolderUid\x18\x02 \x01(\x0c\x12\x11\n\trecordUid\x18\x03 \x01(\x0c\x12\x10\n\x08revision\x18\x04 \x01(\x03\"0\n\rNonSharedData\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\x9f\x02\n\x0eRecordMetaData\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\r\n\x05owner\x18\x02 \x01(\x08\x12\x11\n\trecordKey\x18\x03 \x01(\x0c\x12-\n\rrecordKeyType\x18\x04 \x01(\x0e\x32\x16.Records.RecordKeyType\x12\x10\n\x08\x63\x61nShare\x18\x05 \x01(\x08\x12\x0f\n\x07\x63\x61nEdit\x18\x06 \x01(\x08\x12\x17\n\x0fownerAccountUid\x18\x07 \x01(\x0c\x12\x12\n\nexpiration\x18\x08 \x01(\x03\x12\x42\n\x1a\x65xpirationNotificationType\x18\t \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x15\n\rownerUsername\x18\n \x01(\t\"2\n\rSharingChange\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x0e\n\x06shared\x18\x02 \x01(\x08\">\n\x07Profile\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\x13\n\x0bprofileName\x18\x02 \x01(\t\x12\x10\n\x08revision\x18\x03 \x01(\x03\"+\n\nProfilePic\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\x10\n\x08revision\x18\x02 \x01(\x03\"p\n\x11PendingTeamMember\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x15\n\ruserPublicKey\x18\x02 \x01(\x0c\x12\x10\n\x08teamUids\x18\x03 \x03(\x0c\x12\x18\n\x10userEccPublicKey\x18\x04 \x01(\x0c\"\xa6\x01\n\x11\x42reachWatchRecord\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12.\n\x04type\x18\x03 \x01(\x0e\x32 .BreachWatch.BreachWatchInfoType\x12\x11\n\tscannedBy\x18\x04 \x01(\t\x12\x10\n\x08revision\x18\x05 \x01(\x03\x12\x1b\n\x13scannedByAccountUid\x18\x06 
\x01(\x0c\"\xb4\x01\n\x08UserAuth\x12\x0b\n\x03uid\x18\x01 \x01(\x0c\x12,\n\tloginType\x18\x02 \x01(\x0e\x32\x19.Authentication.LoginType\x12\x0f\n\x07\x64\x65leted\x18\x03 \x01(\x08\x12\x12\n\niterations\x18\x04 \x01(\x05\x12\x0c\n\x04salt\x18\x05 \x01(\x0c\x12\x1a\n\x12\x65ncryptedClientKey\x18\x06 \x01(\x0c\x12\x10\n\x08revision\x18\x07 \x01(\x03\x12\x0c\n\x04name\x18\x08 \x01(\t\"O\n\x17\x42reachWatchSecurityData\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x0f\n\x07removed\x18\x03 \x01(\x08\"2\n\x0fReusedPasswords\x12\r\n\x05\x63ount\x18\x01 \x01(\x05\x12\x10\n\x08revision\x18\x02 \x01(\x03\"\xa9\x02\n\x12SharedFolderRecord\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x11\n\trecordKey\x18\x03 \x01(\x0c\x12\x10\n\x08\x63\x61nShare\x18\x04 \x01(\x08\x12\x0f\n\x07\x63\x61nEdit\x18\x05 \x01(\x08\x12\x17\n\x0fownerAccountUid\x18\x06 \x01(\x0c\x12\x12\n\nexpiration\x18\x07 \x01(\x03\x12\r\n\x05owner\x18\x08 \x01(\x08\x12\x42\n\x1a\x65xpirationNotificationType\x18\t \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x15\n\rownerUsername\x18\n \x01(\t\x12\x1a\n\x12rotateOnExpiration\x18\x0b \x01(\x08\"\xf1\x01\n\x10SharedFolderUser\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x15\n\rmanageRecords\x18\x03 \x01(\x08\x12\x13\n\x0bmanageUsers\x18\x04 \x01(\x08\x12\x12\n\naccountUid\x18\x05 \x01(\x0c\x12\x12\n\nexpiration\x18\x06 \x01(\x03\x12\x42\n\x1a\x65xpirationNotificationType\x18\x07 \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x1a\n\x12rotateOnExpiration\x18\x08 \x01(\x08\"\xea\x01\n\x10SharedFolderTeam\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x0f\n\x07teamUid\x18\x02 \x01(\x0c\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x15\n\rmanageRecords\x18\x04 \x01(\x08\x12\x13\n\x0bmanageUsers\x18\x05 \x01(\x08\x12\x12\n\nexpiration\x18\x06 \x01(\x03\x12\x42\n\x1a\x65xpirationNotificationType\x18\x07 
\x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x1a\n\x12rotateOnExpiration\x18\x08 \x01(\x08\"\x8a\x01\n\tKsmChange\x12\x14\n\x0c\x61ppRecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08\x64\x65tailId\x18\x02 \x01(\x0c\x12\x0f\n\x07removed\x18\x03 \x01(\x08\x12\x30\n\rappClientType\x18\x04 \x01(\x0e\x32\x19.Enterprise.AppClientType\x12\x12\n\nexpiration\x18\x05 \x01(\x03\"#\n\x0fShareInvitation\x12\x10\n\x08username\x18\x01 \x01(\t\",\n\x04User\x12\x12\n\naccountUid\x18\x01 \x01(\x0c\x12\x10\n\x08username\x18\x02 \x01(\t\"{\n\x0fSyncDiagnostics\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x0e\n\x06userId\x18\x02 \x01(\x05\x12\x18\n\x10\x65nterpriseUserId\x18\x03 \x01(\x03\x12\x10\n\x08syncedTo\x18\x04 \x01(\x03\x12\x11\n\tsyncingTo\x18\x05 \x01(\x03\"\xee\x01\n\x0eRecordRotation\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x18\n\x10\x63onfigurationUid\x18\x03 \x01(\x0c\x12\x10\n\x08schedule\x18\x04 \x01(\t\x12\x15\n\rpwdComplexity\x18\x05 \x01(\x0c\x12\x10\n\x08\x64isabled\x18\x06 \x01(\x08\x12\x13\n\x0bresourceUid\x18\x07 \x01(\x0c\x12\x14\n\x0clastRotation\x18\x08 \x01(\x03\x12\x37\n\x12lastRotationStatus\x18\t \x01(\x0e\x32\x1b.Vault.RecordRotationStatus\"F\n\x11SecurityScoreData\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x10\n\x08revision\x18\x03 \x01(\x03\"3\n\x1d\x42reachWatchGetSyncDataRequest\x12\x12\n\nrecordUids\x18\x01 \x03(\x0c\"\xb3\x01\n\x1e\x42reachWatchGetSyncDataResponse\x12\x34\n\x12\x62reachWatchRecords\x18\x01 \x03(\x0b\x32\x18.Vault.BreachWatchRecord\x12?\n\x17\x62reachWatchSecurityData\x18\x02 \x03(\x0b\x32\x1e.Vault.BreachWatchSecurityData\x12\x1a\n\x05users\x18\x03 \x03(\x0b\x32\x0b.Vault.User\"6\n\x18GetAccountUidMapResponse\x12\x1a\n\x05users\x18\x01 
\x03(\x0b\x32\x0b.Vault.User*\"\n\x0b\x43\x61\x63heStatus\x12\x08\n\x04KEEP\x10\x00\x12\t\n\x05\x43LEAR\x10\x01*f\n\x14RecordRotationStatus\x12\x14\n\x10RRST_NOT_ROTATED\x10\x00\x12\x14\n\x10RRST_IN_PROGRESS\x10\x01\x12\x10\n\x0cRRST_SUCCESS\x10\x02\x12\x10\n\x0cRRST_FAILURE\x10\x03\x42!\n\x18\x63om.keepersecurity.protoB\x05Vaultb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'SyncDown_pb2', _globals) -if _descriptor._USE_C_DESCRIPTORS == False: - _globals['DESCRIPTOR']._options = None +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'\n\030com.keepersecurity.protoB\005Vault' - _globals['_CACHESTATUS']._serialized_start=6870 - _globals['_CACHESTATUS']._serialized_end=6904 - _globals['_RECORDROTATIONSTATUS']._serialized_start=6906 - _globals['_RECORDROTATIONSTATUS']._serialized_end=7008 - _globals['_SYNCDOWNREQUEST']._serialized_start=120 - _globals['_SYNCDOWNREQUEST']._serialized_end=185 - _globals['_SYNCDOWNRESPONSE']._serialized_start=188 - _globals['_SYNCDOWNRESPONSE']._serialized_end=2365 - _globals['_USERFOLDER']._serialized_start=2368 - _globals['_USERFOLDER']._serialized_end=2514 - _globals['_SHAREDFOLDER']._serialized_start=2517 - _globals['_SHAREDFOLDER']._serialized_end=2858 - _globals['_USERFOLDERSHAREDFOLDER']._serialized_start=2860 - _globals['_USERFOLDERSHAREDFOLDER']._serialized_end=2946 - _globals['_SHAREDFOLDERFOLDER']._serialized_start=2949 - _globals['_SHAREDFOLDERFOLDER']._serialized_end=3136 - _globals['_SHAREDFOLDERKEY']._serialized_start=3138 - _globals['_SHAREDFOLDERKEY']._serialized_end=3246 - _globals['_TEAM']._serialized_start=3249 - _globals['_TEAM']._serialized_end=3572 - _globals['_RECORD']._serialized_start=3575 - _globals['_RECORD']._serialized_end=3766 - _globals['_RECORDLINK']._serialized_start=3768 - _globals['_RECORDLINK']._serialized_end=3866 - 
_globals['_USERFOLDERRECORD']._serialized_start=3868 - _globals['_USERFOLDERRECORD']._serialized_end=3942 - _globals['_SHAREDFOLDERFOLDERRECORD']._serialized_start=3944 - _globals['_SHAREDFOLDERFOLDERRECORD']._serialized_end=4051 - _globals['_NONSHAREDDATA']._serialized_start=4053 - _globals['_NONSHAREDDATA']._serialized_end=4101 - _globals['_RECORDMETADATA']._serialized_start=4104 - _globals['_RECORDMETADATA']._serialized_end=4391 - _globals['_SHARINGCHANGE']._serialized_start=4393 - _globals['_SHARINGCHANGE']._serialized_end=4443 - _globals['_PROFILE']._serialized_start=4445 - _globals['_PROFILE']._serialized_end=4507 - _globals['_PROFILEPIC']._serialized_start=4509 - _globals['_PROFILEPIC']._serialized_end=4552 - _globals['_PENDINGTEAMMEMBER']._serialized_start=4554 - _globals['_PENDINGTEAMMEMBER']._serialized_end=4666 - _globals['_BREACHWATCHRECORD']._serialized_start=4669 - _globals['_BREACHWATCHRECORD']._serialized_end=4835 - _globals['_USERAUTH']._serialized_start=4838 - _globals['_USERAUTH']._serialized_end=5018 - _globals['_BREACHWATCHSECURITYDATA']._serialized_start=5020 - _globals['_BREACHWATCHSECURITYDATA']._serialized_end=5082 - _globals['_REUSEDPASSWORDS']._serialized_start=5084 - _globals['_REUSEDPASSWORDS']._serialized_end=5134 - _globals['_SHAREDFOLDERRECORD']._serialized_start=5137 - _globals['_SHAREDFOLDERRECORD']._serialized_end=5434 - _globals['_SHAREDFOLDERUSER']._serialized_start=5437 - _globals['_SHAREDFOLDERUSER']._serialized_end=5678 - _globals['_SHAREDFOLDERTEAM']._serialized_start=5681 - _globals['_SHAREDFOLDERTEAM']._serialized_end=5915 - _globals['_KSMCHANGE']._serialized_start=5918 - _globals['_KSMCHANGE']._serialized_end=6056 - _globals['_SHAREINVITATION']._serialized_start=6058 - _globals['_SHAREINVITATION']._serialized_end=6093 - _globals['_USER']._serialized_start=6095 - _globals['_USER']._serialized_end=6139 - _globals['_SYNCDIAGNOSTICS']._serialized_start=6141 - _globals['_SYNCDIAGNOSTICS']._serialized_end=6264 - 
_globals['_RECORDROTATION']._serialized_start=6267 - _globals['_RECORDROTATION']._serialized_end=6505 - _globals['_SECURITYSCOREDATA']._serialized_start=6507 - _globals['_SECURITYSCOREDATA']._serialized_end=6577 - _globals['_BREACHWATCHGETSYNCDATAREQUEST']._serialized_start=6579 - _globals['_BREACHWATCHGETSYNCDATAREQUEST']._serialized_end=6630 - _globals['_BREACHWATCHGETSYNCDATARESPONSE']._serialized_start=6633 - _globals['_BREACHWATCHGETSYNCDATARESPONSE']._serialized_end=6812 - _globals['_GETACCOUNTUIDMAPRESPONSE']._serialized_start=6814 - _globals['_GETACCOUNTUIDMAPRESPONSE']._serialized_end=6868 + _globals['_CACHESTATUS']._serialized_start=8279 + _globals['_CACHESTATUS']._serialized_end=8313 + _globals['_RECORDROTATIONSTATUS']._serialized_start=8315 + _globals['_RECORDROTATIONSTATUS']._serialized_end=8417 + _globals['_SYNCDOWNREQUEST']._serialized_start=167 + _globals['_SYNCDOWNREQUEST']._serialized_end=247 + _globals['_SYNCDOWNRESPONSE']._serialized_start=250 + _globals['_SYNCDOWNRESPONSE']._serialized_end=2476 + _globals['_DRIVERECORD']._serialized_start=2479 + _globals['_DRIVERECORD']._serialized_end=2631 + _globals['_FOLDERSHARINGSTATE']._serialized_start=2633 + _globals['_FOLDERSHARINGSTATE']._serialized_end=2703 + _globals['_KEEPERDRIVEDATA']._serialized_start=2706 + _globals['_KEEPERDRIVEDATA']._serialized_end=3757 + _globals['_USERFOLDER']._serialized_start=3760 + _globals['_USERFOLDER']._serialized_end=3906 + _globals['_SHAREDFOLDER']._serialized_start=3909 + _globals['_SHAREDFOLDER']._serialized_end=4250 + _globals['_USERFOLDERSHAREDFOLDER']._serialized_start=4252 + _globals['_USERFOLDERSHAREDFOLDER']._serialized_end=4338 + _globals['_SHAREDFOLDERFOLDER']._serialized_start=4341 + _globals['_SHAREDFOLDERFOLDER']._serialized_end=4528 + _globals['_SHAREDFOLDERKEY']._serialized_start=4530 + _globals['_SHAREDFOLDERKEY']._serialized_end=4638 + _globals['_TEAM']._serialized_start=4641 + _globals['_TEAM']._serialized_end=4964 + 
_globals['_RECORD']._serialized_start=4967 + _globals['_RECORD']._serialized_end=5158 + _globals['_RECORDLINK']._serialized_start=5160 + _globals['_RECORDLINK']._serialized_end=5258 + _globals['_USERFOLDERRECORD']._serialized_start=5260 + _globals['_USERFOLDERRECORD']._serialized_end=5334 + _globals['_SHAREDFOLDERFOLDERRECORD']._serialized_start=5336 + _globals['_SHAREDFOLDERFOLDERRECORD']._serialized_end=5443 + _globals['_NONSHAREDDATA']._serialized_start=5445 + _globals['_NONSHAREDDATA']._serialized_end=5493 + _globals['_RECORDMETADATA']._serialized_start=5496 + _globals['_RECORDMETADATA']._serialized_end=5783 + _globals['_SHARINGCHANGE']._serialized_start=5785 + _globals['_SHARINGCHANGE']._serialized_end=5835 + _globals['_PROFILE']._serialized_start=5837 + _globals['_PROFILE']._serialized_end=5899 + _globals['_PROFILEPIC']._serialized_start=5901 + _globals['_PROFILEPIC']._serialized_end=5944 + _globals['_PENDINGTEAMMEMBER']._serialized_start=5946 + _globals['_PENDINGTEAMMEMBER']._serialized_end=6058 + _globals['_BREACHWATCHRECORD']._serialized_start=6061 + _globals['_BREACHWATCHRECORD']._serialized_end=6227 + _globals['_USERAUTH']._serialized_start=6230 + _globals['_USERAUTH']._serialized_end=6410 + _globals['_BREACHWATCHSECURITYDATA']._serialized_start=6412 + _globals['_BREACHWATCHSECURITYDATA']._serialized_end=6491 + _globals['_REUSEDPASSWORDS']._serialized_start=6493 + _globals['_REUSEDPASSWORDS']._serialized_end=6543 + _globals['_SHAREDFOLDERRECORD']._serialized_start=6546 + _globals['_SHAREDFOLDERRECORD']._serialized_end=6843 + _globals['_SHAREDFOLDERUSER']._serialized_start=6846 + _globals['_SHAREDFOLDERUSER']._serialized_end=7087 + _globals['_SHAREDFOLDERTEAM']._serialized_start=7090 + _globals['_SHAREDFOLDERTEAM']._serialized_end=7324 + _globals['_KSMCHANGE']._serialized_start=7327 + _globals['_KSMCHANGE']._serialized_end=7465 + _globals['_SHAREINVITATION']._serialized_start=7467 + _globals['_SHAREINVITATION']._serialized_end=7502 + 
_globals['_USER']._serialized_start=7504 + _globals['_USER']._serialized_end=7548 + _globals['_SYNCDIAGNOSTICS']._serialized_start=7550 + _globals['_SYNCDIAGNOSTICS']._serialized_end=7673 + _globals['_RECORDROTATION']._serialized_start=7676 + _globals['_RECORDROTATION']._serialized_end=7914 + _globals['_SECURITYSCOREDATA']._serialized_start=7916 + _globals['_SECURITYSCOREDATA']._serialized_end=7986 + _globals['_BREACHWATCHGETSYNCDATAREQUEST']._serialized_start=7988 + _globals['_BREACHWATCHGETSYNCDATAREQUEST']._serialized_end=8039 + _globals['_BREACHWATCHGETSYNCDATARESPONSE']._serialized_start=8042 + _globals['_BREACHWATCHGETSYNCDATARESPONSE']._serialized_end=8221 + _globals['_GETACCOUNTUIDMAPRESPONSE']._serialized_start=8223 + _globals['_GETACCOUNTUIDMAPRESPONSE']._serialized_end=8277 # @@protoc_insertion_point(module_scope) diff --git a/keepercommander/proto/SyncDown_pb2.pyi b/keepercommander/proto/SyncDown_pb2.pyi index 45e5e2821..de75c9162 100644 --- a/keepercommander/proto/SyncDown_pb2.pyi +++ b/keepercommander/proto/SyncDown_pb2.pyi @@ -3,21 +3,25 @@ import breachwatch_pb2 as _breachwatch_pb2 import APIRequest_pb2 as _APIRequest_pb2 import enterprise_pb2 as _enterprise_pb2 import NotificationCenter_pb2 as _NotificationCenter_pb2 +import dag_pb2 as _dag_pb2 +import folder_pb2 as _folder_pb2 +import record_sharing_pb2 as _record_sharing_pb2 from google.protobuf.internal import containers as _containers from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union +from collections.abc import Iterable as _Iterable, Mapping as _Mapping +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union DESCRIPTOR: _descriptor.FileDescriptor class CacheStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - 
__slots__ = [] + __slots__ = () KEEP: _ClassVar[CacheStatus] CLEAR: _ClassVar[CacheStatus] class RecordRotationStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () RRST_NOT_ROTATED: _ClassVar[RecordRotationStatus] RRST_IN_PROGRESS: _ClassVar[RecordRotationStatus] RRST_SUCCESS: _ClassVar[RecordRotationStatus] @@ -30,15 +34,17 @@ RRST_SUCCESS: RecordRotationStatus RRST_FAILURE: RecordRotationStatus class SyncDownRequest(_message.Message): - __slots__ = ["continuationToken", "dataVersion"] + __slots__ = ("continuationToken", "dataVersion", "debug") CONTINUATIONTOKEN_FIELD_NUMBER: _ClassVar[int] DATAVERSION_FIELD_NUMBER: _ClassVar[int] + DEBUG_FIELD_NUMBER: _ClassVar[int] continuationToken: bytes dataVersion: int - def __init__(self, continuationToken: _Optional[bytes] = ..., dataVersion: _Optional[int] = ...) -> None: ... + debug: bool + def __init__(self, continuationToken: _Optional[bytes] = ..., dataVersion: _Optional[int] = ..., debug: _Optional[bool] = ...) -> None: ... 
class SyncDownResponse(_message.Message): - __slots__ = ["continuationToken", "hasMore", "cacheStatus", "userFolders", "sharedFolders", "userFolderSharedFolders", "sharedFolderFolders", "records", "recordMetaData", "nonSharedData", "recordLinks", "userFolderRecords", "sharedFolderRecords", "sharedFolderFolderRecords", "sharedFolderUsers", "sharedFolderTeams", "recordAddAuditData", "teams", "sharingChanges", "profile", "profilePic", "pendingTeamMembers", "breachWatchRecords", "userAuths", "breachWatchSecurityData", "reusedPasswords", "removedUserFolders", "removedSharedFolders", "removedUserFolderSharedFolders", "removedSharedFolderFolders", "removedRecords", "removedRecordLinks", "removedUserFolderRecords", "removedSharedFolderRecords", "removedSharedFolderFolderRecords", "removedSharedFolderUsers", "removedSharedFolderTeams", "removedTeams", "ksmAppShares", "ksmAppClients", "shareInvitations", "diagnostics", "recordRotations", "users", "removedUsers", "securityScoreData", "notificationSync"] + __slots__ = ("continuationToken", "hasMore", "cacheStatus", "userFolders", "sharedFolders", "userFolderSharedFolders", "sharedFolderFolders", "records", "recordMetaData", "nonSharedData", "recordLinks", "userFolderRecords", "sharedFolderRecords", "sharedFolderFolderRecords", "sharedFolderUsers", "sharedFolderTeams", "recordAddAuditData", "teams", "sharingChanges", "profile", "profilePic", "pendingTeamMembers", "breachWatchRecords", "userAuths", "breachWatchSecurityData", "reusedPasswords", "removedUserFolders", "removedSharedFolders", "removedUserFolderSharedFolders", "removedSharedFolderFolders", "removedRecords", "removedRecordLinks", "removedUserFolderRecords", "removedSharedFolderRecords", "removedSharedFolderFolderRecords", "removedSharedFolderUsers", "removedSharedFolderTeams", "removedTeams", "ksmAppShares", "ksmAppClients", "shareInvitations", "diagnostics", "recordRotations", "users", "removedUsers", "securityScoreData", "notificationSync", "keeperDriveData") 
CONTINUATIONTOKEN_FIELD_NUMBER: _ClassVar[int] HASMORE_FIELD_NUMBER: _ClassVar[int] CACHESTATUS_FIELD_NUMBER: _ClassVar[int] @@ -86,6 +92,7 @@ class SyncDownResponse(_message.Message): REMOVEDUSERS_FIELD_NUMBER: _ClassVar[int] SECURITYSCOREDATA_FIELD_NUMBER: _ClassVar[int] NOTIFICATIONSYNC_FIELD_NUMBER: _ClassVar[int] + KEEPERDRIVEDATA_FIELD_NUMBER: _ClassVar[int] continuationToken: bytes hasMore: bool cacheStatus: CacheStatus @@ -133,10 +140,85 @@ class SyncDownResponse(_message.Message): removedUsers: _containers.RepeatedScalarFieldContainer[bytes] securityScoreData: _containers.RepeatedCompositeFieldContainer[SecurityScoreData] notificationSync: _containers.RepeatedCompositeFieldContainer[_NotificationCenter_pb2.NotificationWrapper] - def __init__(self, continuationToken: _Optional[bytes] = ..., hasMore: bool = ..., cacheStatus: _Optional[_Union[CacheStatus, str]] = ..., userFolders: _Optional[_Iterable[_Union[UserFolder, _Mapping]]] = ..., sharedFolders: _Optional[_Iterable[_Union[SharedFolder, _Mapping]]] = ..., userFolderSharedFolders: _Optional[_Iterable[_Union[UserFolderSharedFolder, _Mapping]]] = ..., sharedFolderFolders: _Optional[_Iterable[_Union[SharedFolderFolder, _Mapping]]] = ..., records: _Optional[_Iterable[_Union[Record, _Mapping]]] = ..., recordMetaData: _Optional[_Iterable[_Union[RecordMetaData, _Mapping]]] = ..., nonSharedData: _Optional[_Iterable[_Union[NonSharedData, _Mapping]]] = ..., recordLinks: _Optional[_Iterable[_Union[RecordLink, _Mapping]]] = ..., userFolderRecords: _Optional[_Iterable[_Union[UserFolderRecord, _Mapping]]] = ..., sharedFolderRecords: _Optional[_Iterable[_Union[SharedFolderRecord, _Mapping]]] = ..., sharedFolderFolderRecords: _Optional[_Iterable[_Union[SharedFolderFolderRecord, _Mapping]]] = ..., sharedFolderUsers: _Optional[_Iterable[_Union[SharedFolderUser, _Mapping]]] = ..., sharedFolderTeams: _Optional[_Iterable[_Union[SharedFolderTeam, _Mapping]]] = ..., recordAddAuditData: _Optional[_Iterable[bytes]] = ..., teams: 
_Optional[_Iterable[_Union[Team, _Mapping]]] = ..., sharingChanges: _Optional[_Iterable[_Union[SharingChange, _Mapping]]] = ..., profile: _Optional[_Union[Profile, _Mapping]] = ..., profilePic: _Optional[_Union[ProfilePic, _Mapping]] = ..., pendingTeamMembers: _Optional[_Iterable[_Union[PendingTeamMember, _Mapping]]] = ..., breachWatchRecords: _Optional[_Iterable[_Union[BreachWatchRecord, _Mapping]]] = ..., userAuths: _Optional[_Iterable[_Union[UserAuth, _Mapping]]] = ..., breachWatchSecurityData: _Optional[_Iterable[_Union[BreachWatchSecurityData, _Mapping]]] = ..., reusedPasswords: _Optional[_Union[ReusedPasswords, _Mapping]] = ..., removedUserFolders: _Optional[_Iterable[bytes]] = ..., removedSharedFolders: _Optional[_Iterable[bytes]] = ..., removedUserFolderSharedFolders: _Optional[_Iterable[_Union[UserFolderSharedFolder, _Mapping]]] = ..., removedSharedFolderFolders: _Optional[_Iterable[_Union[SharedFolderFolder, _Mapping]]] = ..., removedRecords: _Optional[_Iterable[bytes]] = ..., removedRecordLinks: _Optional[_Iterable[_Union[RecordLink, _Mapping]]] = ..., removedUserFolderRecords: _Optional[_Iterable[_Union[UserFolderRecord, _Mapping]]] = ..., removedSharedFolderRecords: _Optional[_Iterable[_Union[SharedFolderRecord, _Mapping]]] = ..., removedSharedFolderFolderRecords: _Optional[_Iterable[_Union[SharedFolderFolderRecord, _Mapping]]] = ..., removedSharedFolderUsers: _Optional[_Iterable[_Union[SharedFolderUser, _Mapping]]] = ..., removedSharedFolderTeams: _Optional[_Iterable[_Union[SharedFolderTeam, _Mapping]]] = ..., removedTeams: _Optional[_Iterable[bytes]] = ..., ksmAppShares: _Optional[_Iterable[_Union[KsmChange, _Mapping]]] = ..., ksmAppClients: _Optional[_Iterable[_Union[KsmChange, _Mapping]]] = ..., shareInvitations: _Optional[_Iterable[_Union[ShareInvitation, _Mapping]]] = ..., diagnostics: _Optional[_Union[SyncDiagnostics, _Mapping]] = ..., recordRotations: _Optional[_Iterable[_Union[RecordRotation, _Mapping]]] = ..., users: 
_Optional[_Iterable[_Union[User, _Mapping]]] = ..., removedUsers: _Optional[_Iterable[bytes]] = ..., securityScoreData: _Optional[_Iterable[_Union[SecurityScoreData, _Mapping]]] = ..., notificationSync: _Optional[_Iterable[_Union[_NotificationCenter_pb2.NotificationWrapper, _Mapping]]] = ...) -> None: ... + keeperDriveData: KeeperDriveData + def __init__(self, continuationToken: _Optional[bytes] = ..., hasMore: _Optional[bool] = ..., cacheStatus: _Optional[_Union[CacheStatus, str]] = ..., userFolders: _Optional[_Iterable[_Union[UserFolder, _Mapping]]] = ..., sharedFolders: _Optional[_Iterable[_Union[SharedFolder, _Mapping]]] = ..., userFolderSharedFolders: _Optional[_Iterable[_Union[UserFolderSharedFolder, _Mapping]]] = ..., sharedFolderFolders: _Optional[_Iterable[_Union[SharedFolderFolder, _Mapping]]] = ..., records: _Optional[_Iterable[_Union[Record, _Mapping]]] = ..., recordMetaData: _Optional[_Iterable[_Union[RecordMetaData, _Mapping]]] = ..., nonSharedData: _Optional[_Iterable[_Union[NonSharedData, _Mapping]]] = ..., recordLinks: _Optional[_Iterable[_Union[RecordLink, _Mapping]]] = ..., userFolderRecords: _Optional[_Iterable[_Union[UserFolderRecord, _Mapping]]] = ..., sharedFolderRecords: _Optional[_Iterable[_Union[SharedFolderRecord, _Mapping]]] = ..., sharedFolderFolderRecords: _Optional[_Iterable[_Union[SharedFolderFolderRecord, _Mapping]]] = ..., sharedFolderUsers: _Optional[_Iterable[_Union[SharedFolderUser, _Mapping]]] = ..., sharedFolderTeams: _Optional[_Iterable[_Union[SharedFolderTeam, _Mapping]]] = ..., recordAddAuditData: _Optional[_Iterable[bytes]] = ..., teams: _Optional[_Iterable[_Union[Team, _Mapping]]] = ..., sharingChanges: _Optional[_Iterable[_Union[SharingChange, _Mapping]]] = ..., profile: _Optional[_Union[Profile, _Mapping]] = ..., profilePic: _Optional[_Union[ProfilePic, _Mapping]] = ..., pendingTeamMembers: _Optional[_Iterable[_Union[PendingTeamMember, _Mapping]]] = ..., breachWatchRecords: _Optional[_Iterable[_Union[BreachWatchRecord, 
_Mapping]]] = ..., userAuths: _Optional[_Iterable[_Union[UserAuth, _Mapping]]] = ..., breachWatchSecurityData: _Optional[_Iterable[_Union[BreachWatchSecurityData, _Mapping]]] = ..., reusedPasswords: _Optional[_Union[ReusedPasswords, _Mapping]] = ..., removedUserFolders: _Optional[_Iterable[bytes]] = ..., removedSharedFolders: _Optional[_Iterable[bytes]] = ..., removedUserFolderSharedFolders: _Optional[_Iterable[_Union[UserFolderSharedFolder, _Mapping]]] = ..., removedSharedFolderFolders: _Optional[_Iterable[_Union[SharedFolderFolder, _Mapping]]] = ..., removedRecords: _Optional[_Iterable[bytes]] = ..., removedRecordLinks: _Optional[_Iterable[_Union[RecordLink, _Mapping]]] = ..., removedUserFolderRecords: _Optional[_Iterable[_Union[UserFolderRecord, _Mapping]]] = ..., removedSharedFolderRecords: _Optional[_Iterable[_Union[SharedFolderRecord, _Mapping]]] = ..., removedSharedFolderFolderRecords: _Optional[_Iterable[_Union[SharedFolderFolderRecord, _Mapping]]] = ..., removedSharedFolderUsers: _Optional[_Iterable[_Union[SharedFolderUser, _Mapping]]] = ..., removedSharedFolderTeams: _Optional[_Iterable[_Union[SharedFolderTeam, _Mapping]]] = ..., removedTeams: _Optional[_Iterable[bytes]] = ..., ksmAppShares: _Optional[_Iterable[_Union[KsmChange, _Mapping]]] = ..., ksmAppClients: _Optional[_Iterable[_Union[KsmChange, _Mapping]]] = ..., shareInvitations: _Optional[_Iterable[_Union[ShareInvitation, _Mapping]]] = ..., diagnostics: _Optional[_Union[SyncDiagnostics, _Mapping]] = ..., recordRotations: _Optional[_Iterable[_Union[RecordRotation, _Mapping]]] = ..., users: _Optional[_Iterable[_Union[User, _Mapping]]] = ..., removedUsers: _Optional[_Iterable[bytes]] = ..., securityScoreData: _Optional[_Iterable[_Union[SecurityScoreData, _Mapping]]] = ..., notificationSync: _Optional[_Iterable[_Union[_NotificationCenter_pb2.NotificationWrapper, _Mapping]]] = ..., keeperDriveData: _Optional[_Union[KeeperDriveData, _Mapping]] = ...) -> None: ... 
+ +class DriveRecord(_message.Message): + __slots__ = ("recordUid", "revision", "version", "shared", "clientModifiedTime", "fileSize", "thumbnailSize") + RECORDUID_FIELD_NUMBER: _ClassVar[int] + REVISION_FIELD_NUMBER: _ClassVar[int] + VERSION_FIELD_NUMBER: _ClassVar[int] + SHARED_FIELD_NUMBER: _ClassVar[int] + CLIENTMODIFIEDTIME_FIELD_NUMBER: _ClassVar[int] + FILESIZE_FIELD_NUMBER: _ClassVar[int] + THUMBNAILSIZE_FIELD_NUMBER: _ClassVar[int] + recordUid: bytes + revision: int + version: int + shared: bool + clientModifiedTime: int + fileSize: int + thumbnailSize: int + def __init__(self, recordUid: _Optional[bytes] = ..., revision: _Optional[int] = ..., version: _Optional[int] = ..., shared: _Optional[bool] = ..., clientModifiedTime: _Optional[int] = ..., fileSize: _Optional[int] = ..., thumbnailSize: _Optional[int] = ...) -> None: ... + +class FolderSharingState(_message.Message): + __slots__ = ("folderUid", "shared", "count") + FOLDERUID_FIELD_NUMBER: _ClassVar[int] + SHARED_FIELD_NUMBER: _ClassVar[int] + COUNT_FIELD_NUMBER: _ClassVar[int] + folderUid: bytes + shared: bool + count: int + def __init__(self, folderUid: _Optional[bytes] = ..., shared: _Optional[bool] = ..., count: _Optional[int] = ...) -> None: ... 
+ +class KeeperDriveData(_message.Message): + __slots__ = ("folders", "folderKeys", "folderAccesses", "revokedFolderAccesses", "recordData", "nonSharedData", "recordAccesses", "revokedRecordAccesses", "recordSharingStates", "recordLinks", "removedRecordLinks", "breachWatchRecords", "securityScoreData", "breachWatchSecurityData", "removedFolders", "removedFolderRecords", "folderRecords", "recordRotationData", "records", "folderSharingState", "rawDagData") + FOLDERS_FIELD_NUMBER: _ClassVar[int] + FOLDERKEYS_FIELD_NUMBER: _ClassVar[int] + FOLDERACCESSES_FIELD_NUMBER: _ClassVar[int] + REVOKEDFOLDERACCESSES_FIELD_NUMBER: _ClassVar[int] + RECORDDATA_FIELD_NUMBER: _ClassVar[int] + NONSHAREDDATA_FIELD_NUMBER: _ClassVar[int] + RECORDACCESSES_FIELD_NUMBER: _ClassVar[int] + REVOKEDRECORDACCESSES_FIELD_NUMBER: _ClassVar[int] + RECORDSHARINGSTATES_FIELD_NUMBER: _ClassVar[int] + RECORDLINKS_FIELD_NUMBER: _ClassVar[int] + REMOVEDRECORDLINKS_FIELD_NUMBER: _ClassVar[int] + BREACHWATCHRECORDS_FIELD_NUMBER: _ClassVar[int] + SECURITYSCOREDATA_FIELD_NUMBER: _ClassVar[int] + BREACHWATCHSECURITYDATA_FIELD_NUMBER: _ClassVar[int] + REMOVEDFOLDERS_FIELD_NUMBER: _ClassVar[int] + REMOVEDFOLDERRECORDS_FIELD_NUMBER: _ClassVar[int] + FOLDERRECORDS_FIELD_NUMBER: _ClassVar[int] + RECORDROTATIONDATA_FIELD_NUMBER: _ClassVar[int] + RECORDS_FIELD_NUMBER: _ClassVar[int] + FOLDERSHARINGSTATE_FIELD_NUMBER: _ClassVar[int] + RAWDAGDATA_FIELD_NUMBER: _ClassVar[int] + folders: _containers.RepeatedCompositeFieldContainer[_folder_pb2.FolderData] + folderKeys: _containers.RepeatedCompositeFieldContainer[_folder_pb2.FolderKey] + folderAccesses: _containers.RepeatedCompositeFieldContainer[_folder_pb2.FolderAccessData] + revokedFolderAccesses: _containers.RepeatedCompositeFieldContainer[_folder_pb2.RevokedAccess] + recordData: _containers.RepeatedCompositeFieldContainer[_folder_pb2.RecordData] + nonSharedData: _containers.RepeatedCompositeFieldContainer[NonSharedData] + recordAccesses: 
_containers.RepeatedCompositeFieldContainer[_folder_pb2.RecordAccessData] + revokedRecordAccesses: _containers.RepeatedCompositeFieldContainer[_record_sharing_pb2.RevokedAccess] + recordSharingStates: _containers.RepeatedCompositeFieldContainer[_record_sharing_pb2.RecordSharingState] + recordLinks: _containers.RepeatedCompositeFieldContainer[RecordLink] + removedRecordLinks: _containers.RepeatedCompositeFieldContainer[RecordLink] + breachWatchRecords: _containers.RepeatedCompositeFieldContainer[BreachWatchRecord] + securityScoreData: _containers.RepeatedCompositeFieldContainer[SecurityScoreData] + breachWatchSecurityData: _containers.RepeatedCompositeFieldContainer[BreachWatchSecurityData] + removedFolders: _containers.RepeatedCompositeFieldContainer[_folder_pb2.FolderRemoved] + removedFolderRecords: _containers.RepeatedCompositeFieldContainer[_record_pb2.FolderRecordKey] + folderRecords: _containers.RepeatedCompositeFieldContainer[_folder_pb2.FolderRecord] + recordRotationData: _containers.RepeatedCompositeFieldContainer[RecordRotation] + records: _containers.RepeatedCompositeFieldContainer[DriveRecord] + folderSharingState: _containers.RepeatedCompositeFieldContainer[FolderSharingState] + rawDagData: _containers.RepeatedCompositeFieldContainer[_dag_pb2.DebugData] + def __init__(self, folders: _Optional[_Iterable[_Union[_folder_pb2.FolderData, _Mapping]]] = ..., folderKeys: _Optional[_Iterable[_Union[_folder_pb2.FolderKey, _Mapping]]] = ..., folderAccesses: _Optional[_Iterable[_Union[_folder_pb2.FolderAccessData, _Mapping]]] = ..., revokedFolderAccesses: _Optional[_Iterable[_Union[_folder_pb2.RevokedAccess, _Mapping]]] = ..., recordData: _Optional[_Iterable[_Union[_folder_pb2.RecordData, _Mapping]]] = ..., nonSharedData: _Optional[_Iterable[_Union[NonSharedData, _Mapping]]] = ..., recordAccesses: _Optional[_Iterable[_Union[_folder_pb2.RecordAccessData, _Mapping]]] = ..., revokedRecordAccesses: _Optional[_Iterable[_Union[_record_sharing_pb2.RevokedAccess, 
_Mapping]]] = ..., recordSharingStates: _Optional[_Iterable[_Union[_record_sharing_pb2.RecordSharingState, _Mapping]]] = ..., recordLinks: _Optional[_Iterable[_Union[RecordLink, _Mapping]]] = ..., removedRecordLinks: _Optional[_Iterable[_Union[RecordLink, _Mapping]]] = ..., breachWatchRecords: _Optional[_Iterable[_Union[BreachWatchRecord, _Mapping]]] = ..., securityScoreData: _Optional[_Iterable[_Union[SecurityScoreData, _Mapping]]] = ..., breachWatchSecurityData: _Optional[_Iterable[_Union[BreachWatchSecurityData, _Mapping]]] = ..., removedFolders: _Optional[_Iterable[_Union[_folder_pb2.FolderRemoved, _Mapping]]] = ..., removedFolderRecords: _Optional[_Iterable[_Union[_record_pb2.FolderRecordKey, _Mapping]]] = ..., folderRecords: _Optional[_Iterable[_Union[_folder_pb2.FolderRecord, _Mapping]]] = ..., recordRotationData: _Optional[_Iterable[_Union[RecordRotation, _Mapping]]] = ..., records: _Optional[_Iterable[_Union[DriveRecord, _Mapping]]] = ..., folderSharingState: _Optional[_Iterable[_Union[FolderSharingState, _Mapping]]] = ..., rawDagData: _Optional[_Iterable[_Union[_dag_pb2.DebugData, _Mapping]]] = ...) -> None: ... class UserFolder(_message.Message): - __slots__ = ["folderUid", "parentUid", "userFolderKey", "keyType", "revision", "data"] + __slots__ = ("folderUid", "parentUid", "userFolderKey", "keyType", "revision", "data") FOLDERUID_FIELD_NUMBER: _ClassVar[int] PARENTUID_FIELD_NUMBER: _ClassVar[int] USERFOLDERKEY_FIELD_NUMBER: _ClassVar[int] @@ -152,7 +234,7 @@ class UserFolder(_message.Message): def __init__(self, folderUid: _Optional[bytes] = ..., parentUid: _Optional[bytes] = ..., userFolderKey: _Optional[bytes] = ..., keyType: _Optional[_Union[_record_pb2.RecordKeyType, str]] = ..., revision: _Optional[int] = ..., data: _Optional[bytes] = ...) -> None: ... 
class SharedFolder(_message.Message): - __slots__ = ["sharedFolderUid", "revision", "sharedFolderKey", "keyType", "data", "defaultManageRecords", "defaultManageUsers", "defaultCanEdit", "defaultCanReshare", "cacheStatus", "owner", "ownerAccountUid", "name"] + __slots__ = ("sharedFolderUid", "revision", "sharedFolderKey", "keyType", "data", "defaultManageRecords", "defaultManageUsers", "defaultCanEdit", "defaultCanReshare", "cacheStatus", "owner", "ownerAccountUid", "name") SHAREDFOLDERUID_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] SHAREDFOLDERKEY_FIELD_NUMBER: _ClassVar[int] @@ -179,10 +261,10 @@ class SharedFolder(_message.Message): owner: str ownerAccountUid: bytes name: bytes - def __init__(self, sharedFolderUid: _Optional[bytes] = ..., revision: _Optional[int] = ..., sharedFolderKey: _Optional[bytes] = ..., keyType: _Optional[_Union[_record_pb2.RecordKeyType, str]] = ..., data: _Optional[bytes] = ..., defaultManageRecords: bool = ..., defaultManageUsers: bool = ..., defaultCanEdit: bool = ..., defaultCanReshare: bool = ..., cacheStatus: _Optional[_Union[CacheStatus, str]] = ..., owner: _Optional[str] = ..., ownerAccountUid: _Optional[bytes] = ..., name: _Optional[bytes] = ...) -> None: ... + def __init__(self, sharedFolderUid: _Optional[bytes] = ..., revision: _Optional[int] = ..., sharedFolderKey: _Optional[bytes] = ..., keyType: _Optional[_Union[_record_pb2.RecordKeyType, str]] = ..., data: _Optional[bytes] = ..., defaultManageRecords: _Optional[bool] = ..., defaultManageUsers: _Optional[bool] = ..., defaultCanEdit: _Optional[bool] = ..., defaultCanReshare: _Optional[bool] = ..., cacheStatus: _Optional[_Union[CacheStatus, str]] = ..., owner: _Optional[str] = ..., ownerAccountUid: _Optional[bytes] = ..., name: _Optional[bytes] = ...) -> None: ... 
class UserFolderSharedFolder(_message.Message): - __slots__ = ["folderUid", "sharedFolderUid", "revision"] + __slots__ = ("folderUid", "sharedFolderUid", "revision") FOLDERUID_FIELD_NUMBER: _ClassVar[int] SHAREDFOLDERUID_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] @@ -192,7 +274,7 @@ class UserFolderSharedFolder(_message.Message): def __init__(self, folderUid: _Optional[bytes] = ..., sharedFolderUid: _Optional[bytes] = ..., revision: _Optional[int] = ...) -> None: ... class SharedFolderFolder(_message.Message): - __slots__ = ["sharedFolderUid", "folderUid", "parentUid", "sharedFolderFolderKey", "keyType", "revision", "data"] + __slots__ = ("sharedFolderUid", "folderUid", "parentUid", "sharedFolderFolderKey", "keyType", "revision", "data") SHAREDFOLDERUID_FIELD_NUMBER: _ClassVar[int] FOLDERUID_FIELD_NUMBER: _ClassVar[int] PARENTUID_FIELD_NUMBER: _ClassVar[int] @@ -210,7 +292,7 @@ class SharedFolderFolder(_message.Message): def __init__(self, sharedFolderUid: _Optional[bytes] = ..., folderUid: _Optional[bytes] = ..., parentUid: _Optional[bytes] = ..., sharedFolderFolderKey: _Optional[bytes] = ..., keyType: _Optional[_Union[_record_pb2.RecordKeyType, str]] = ..., revision: _Optional[int] = ..., data: _Optional[bytes] = ...) -> None: ... class SharedFolderKey(_message.Message): - __slots__ = ["sharedFolderUid", "sharedFolderKey", "keyType"] + __slots__ = ("sharedFolderUid", "sharedFolderKey", "keyType") SHAREDFOLDERUID_FIELD_NUMBER: _ClassVar[int] SHAREDFOLDERKEY_FIELD_NUMBER: _ClassVar[int] KEYTYPE_FIELD_NUMBER: _ClassVar[int] @@ -220,7 +302,7 @@ class SharedFolderKey(_message.Message): def __init__(self, sharedFolderUid: _Optional[bytes] = ..., sharedFolderKey: _Optional[bytes] = ..., keyType: _Optional[_Union[_record_pb2.RecordKeyType, str]] = ...) -> None: ... 
class Team(_message.Message): - __slots__ = ["teamUid", "name", "teamKey", "teamKeyType", "teamPrivateKey", "restrictEdit", "restrictShare", "restrictView", "removedSharedFolders", "sharedFolderKeys", "teamEccPrivateKey", "teamEccPublicKey"] + __slots__ = ("teamUid", "name", "teamKey", "teamKeyType", "teamPrivateKey", "restrictEdit", "restrictShare", "restrictView", "removedSharedFolders", "sharedFolderKeys", "teamEccPrivateKey", "teamEccPublicKey") TEAMUID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] TEAMKEY_FIELD_NUMBER: _ClassVar[int] @@ -245,10 +327,10 @@ class Team(_message.Message): sharedFolderKeys: _containers.RepeatedCompositeFieldContainer[SharedFolderKey] teamEccPrivateKey: bytes teamEccPublicKey: bytes - def __init__(self, teamUid: _Optional[bytes] = ..., name: _Optional[str] = ..., teamKey: _Optional[bytes] = ..., teamKeyType: _Optional[_Union[_record_pb2.RecordKeyType, str]] = ..., teamPrivateKey: _Optional[bytes] = ..., restrictEdit: bool = ..., restrictShare: bool = ..., restrictView: bool = ..., removedSharedFolders: _Optional[_Iterable[bytes]] = ..., sharedFolderKeys: _Optional[_Iterable[_Union[SharedFolderKey, _Mapping]]] = ..., teamEccPrivateKey: _Optional[bytes] = ..., teamEccPublicKey: _Optional[bytes] = ...) -> None: ... + def __init__(self, teamUid: _Optional[bytes] = ..., name: _Optional[str] = ..., teamKey: _Optional[bytes] = ..., teamKeyType: _Optional[_Union[_record_pb2.RecordKeyType, str]] = ..., teamPrivateKey: _Optional[bytes] = ..., restrictEdit: _Optional[bool] = ..., restrictShare: _Optional[bool] = ..., restrictView: _Optional[bool] = ..., removedSharedFolders: _Optional[_Iterable[bytes]] = ..., sharedFolderKeys: _Optional[_Iterable[_Union[SharedFolderKey, _Mapping]]] = ..., teamEccPrivateKey: _Optional[bytes] = ..., teamEccPublicKey: _Optional[bytes] = ...) -> None: ... 
class Record(_message.Message): - __slots__ = ["recordUid", "revision", "version", "shared", "clientModifiedTime", "data", "extra", "udata", "fileSize", "thumbnailSize"] + __slots__ = ("recordUid", "revision", "version", "shared", "clientModifiedTime", "data", "extra", "udata", "fileSize", "thumbnailSize") RECORDUID_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] @@ -269,10 +351,10 @@ class Record(_message.Message): udata: str fileSize: int thumbnailSize: int - def __init__(self, recordUid: _Optional[bytes] = ..., revision: _Optional[int] = ..., version: _Optional[int] = ..., shared: bool = ..., clientModifiedTime: _Optional[int] = ..., data: _Optional[bytes] = ..., extra: _Optional[bytes] = ..., udata: _Optional[str] = ..., fileSize: _Optional[int] = ..., thumbnailSize: _Optional[int] = ...) -> None: ... + def __init__(self, recordUid: _Optional[bytes] = ..., revision: _Optional[int] = ..., version: _Optional[int] = ..., shared: _Optional[bool] = ..., clientModifiedTime: _Optional[int] = ..., data: _Optional[bytes] = ..., extra: _Optional[bytes] = ..., udata: _Optional[str] = ..., fileSize: _Optional[int] = ..., thumbnailSize: _Optional[int] = ...) -> None: ... class RecordLink(_message.Message): - __slots__ = ["parentRecordUid", "childRecordUid", "recordKey", "revision"] + __slots__ = ("parentRecordUid", "childRecordUid", "recordKey", "revision") PARENTRECORDUID_FIELD_NUMBER: _ClassVar[int] CHILDRECORDUID_FIELD_NUMBER: _ClassVar[int] RECORDKEY_FIELD_NUMBER: _ClassVar[int] @@ -284,7 +366,7 @@ class RecordLink(_message.Message): def __init__(self, parentRecordUid: _Optional[bytes] = ..., childRecordUid: _Optional[bytes] = ..., recordKey: _Optional[bytes] = ..., revision: _Optional[int] = ...) -> None: ... 
class UserFolderRecord(_message.Message): - __slots__ = ["folderUid", "recordUid", "revision"] + __slots__ = ("folderUid", "recordUid", "revision") FOLDERUID_FIELD_NUMBER: _ClassVar[int] RECORDUID_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] @@ -294,7 +376,7 @@ class UserFolderRecord(_message.Message): def __init__(self, folderUid: _Optional[bytes] = ..., recordUid: _Optional[bytes] = ..., revision: _Optional[int] = ...) -> None: ... class SharedFolderFolderRecord(_message.Message): - __slots__ = ["sharedFolderUid", "folderUid", "recordUid", "revision"] + __slots__ = ("sharedFolderUid", "folderUid", "recordUid", "revision") SHAREDFOLDERUID_FIELD_NUMBER: _ClassVar[int] FOLDERUID_FIELD_NUMBER: _ClassVar[int] RECORDUID_FIELD_NUMBER: _ClassVar[int] @@ -306,7 +388,7 @@ class SharedFolderFolderRecord(_message.Message): def __init__(self, sharedFolderUid: _Optional[bytes] = ..., folderUid: _Optional[bytes] = ..., recordUid: _Optional[bytes] = ..., revision: _Optional[int] = ...) -> None: ... class NonSharedData(_message.Message): - __slots__ = ["recordUid", "data"] + __slots__ = ("recordUid", "data") RECORDUID_FIELD_NUMBER: _ClassVar[int] DATA_FIELD_NUMBER: _ClassVar[int] recordUid: bytes @@ -314,7 +396,7 @@ class NonSharedData(_message.Message): def __init__(self, recordUid: _Optional[bytes] = ..., data: _Optional[bytes] = ...) -> None: ... 
class RecordMetaData(_message.Message): - __slots__ = ["recordUid", "owner", "recordKey", "recordKeyType", "canShare", "canEdit", "ownerAccountUid", "expiration", "expirationNotificationType", "ownerUsername"] + __slots__ = ("recordUid", "owner", "recordKey", "recordKeyType", "canShare", "canEdit", "ownerAccountUid", "expiration", "expirationNotificationType", "ownerUsername") RECORDUID_FIELD_NUMBER: _ClassVar[int] OWNER_FIELD_NUMBER: _ClassVar[int] RECORDKEY_FIELD_NUMBER: _ClassVar[int] @@ -335,18 +417,18 @@ class RecordMetaData(_message.Message): expiration: int expirationNotificationType: _record_pb2.TimerNotificationType ownerUsername: str - def __init__(self, recordUid: _Optional[bytes] = ..., owner: bool = ..., recordKey: _Optional[bytes] = ..., recordKeyType: _Optional[_Union[_record_pb2.RecordKeyType, str]] = ..., canShare: bool = ..., canEdit: bool = ..., ownerAccountUid: _Optional[bytes] = ..., expiration: _Optional[int] = ..., expirationNotificationType: _Optional[_Union[_record_pb2.TimerNotificationType, str]] = ..., ownerUsername: _Optional[str] = ...) -> None: ... + def __init__(self, recordUid: _Optional[bytes] = ..., owner: _Optional[bool] = ..., recordKey: _Optional[bytes] = ..., recordKeyType: _Optional[_Union[_record_pb2.RecordKeyType, str]] = ..., canShare: _Optional[bool] = ..., canEdit: _Optional[bool] = ..., ownerAccountUid: _Optional[bytes] = ..., expiration: _Optional[int] = ..., expirationNotificationType: _Optional[_Union[_record_pb2.TimerNotificationType, str]] = ..., ownerUsername: _Optional[str] = ...) -> None: ... class SharingChange(_message.Message): - __slots__ = ["recordUid", "shared"] + __slots__ = ("recordUid", "shared") RECORDUID_FIELD_NUMBER: _ClassVar[int] SHARED_FIELD_NUMBER: _ClassVar[int] recordUid: bytes shared: bool - def __init__(self, recordUid: _Optional[bytes] = ..., shared: bool = ...) -> None: ... + def __init__(self, recordUid: _Optional[bytes] = ..., shared: _Optional[bool] = ...) -> None: ... 
class Profile(_message.Message): - __slots__ = ["data", "profileName", "revision"] + __slots__ = ("data", "profileName", "revision") DATA_FIELD_NUMBER: _ClassVar[int] PROFILENAME_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] @@ -356,7 +438,7 @@ class Profile(_message.Message): def __init__(self, data: _Optional[bytes] = ..., profileName: _Optional[str] = ..., revision: _Optional[int] = ...) -> None: ... class ProfilePic(_message.Message): - __slots__ = ["url", "revision"] + __slots__ = ("url", "revision") URL_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] url: str @@ -364,7 +446,7 @@ class ProfilePic(_message.Message): def __init__(self, url: _Optional[str] = ..., revision: _Optional[int] = ...) -> None: ... class PendingTeamMember(_message.Message): - __slots__ = ["enterpriseUserId", "userPublicKey", "teamUids", "userEccPublicKey"] + __slots__ = ("enterpriseUserId", "userPublicKey", "teamUids", "userEccPublicKey") ENTERPRISEUSERID_FIELD_NUMBER: _ClassVar[int] USERPUBLICKEY_FIELD_NUMBER: _ClassVar[int] TEAMUIDS_FIELD_NUMBER: _ClassVar[int] @@ -376,7 +458,7 @@ class PendingTeamMember(_message.Message): def __init__(self, enterpriseUserId: _Optional[int] = ..., userPublicKey: _Optional[bytes] = ..., teamUids: _Optional[_Iterable[bytes]] = ..., userEccPublicKey: _Optional[bytes] = ...) -> None: ... 
class BreachWatchRecord(_message.Message): - __slots__ = ["recordUid", "data", "type", "scannedBy", "revision", "scannedByAccountUid"] + __slots__ = ("recordUid", "data", "type", "scannedBy", "revision", "scannedByAccountUid") RECORDUID_FIELD_NUMBER: _ClassVar[int] DATA_FIELD_NUMBER: _ClassVar[int] TYPE_FIELD_NUMBER: _ClassVar[int] @@ -392,7 +474,7 @@ class BreachWatchRecord(_message.Message): def __init__(self, recordUid: _Optional[bytes] = ..., data: _Optional[bytes] = ..., type: _Optional[_Union[_breachwatch_pb2.BreachWatchInfoType, str]] = ..., scannedBy: _Optional[str] = ..., revision: _Optional[int] = ..., scannedByAccountUid: _Optional[bytes] = ...) -> None: ... class UserAuth(_message.Message): - __slots__ = ["uid", "loginType", "deleted", "iterations", "salt", "encryptedClientKey", "revision", "name"] + __slots__ = ("uid", "loginType", "deleted", "iterations", "salt", "encryptedClientKey", "revision", "name") UID_FIELD_NUMBER: _ClassVar[int] LOGINTYPE_FIELD_NUMBER: _ClassVar[int] DELETED_FIELD_NUMBER: _ClassVar[int] @@ -409,18 +491,20 @@ class UserAuth(_message.Message): encryptedClientKey: bytes revision: int name: str - def __init__(self, uid: _Optional[bytes] = ..., loginType: _Optional[_Union[_APIRequest_pb2.LoginType, str]] = ..., deleted: bool = ..., iterations: _Optional[int] = ..., salt: _Optional[bytes] = ..., encryptedClientKey: _Optional[bytes] = ..., revision: _Optional[int] = ..., name: _Optional[str] = ...) -> None: ... + def __init__(self, uid: _Optional[bytes] = ..., loginType: _Optional[_Union[_APIRequest_pb2.LoginType, str]] = ..., deleted: _Optional[bool] = ..., iterations: _Optional[int] = ..., salt: _Optional[bytes] = ..., encryptedClientKey: _Optional[bytes] = ..., revision: _Optional[int] = ..., name: _Optional[str] = ...) -> None: ... 
class BreachWatchSecurityData(_message.Message): - __slots__ = ["recordUid", "revision"] + __slots__ = ("recordUid", "revision", "removed") RECORDUID_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] + REMOVED_FIELD_NUMBER: _ClassVar[int] recordUid: bytes revision: int - def __init__(self, recordUid: _Optional[bytes] = ..., revision: _Optional[int] = ...) -> None: ... + removed: bool + def __init__(self, recordUid: _Optional[bytes] = ..., revision: _Optional[int] = ..., removed: _Optional[bool] = ...) -> None: ... class ReusedPasswords(_message.Message): - __slots__ = ["count", "revision"] + __slots__ = ("count", "revision") COUNT_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] count: int @@ -428,7 +512,7 @@ class ReusedPasswords(_message.Message): def __init__(self, count: _Optional[int] = ..., revision: _Optional[int] = ...) -> None: ... class SharedFolderRecord(_message.Message): - __slots__ = ["sharedFolderUid", "recordUid", "recordKey", "canShare", "canEdit", "ownerAccountUid", "expiration", "owner", "expirationNotificationType", "ownerUsername", "rotateOnExpiration"] + __slots__ = ("sharedFolderUid", "recordUid", "recordKey", "canShare", "canEdit", "ownerAccountUid", "expiration", "owner", "expirationNotificationType", "ownerUsername", "rotateOnExpiration") SHAREDFOLDERUID_FIELD_NUMBER: _ClassVar[int] RECORDUID_FIELD_NUMBER: _ClassVar[int] RECORDKEY_FIELD_NUMBER: _ClassVar[int] @@ -451,10 +535,10 @@ class SharedFolderRecord(_message.Message): expirationNotificationType: _record_pb2.TimerNotificationType ownerUsername: str rotateOnExpiration: bool - def __init__(self, sharedFolderUid: _Optional[bytes] = ..., recordUid: _Optional[bytes] = ..., recordKey: _Optional[bytes] = ..., canShare: bool = ..., canEdit: bool = ..., ownerAccountUid: _Optional[bytes] = ..., expiration: _Optional[int] = ..., owner: bool = ..., expirationNotificationType: _Optional[_Union[_record_pb2.TimerNotificationType, str]] = ..., ownerUsername: 
_Optional[str] = ..., rotateOnExpiration: bool = ...) -> None: ... + def __init__(self, sharedFolderUid: _Optional[bytes] = ..., recordUid: _Optional[bytes] = ..., recordKey: _Optional[bytes] = ..., canShare: _Optional[bool] = ..., canEdit: _Optional[bool] = ..., ownerAccountUid: _Optional[bytes] = ..., expiration: _Optional[int] = ..., owner: _Optional[bool] = ..., expirationNotificationType: _Optional[_Union[_record_pb2.TimerNotificationType, str]] = ..., ownerUsername: _Optional[str] = ..., rotateOnExpiration: _Optional[bool] = ...) -> None: ... class SharedFolderUser(_message.Message): - __slots__ = ["sharedFolderUid", "username", "manageRecords", "manageUsers", "accountUid", "expiration", "expirationNotificationType", "rotateOnExpiration"] + __slots__ = ("sharedFolderUid", "username", "manageRecords", "manageUsers", "accountUid", "expiration", "expirationNotificationType", "rotateOnExpiration") SHAREDFOLDERUID_FIELD_NUMBER: _ClassVar[int] USERNAME_FIELD_NUMBER: _ClassVar[int] MANAGERECORDS_FIELD_NUMBER: _ClassVar[int] @@ -471,10 +555,10 @@ class SharedFolderUser(_message.Message): expiration: int expirationNotificationType: _record_pb2.TimerNotificationType rotateOnExpiration: bool - def __init__(self, sharedFolderUid: _Optional[bytes] = ..., username: _Optional[str] = ..., manageRecords: bool = ..., manageUsers: bool = ..., accountUid: _Optional[bytes] = ..., expiration: _Optional[int] = ..., expirationNotificationType: _Optional[_Union[_record_pb2.TimerNotificationType, str]] = ..., rotateOnExpiration: bool = ...) -> None: ... + def __init__(self, sharedFolderUid: _Optional[bytes] = ..., username: _Optional[str] = ..., manageRecords: _Optional[bool] = ..., manageUsers: _Optional[bool] = ..., accountUid: _Optional[bytes] = ..., expiration: _Optional[int] = ..., expirationNotificationType: _Optional[_Union[_record_pb2.TimerNotificationType, str]] = ..., rotateOnExpiration: _Optional[bool] = ...) -> None: ... 
class SharedFolderTeam(_message.Message): - __slots__ = ["sharedFolderUid", "teamUid", "name", "manageRecords", "manageUsers", "expiration", "expirationNotificationType", "rotateOnExpiration"] + __slots__ = ("sharedFolderUid", "teamUid", "name", "manageRecords", "manageUsers", "expiration", "expirationNotificationType", "rotateOnExpiration") SHAREDFOLDERUID_FIELD_NUMBER: _ClassVar[int] TEAMUID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] @@ -491,10 +575,10 @@ class SharedFolderTeam(_message.Message): expiration: int expirationNotificationType: _record_pb2.TimerNotificationType rotateOnExpiration: bool - def __init__(self, sharedFolderUid: _Optional[bytes] = ..., teamUid: _Optional[bytes] = ..., name: _Optional[str] = ..., manageRecords: bool = ..., manageUsers: bool = ..., expiration: _Optional[int] = ..., expirationNotificationType: _Optional[_Union[_record_pb2.TimerNotificationType, str]] = ..., rotateOnExpiration: bool = ...) -> None: ... + def __init__(self, sharedFolderUid: _Optional[bytes] = ..., teamUid: _Optional[bytes] = ..., name: _Optional[str] = ..., manageRecords: _Optional[bool] = ..., manageUsers: _Optional[bool] = ..., expiration: _Optional[int] = ..., expirationNotificationType: _Optional[_Union[_record_pb2.TimerNotificationType, str]] = ..., rotateOnExpiration: _Optional[bool] = ...) -> None: ... 
class KsmChange(_message.Message): - __slots__ = ["appRecordUid", "detailId", "removed", "appClientType", "expiration"] + __slots__ = ("appRecordUid", "detailId", "removed", "appClientType", "expiration") APPRECORDUID_FIELD_NUMBER: _ClassVar[int] DETAILID_FIELD_NUMBER: _ClassVar[int] REMOVED_FIELD_NUMBER: _ClassVar[int] @@ -505,16 +589,16 @@ class KsmChange(_message.Message): removed: bool appClientType: _enterprise_pb2.AppClientType expiration: int - def __init__(self, appRecordUid: _Optional[bytes] = ..., detailId: _Optional[bytes] = ..., removed: bool = ..., appClientType: _Optional[_Union[_enterprise_pb2.AppClientType, str]] = ..., expiration: _Optional[int] = ...) -> None: ... + def __init__(self, appRecordUid: _Optional[bytes] = ..., detailId: _Optional[bytes] = ..., removed: _Optional[bool] = ..., appClientType: _Optional[_Union[_enterprise_pb2.AppClientType, str]] = ..., expiration: _Optional[int] = ...) -> None: ... class ShareInvitation(_message.Message): - __slots__ = ["username"] + __slots__ = ("username",) USERNAME_FIELD_NUMBER: _ClassVar[int] username: str def __init__(self, username: _Optional[str] = ...) -> None: ... class User(_message.Message): - __slots__ = ["accountUid", "username"] + __slots__ = ("accountUid", "username") ACCOUNTUID_FIELD_NUMBER: _ClassVar[int] USERNAME_FIELD_NUMBER: _ClassVar[int] accountUid: bytes @@ -522,7 +606,7 @@ class User(_message.Message): def __init__(self, accountUid: _Optional[bytes] = ..., username: _Optional[str] = ...) -> None: ... 
class SyncDiagnostics(_message.Message): - __slots__ = ["continuationToken", "userId", "enterpriseUserId", "syncedTo", "syncingTo"] + __slots__ = ("continuationToken", "userId", "enterpriseUserId", "syncedTo", "syncingTo") CONTINUATIONTOKEN_FIELD_NUMBER: _ClassVar[int] USERID_FIELD_NUMBER: _ClassVar[int] ENTERPRISEUSERID_FIELD_NUMBER: _ClassVar[int] @@ -536,7 +620,7 @@ class SyncDiagnostics(_message.Message): def __init__(self, continuationToken: _Optional[bytes] = ..., userId: _Optional[int] = ..., enterpriseUserId: _Optional[int] = ..., syncedTo: _Optional[int] = ..., syncingTo: _Optional[int] = ...) -> None: ... class RecordRotation(_message.Message): - __slots__ = ["recordUid", "revision", "configurationUid", "schedule", "pwdComplexity", "disabled", "resourceUid", "lastRotation", "lastRotationStatus"] + __slots__ = ("recordUid", "revision", "configurationUid", "schedule", "pwdComplexity", "disabled", "resourceUid", "lastRotation", "lastRotationStatus") RECORDUID_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] CONFIGURATIONUID_FIELD_NUMBER: _ClassVar[int] @@ -555,10 +639,10 @@ class RecordRotation(_message.Message): resourceUid: bytes lastRotation: int lastRotationStatus: RecordRotationStatus - def __init__(self, recordUid: _Optional[bytes] = ..., revision: _Optional[int] = ..., configurationUid: _Optional[bytes] = ..., schedule: _Optional[str] = ..., pwdComplexity: _Optional[bytes] = ..., disabled: bool = ..., resourceUid: _Optional[bytes] = ..., lastRotation: _Optional[int] = ..., lastRotationStatus: _Optional[_Union[RecordRotationStatus, str]] = ...) -> None: ... + def __init__(self, recordUid: _Optional[bytes] = ..., revision: _Optional[int] = ..., configurationUid: _Optional[bytes] = ..., schedule: _Optional[str] = ..., pwdComplexity: _Optional[bytes] = ..., disabled: _Optional[bool] = ..., resourceUid: _Optional[bytes] = ..., lastRotation: _Optional[int] = ..., lastRotationStatus: _Optional[_Union[RecordRotationStatus, str]] = ...) 
-> None: ... class SecurityScoreData(_message.Message): - __slots__ = ["recordUid", "data", "revision"] + __slots__ = ("recordUid", "data", "revision") RECORDUID_FIELD_NUMBER: _ClassVar[int] DATA_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] @@ -568,13 +652,13 @@ class SecurityScoreData(_message.Message): def __init__(self, recordUid: _Optional[bytes] = ..., data: _Optional[bytes] = ..., revision: _Optional[int] = ...) -> None: ... class BreachWatchGetSyncDataRequest(_message.Message): - __slots__ = ["recordUids"] + __slots__ = ("recordUids",) RECORDUIDS_FIELD_NUMBER: _ClassVar[int] recordUids: _containers.RepeatedScalarFieldContainer[bytes] def __init__(self, recordUids: _Optional[_Iterable[bytes]] = ...) -> None: ... class BreachWatchGetSyncDataResponse(_message.Message): - __slots__ = ["breachWatchRecords", "breachWatchSecurityData", "users"] + __slots__ = ("breachWatchRecords", "breachWatchSecurityData", "users") BREACHWATCHRECORDS_FIELD_NUMBER: _ClassVar[int] BREACHWATCHSECURITYDATA_FIELD_NUMBER: _ClassVar[int] USERS_FIELD_NUMBER: _ClassVar[int] @@ -584,7 +668,7 @@ class BreachWatchGetSyncDataResponse(_message.Message): def __init__(self, breachWatchRecords: _Optional[_Iterable[_Union[BreachWatchRecord, _Mapping]]] = ..., breachWatchSecurityData: _Optional[_Iterable[_Union[BreachWatchSecurityData, _Mapping]]] = ..., users: _Optional[_Iterable[_Union[User, _Mapping]]] = ...) -> None: ... class GetAccountUidMapResponse(_message.Message): - __slots__ = ["users"] + __slots__ = ("users",) USERS_FIELD_NUMBER: _ClassVar[int] users: _containers.RepeatedCompositeFieldContainer[User] def __init__(self, users: _Optional[_Iterable[_Union[User, _Mapping]]] = ...) -> None: ... diff --git a/keepercommander/proto/dag_pb2.py b/keepercommander/proto/dag_pb2.py new file mode 100644 index 000000000..40889c765 --- /dev/null +++ b/keepercommander/proto/dag_pb2.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. 
DO NOT EDIT! +# source: dag.proto +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\tdag.proto\x12\x03\x44\x61g\">\n\x03Ref\x12\x1a\n\x04type\x18\x01 \x01(\x0e\x32\x0c.Dag.RefType\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0c\n\x04name\x18\x03 \x01(\t\"z\n\x04\x44\x61ta\x12\x1f\n\x08\x64\x61taType\x18\x01 \x01(\x0e\x32\r.Dag.DataType\x12\x15\n\x03ref\x18\x02 \x01(\x0b\x32\x08.Dag.Ref\x12\x1b\n\tparentRef\x18\x03 \x01(\x0b\x32\x08.Dag.Ref\x12\x0f\n\x07\x63ontent\x18\x04 \x01(\x0c\x12\x0c\n\x04path\x18\x05 \x01(\t\"G\n\x08SyncData\x12\x17\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32\t.Dag.Data\x12\x11\n\tsyncPoint\x18\x02 \x01(\x03\x12\x0f\n\x07hasMore\x18\x03 \x01(\x08\"q\n\tDebugData\x12\x10\n\x08\x64\x61taType\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x04 \x01(\t\x12\x1e\n\x03ref\x18\x02 \x01(\x0b\x32\x11.Dag.DebugRefInfo\x12$\n\tparentRef\x18\x03 \x01(\x0b\x32\x11.Dag.DebugRefInfo\".\n\x0c\x44\x65\x62ugRefInfo\x12\x0f\n\x07refType\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c*\x8d\x01\n\x07RefType\x12\x0b\n\x07GENERAL\x10\x00\x12\x08\n\x04USER\x10\x01\x12\n\n\x06\x44\x45VICE\x10\x02\x12\x07\n\x03REC\x10\x03\x12\n\n\x06\x46OLDER\x10\x04\x12\x08\n\x04TEAM\x10\x05\x12\x0e\n\nENTERPRISE\x10\x06\x12\x11\n\rPAM_DIRECTORY\x10\x07\x12\x0f\n\x0bPAM_MACHINE\x10\x08\x12\x0c\n\x08PAM_USER\x10\t*X\n\x08\x44\x61taType\x12\x08\n\x04\x44\x41TA\x10\x00\x12\x07\n\x03KEY\x10\x01\x12\x08\n\x04LINK\x10\x02\x12\x07\n\x03\x41\x43L\x10\x03\x12\x0c\n\x08\x44\x45LETION\x10\x04\x12\n\n\x06\x44\x45NIAL\x10\x05\x12\x0c\n\x08UNDENIAL\x10\x06\x42\x1f\n\x18\x63om.keepersecurity.protoB\x03\x44\x61gb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) 
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'dag_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\030com.keepersecurity.protoB\003Dag' + _globals['_REFTYPE']._serialized_start=443 + _globals['_REFTYPE']._serialized_end=584 + _globals['_DATATYPE']._serialized_start=586 + _globals['_DATATYPE']._serialized_end=674 + _globals['_REF']._serialized_start=18 + _globals['_REF']._serialized_end=80 + _globals['_DATA']._serialized_start=82 + _globals['_DATA']._serialized_end=204 + _globals['_SYNCDATA']._serialized_start=206 + _globals['_SYNCDATA']._serialized_end=277 + _globals['_DEBUGDATA']._serialized_start=279 + _globals['_DEBUGDATA']._serialized_end=392 + _globals['_DEBUGREFINFO']._serialized_start=394 + _globals['_DEBUGREFINFO']._serialized_end=440 +# @@protoc_insertion_point(module_scope) diff --git a/keepercommander/proto/dag_pb2.pyi b/keepercommander/proto/dag_pb2.pyi new file mode 100644 index 000000000..026e97603 --- /dev/null +++ b/keepercommander/proto/dag_pb2.pyi @@ -0,0 +1,102 @@ +from google.protobuf.internal import containers as _containers +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from collections.abc import Iterable as _Iterable, Mapping as _Mapping +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class RefType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + GENERAL: _ClassVar[RefType] + USER: _ClassVar[RefType] + DEVICE: _ClassVar[RefType] + REC: _ClassVar[RefType] + FOLDER: _ClassVar[RefType] + TEAM: _ClassVar[RefType] + ENTERPRISE: _ClassVar[RefType] + PAM_DIRECTORY: _ClassVar[RefType] + PAM_MACHINE: _ClassVar[RefType] + PAM_USER: _ClassVar[RefType] + +class DataType(int, 
metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + DATA: _ClassVar[DataType] + KEY: _ClassVar[DataType] + LINK: _ClassVar[DataType] + ACL: _ClassVar[DataType] + DELETION: _ClassVar[DataType] + DENIAL: _ClassVar[DataType] + UNDENIAL: _ClassVar[DataType] +GENERAL: RefType +USER: RefType +DEVICE: RefType +REC: RefType +FOLDER: RefType +TEAM: RefType +ENTERPRISE: RefType +PAM_DIRECTORY: RefType +PAM_MACHINE: RefType +PAM_USER: RefType +DATA: DataType +KEY: DataType +LINK: DataType +ACL: DataType +DELETION: DataType +DENIAL: DataType +UNDENIAL: DataType + +class Ref(_message.Message): + __slots__ = ("type", "value", "name") + TYPE_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] + type: RefType + value: bytes + name: str + def __init__(self, type: _Optional[_Union[RefType, str]] = ..., value: _Optional[bytes] = ..., name: _Optional[str] = ...) -> None: ... + +class Data(_message.Message): + __slots__ = ("dataType", "ref", "parentRef", "content", "path") + DATATYPE_FIELD_NUMBER: _ClassVar[int] + REF_FIELD_NUMBER: _ClassVar[int] + PARENTREF_FIELD_NUMBER: _ClassVar[int] + CONTENT_FIELD_NUMBER: _ClassVar[int] + PATH_FIELD_NUMBER: _ClassVar[int] + dataType: DataType + ref: Ref + parentRef: Ref + content: bytes + path: str + def __init__(self, dataType: _Optional[_Union[DataType, str]] = ..., ref: _Optional[_Union[Ref, _Mapping]] = ..., parentRef: _Optional[_Union[Ref, _Mapping]] = ..., content: _Optional[bytes] = ..., path: _Optional[str] = ...) -> None: ... + +class SyncData(_message.Message): + __slots__ = ("data", "syncPoint", "hasMore") + DATA_FIELD_NUMBER: _ClassVar[int] + SYNCPOINT_FIELD_NUMBER: _ClassVar[int] + HASMORE_FIELD_NUMBER: _ClassVar[int] + data: _containers.RepeatedCompositeFieldContainer[Data] + syncPoint: int + hasMore: bool + def __init__(self, data: _Optional[_Iterable[_Union[Data, _Mapping]]] = ..., syncPoint: _Optional[int] = ..., hasMore: _Optional[bool] = ...) -> None: ... 
+ +class DebugData(_message.Message): + __slots__ = ("dataType", "path", "ref", "parentRef") + DATATYPE_FIELD_NUMBER: _ClassVar[int] + PATH_FIELD_NUMBER: _ClassVar[int] + REF_FIELD_NUMBER: _ClassVar[int] + PARENTREF_FIELD_NUMBER: _ClassVar[int] + dataType: str + path: str + ref: DebugRefInfo + parentRef: DebugRefInfo + def __init__(self, dataType: _Optional[str] = ..., path: _Optional[str] = ..., ref: _Optional[_Union[DebugRefInfo, _Mapping]] = ..., parentRef: _Optional[_Union[DebugRefInfo, _Mapping]] = ...) -> None: ... + +class DebugRefInfo(_message.Message): + __slots__ = ("refType", "value") + REFTYPE_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + refType: str + value: bytes + def __init__(self, refType: _Optional[str] = ..., value: _Optional[bytes] = ...) -> None: ... diff --git a/keepercommander/proto/enterprise_pb2.py b/keepercommander/proto/enterprise_pb2.py index 2f2da1a3c..709b672fd 100644 --- a/keepercommander/proto/enterprise_pb2.py +++ b/keepercommander/proto/enterprise_pb2.py @@ -13,7 +13,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x65nterprise.proto\x12\nEnterprise\"\x84\x01\n\x18\x45nterpriseKeyPairRequest\x12\x1b\n\x13\x65nterprisePublicKey\x18\x01 \x01(\x0c\x12%\n\x1d\x65ncryptedEnterprisePrivateKey\x18\x02 \x01(\x0c\x12$\n\x07keyType\x18\x03 \x01(\x0e\x32\x13.Enterprise.KeyType\"\'\n\x14GetTeamMemberRequest\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\"}\n\x0e\x45nterpriseUser\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\r\n\x05\x65mail\x18\x02 \x01(\t\x12\x1a\n\x12\x65nterpriseUsername\x18\x03 \x01(\t\x12\x14\n\x0cisShareAdmin\x18\x04 \x01(\x08\x12\x10\n\x08username\x18\x05 \x01(\t\"K\n\x15GetTeamMemberResponse\x12\x32\n\x0e\x65nterpriseUser\x18\x01 \x03(\x0b\x32\x1a.Enterprise.EnterpriseUser\"-\n\x11\x45nterpriseUserIds\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x03(\x03\"B\n\x19\x45nterprisePersonalAccount\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x16\n\x0eOBSOLETE_FIELD\x18\x02 
\x01(\x0c\"S\n\x17\x45ncryptedTeamKeyRequest\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x18\n\x10\x65ncryptedTeamKey\x18\x02 \x01(\x0c\x12\r\n\x05\x66orce\x18\x03 \x01(\x08\"+\n\x0fReEncryptedData\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\t\"?\n\x12ReEncryptedRoleKey\x12\x0f\n\x07role_id\x18\x01 \x01(\x03\x12\x18\n\x10\x65ncryptedRoleKey\x18\x02 \x01(\x0c\"P\n\x16ReEncryptedUserDataKey\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x1c\n\x14userEncryptedDataKey\x18\x02 \x01(\x0c\"\xd8\x02\n\x1bNodeToManagedCompanyRequest\x12\x11\n\tcompanyId\x18\x01 \x01(\x05\x12*\n\x05nodes\x18\x02 \x03(\x0b\x32\x1b.Enterprise.ReEncryptedData\x12*\n\x05roles\x18\x03 \x03(\x0b\x32\x1b.Enterprise.ReEncryptedData\x12*\n\x05users\x18\x04 \x03(\x0b\x32\x1b.Enterprise.ReEncryptedData\x12\x30\n\x08roleKeys\x18\x05 \x03(\x0b\x32\x1e.Enterprise.ReEncryptedRoleKey\x12\x35\n\x08teamKeys\x18\x06 \x03(\x0b\x32#.Enterprise.EncryptedTeamKeyRequest\x12\x39\n\rusersDataKeys\x18\x07 \x03(\x0b\x32\".Enterprise.ReEncryptedUserDataKey\",\n\x08RoleTeam\x12\x0f\n\x07role_id\x18\x01 \x01(\x03\x12\x0f\n\x07teamUid\x18\x02 \x01(\x0c\"4\n\tRoleTeams\x12\'\n\trole_team\x18\x01 \x03(\x0b\x32\x14.Enterprise.RoleTeam\"/\n\x0bTeamsByRole\x12\x0f\n\x07role_id\x18\x01 \x01(\x03\x12\x0f\n\x07teamUid\x18\x02 \x03(\x0c\"<\n\x12ManagedNodesByRole\x12\x0f\n\x07role_id\x18\x01 \x01(\x03\x12\x15\n\rmanagedNodeId\x18\x02 \x03(\x03\"R\n\x0fRoleUserAddKeys\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x0f\n\x07treeKey\x18\x02 \x01(\t\x12\x14\n\x0croleAdminKey\x18\x03 \x01(\t\"T\n\x0bRoleUserAdd\x12\x0f\n\x07role_id\x18\x01 \x01(\x03\x12\x34\n\x0froleUserAddKeys\x18\x02 \x03(\x0b\x32\x1b.Enterprise.RoleUserAddKeys\"D\n\x13RoleUsersAddRequest\x12-\n\x0croleUserAdds\x18\x01 \x03(\x0b\x32\x17.Enterprise.RoleUserAdd\"\x80\x01\n\x11RoleUserAddResult\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x18\n\x10\x65nterpriseUserId\x18\x02 \x01(\x03\x12\x30\n\x06status\x18\x03 \x01(\x0e\x32 
.Enterprise.RoleUserModifyStatus\x12\x0f\n\x07message\x18\x04 \x01(\t\"F\n\x14RoleUsersAddResponse\x12.\n\x07results\x18\x01 \x03(\x0b\x32\x1d.Enterprise.RoleUserAddResult\"<\n\x0eRoleUserRemove\x12\x0f\n\x07role_id\x18\x01 \x01(\x03\x12\x19\n\x11\x65nterpriseUserIds\x18\x02 \x03(\x03\"M\n\x16RoleUsersRemoveRequest\x12\x33\n\x0froleUserRemoves\x18\x01 \x03(\x0b\x32\x1a.Enterprise.RoleUserRemove\"\x83\x01\n\x14RoleUserRemoveResult\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x18\n\x10\x65nterpriseUserId\x18\x02 \x01(\x03\x12\x30\n\x06status\x18\x03 \x01(\x0e\x32 .Enterprise.RoleUserModifyStatus\x12\x0f\n\x07message\x18\x04 \x01(\t\"L\n\x17RoleUsersRemoveResponse\x12\x31\n\x07results\x18\x01 \x03(\x0b\x32 .Enterprise.RoleUserRemoveResult\"\xa0\x04\n\x16\x45nterpriseRegistration\x12\x18\n\x10\x65ncryptedTreeKey\x18\x01 \x01(\x0c\x12\x16\n\x0e\x65nterpriseName\x18\x02 \x01(\t\x12\x14\n\x0crootNodeData\x18\x03 \x01(\x0c\x12\x15\n\radminUserData\x18\x04 \x01(\x0c\x12\x11\n\tadminName\x18\x05 \x01(\t\x12\x10\n\x08roleData\x18\x06 \x01(\x0c\x12\x38\n\nrsaKeyPair\x18\x07 \x01(\x0b\x32$.Enterprise.EnterpriseKeyPairRequest\x12\x13\n\x0bnumberSeats\x18\x08 \x01(\x05\x12\x32\n\x0e\x65nterpriseType\x18\t \x01(\x0e\x32\x1a.Enterprise.EnterpriseType\x12\x15\n\rrolePublicKey\x18\n \x01(\x0c\x12*\n\"rolePrivateKeyEncryptedWithRoleKey\x18\x0b \x01(\x0c\x12#\n\x1broleKeyEncryptedWithTreeKey\x18\x0c \x01(\x0c\x12\x38\n\neccKeyPair\x18\r \x01(\x0b\x32$.Enterprise.EnterpriseKeyPairRequest\x12\x18\n\x10\x61llUsersRoleData\x18\x0e \x01(\x0c\x12)\n!roleKeyEncryptedWithUserPublicKey\x18\x0f \x01(\x0c\x12\x18\n\x10\x61pproverRoleData\x18\x10 \x01(\x0c\"H\n\x1a\x44omainPasswordRulesRequest\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x18\n\x10verificationCode\x18\x02 \x01(\t\"\\\n\x19\x44omainPasswordRulesFields\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0f\n\x07minimum\x18\x02 \x01(\x05\x12\x0f\n\x07maximum\x18\x03 \x01(\x05\x12\x0f\n\x07\x61llowed\x18\x04 
\x01(\x08\"E\n\x10LoginToMcRequest\x12\x16\n\x0emcEnterpriseId\x18\x01 \x01(\x05\x12\x19\n\x11messageSessionUid\x18\x02 \x01(\x0c\"L\n\x11LoginToMcResponse\x12\x1d\n\x15\x65ncryptedSessionToken\x18\x01 \x01(\x0c\x12\x18\n\x10\x65ncryptedTreeKey\x18\x02 \x01(\t\"g\n\x1b\x44omainPasswordRulesResponse\x12H\n\x19\x64omainPasswordRulesFields\x18\x01 \x03(\x0b\x32%.Enterprise.DomainPasswordRulesFields\"\x88\x01\n\x18\x41pproveUserDeviceRequest\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x1c\n\x14\x65ncryptedDeviceToken\x18\x02 \x01(\x0c\x12\x1e\n\x16\x65ncryptedDeviceDataKey\x18\x03 \x01(\x0c\x12\x14\n\x0c\x64\x65nyApproval\x18\x04 \x01(\x08\"t\n\x19\x41pproveUserDeviceResponse\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x1c\n\x14\x65ncryptedDeviceToken\x18\x02 \x01(\x0c\x12\x0e\n\x06\x66\x61iled\x18\x03 \x01(\x08\x12\x0f\n\x07message\x18\x04 \x01(\t\"Y\n\x19\x41pproveUserDevicesRequest\x12<\n\x0e\x64\x65viceRequests\x18\x01 \x03(\x0b\x32$.Enterprise.ApproveUserDeviceRequest\"\\\n\x1a\x41pproveUserDevicesResponse\x12>\n\x0f\x64\x65viceResponses\x18\x01 \x03(\x0b\x32%.Enterprise.ApproveUserDeviceResponse\"\x87\x01\n\x15\x45nterpriseUserDataKey\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x1c\n\x14userEncryptedDataKey\x18\x02 \x01(\x0c\x12\x11\n\tkeyTypeId\x18\x03 \x01(\x05\x12\x0f\n\x07roleKey\x18\x04 \x01(\x0c\x12\x12\n\nprivateKey\x18\x05 \x01(\x0c\"I\n\x16\x45nterpriseUserDataKeys\x12/\n\x04keys\x18\x01 \x03(\x0b\x32!.Enterprise.EnterpriseUserDataKey\"g\n\x1a\x45nterpriseUserDataKeyLight\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x1c\n\x14userEncryptedDataKey\x18\x02 \x01(\x0c\x12\x11\n\tkeyTypeId\x18\x03 \x01(\x05\"d\n\x1c\x45nterpriseUserDataKeysByNode\x12\x0e\n\x06nodeId\x18\x01 \x01(\x03\x12\x34\n\x04keys\x18\x02 \x03(\x0b\x32&.Enterprise.EnterpriseUserDataKeyLight\"^\n$EnterpriseUserDataKeysByNodeResponse\x12\x36\n\x04keys\x18\x01 
\x03(\x0b\x32(.Enterprise.EnterpriseUserDataKeysByNode\"2\n\x15\x45nterpriseDataRequest\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\"0\n\x13SpecialProvisioning\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"\x84\x02\n\x11GeneralDataEntity\x12\x16\n\x0e\x65nterpriseName\x18\x01 \x01(\t\x12\x1a\n\x12restrictVisibility\x18\x02 \x01(\x08\x12<\n\x13specialProvisioning\x18\x04 \x01(\x0b\x32\x1f.Enterprise.SpecialProvisioning\x12\x30\n\ruserPrivilege\x18\x07 \x01(\x0b\x32\x19.Enterprise.UserPrivilege\x12\x13\n\x0b\x64istributor\x18\x08 \x01(\x08\x12\x1d\n\x15\x66orbidAccountTransfer\x18\t \x01(\x08\x12\x17\n\x0fshowUserOnboard\x18\n \x01(\x08\"\xfd\x01\n\x04Node\x12\x0e\n\x06nodeId\x18\x01 \x01(\x03\x12\x10\n\x08parentId\x18\x02 \x01(\x03\x12\x10\n\x08\x62ridgeId\x18\x03 \x01(\x03\x12\x0e\n\x06scimId\x18\x04 \x01(\x03\x12\x11\n\tlicenseId\x18\x05 \x01(\x03\x12\x15\n\rencryptedData\x18\x06 \x01(\t\x12\x12\n\nduoEnabled\x18\x07 \x01(\x08\x12\x12\n\nrsaEnabled\x18\x08 \x01(\x08\x12 \n\x14ssoServiceProviderId\x18\t \x01(\x03\x42\x02\x18\x01\x12\x1a\n\x12restrictVisibility\x18\n \x01(\x08\x12!\n\x15ssoServiceProviderIds\x18\x0b \x03(\x03\x42\x02\x10\x01\"\x8e\x01\n\x04Role\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x15\n\rencryptedData\x18\x03 \x01(\t\x12\x0f\n\x07keyType\x18\x04 \x01(\t\x12\x14\n\x0cvisibleBelow\x18\x05 \x01(\x08\x12\x16\n\x0enewUserInherit\x18\x06 \x01(\x08\x12\x10\n\x08roleType\x18\x07 \x01(\t\"\xb8\x02\n\x04User\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x15\n\rencryptedData\x18\x03 \x01(\t\x12\x0f\n\x07keyType\x18\x04 \x01(\t\x12\x10\n\x08username\x18\x05 \x01(\t\x12\x0e\n\x06status\x18\x06 \x01(\t\x12\x0c\n\x04lock\x18\x07 \x01(\x05\x12\x0e\n\x06userId\x18\x08 \x01(\x05\x12\x1e\n\x16\x61\x63\x63ountShareExpiration\x18\t \x01(\x03\x12\x10\n\x08\x66ullName\x18\n \x01(\t\x12\x10\n\x08jobTitle\x18\x0b \x01(\t\x12\x12\n\ntfaEnabled\x18\x0c 
\x01(\x08\x12\x46\n\x18transferAcceptanceStatus\x18\r \x01(\x0e\x32$.Enterprise.TransferAcceptanceStatus\"7\n\tUserAlias\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x10\n\x08username\x18\x02 \x01(\t\"\xac\x01\n\x18\x43omplianceReportMetaData\x12\x11\n\treportUid\x18\x01 \x01(\x0c\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x12\n\nreportName\x18\x03 \x01(\t\x12\x15\n\rdateGenerated\x18\x04 \x01(\x03\x12\x11\n\trunByName\x18\x05 \x01(\t\x12\x16\n\x0enumberOfOwners\x18\x07 \x01(\x05\x12\x17\n\x0fnumberOfRecords\x18\x08 \x01(\x05\"S\n\x0bManagedNode\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x15\n\rmanagedNodeId\x18\x02 \x01(\x03\x12\x1d\n\x15\x63\x61scadeNodeManagement\x18\x03 \x01(\x08\"T\n\x0fUserManagedNode\x12\x0e\n\x06nodeId\x18\x01 \x01(\x03\x12\x1d\n\x15\x63\x61scadeNodeManagement\x18\x02 \x01(\x08\x12\x12\n\nprivileges\x18\x03 \x03(\t\"w\n\rUserPrivilege\x12\x35\n\x10userManagedNodes\x18\x01 \x03(\x0b\x32\x1b.Enterprise.UserManagedNode\x12\x18\n\x10\x65nterpriseUserId\x18\x02 \x01(\x03\x12\x15\n\rencryptedData\x18\x03 \x01(\t\"4\n\x08RoleUser\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x18\n\x10\x65nterpriseUserId\x18\x02 \x01(\x03\"M\n\rRolePrivilege\x12\x15\n\rmanagedNodeId\x18\x01 \x01(\x03\x12\x0e\n\x06roleId\x18\x02 \x01(\x03\x12\x15\n\rprivilegeType\x18\x03 \x01(\t\"I\n\x0fRoleEnforcement\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x17\n\x0f\x65nforcementType\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\"\xa9\x01\n\x04Team\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06nodeId\x18\x03 \x01(\x03\x12\x14\n\x0crestrictEdit\x18\x04 \x01(\x08\x12\x15\n\rrestrictShare\x18\x05 \x01(\x08\x12\x14\n\x0crestrictView\x18\x06 \x01(\x08\x12\x15\n\rencryptedData\x18\x07 \x01(\t\x12\x18\n\x10\x65ncryptedTeamKey\x18\x08 \x01(\t\"G\n\x08TeamUser\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x18\n\x10\x65nterpriseUserId\x18\x02 \x01(\x03\x12\x10\n\x08userType\x18\x03 
\x01(\t\"K\n\x1aGetDistributorInfoResponse\x12-\n\x0c\x64istributors\x18\x01 \x03(\x0b\x32\x17.Enterprise.Distributor\"B\n\x0b\x44istributor\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\x08mspInfos\x18\x02 \x03(\x0b\x32\x13.Enterprise.MspInfo\"\x9d\x02\n\x07MspInfo\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12\x16\n\x0e\x65nterpriseName\x18\x02 \x01(\t\x12\x19\n\x11\x61llocatedLicenses\x18\x03 \x01(\x05\x12\x19\n\x11\x61llowedMcProducts\x18\x04 \x03(\t\x12\x15\n\rallowedAddOns\x18\x05 \x03(\t\x12\x17\n\x0fmaxFilePlanType\x18\x06 \x01(\t\x12\x34\n\x10managedCompanies\x18\x07 \x03(\x0b\x32\x1a.Enterprise.ManagedCompany\x12\x1e\n\x16\x61llowUnlimitedLicenses\x18\x08 \x01(\x08\x12(\n\x06\x61\x64\x64Ons\x18\t \x03(\x0b\x32\x18.Enterprise.LicenseAddOn\"\x91\x02\n\x0eManagedCompany\x12\x16\n\x0emcEnterpriseId\x18\x01 \x01(\x05\x12\x18\n\x10mcEnterpriseName\x18\x02 \x01(\t\x12\x11\n\tmspNodeId\x18\x03 \x01(\x03\x12\x15\n\rnumberOfSeats\x18\x04 \x01(\x05\x12\x15\n\rnumberOfUsers\x18\x05 \x01(\x05\x12\x11\n\tproductId\x18\x06 \x01(\t\x12\x11\n\tisExpired\x18\x07 \x01(\x08\x12\x0f\n\x07treeKey\x18\x08 \x01(\t\x12\x15\n\rtree_key_role\x18\t \x01(\x03\x12\x14\n\x0c\x66ilePlanType\x18\n \x01(\t\x12(\n\x06\x61\x64\x64Ons\x18\x0b \x03(\x0b\x32\x18.Enterprise.LicenseAddOn\"R\n\x07MSPPool\x12\x11\n\tproductId\x18\x01 \x01(\t\x12\r\n\x05seats\x18\x02 \x01(\x05\x12\x16\n\x0e\x61vailableSeats\x18\x03 \x01(\x05\x12\r\n\x05stash\x18\x04 \x01(\x05\":\n\nMSPContact\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12\x16\n\x0e\x65nterpriseName\x18\x02 \x01(\t\"\xec\x01\n\x0cLicenseAddOn\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x65nabled\x18\x02 \x01(\x08\x12\x0f\n\x07isTrial\x18\x03 \x01(\x08\x12\x12\n\nexpiration\x18\x04 \x01(\x03\x12\x0f\n\x07\x63reated\x18\x05 \x01(\x03\x12\r\n\x05seats\x18\x06 \x01(\x05\x12\x16\n\x0e\x61\x63tivationTime\x18\x07 \x01(\x03\x12\x19\n\x11includedInProduct\x18\x08 \x01(\x08\x12\x14\n\x0c\x61piCallCount\x18\t 
\x01(\x05\x12\x17\n\x0ftierDescription\x18\n \x01(\t\x12\x16\n\x0eseatsAllocated\x18\x0b \x01(\x05\"s\n\tMCDefault\x12\x11\n\tmcProduct\x18\x01 \x01(\t\x12\x0e\n\x06\x61\x64\x64Ons\x18\x02 \x03(\t\x12\x14\n\x0c\x66ilePlanType\x18\x03 \x01(\t\x12\x13\n\x0bmaxLicenses\x18\x04 \x01(\x05\x12\x18\n\x10\x66ixedMaxLicenses\x18\x05 \x01(\x08\"\xd2\x01\n\nMSPPermits\x12\x12\n\nrestricted\x18\x01 \x01(\x08\x12\x1a\n\x12maxAllowedLicenses\x18\x02 \x01(\x05\x12\x19\n\x11\x61llowedMcProducts\x18\x03 \x03(\t\x12\x15\n\rallowedAddOns\x18\x04 \x03(\t\x12\x17\n\x0fmaxFilePlanType\x18\x05 \x01(\t\x12\x1e\n\x16\x61llowUnlimitedLicenses\x18\x06 \x01(\x08\x12)\n\nmcDefaults\x18\x07 \x03(\x0b\x32\x15.Enterprise.MCDefault\"\xa0\x04\n\x07License\x12\x0c\n\x04paid\x18\x01 \x01(\x08\x12\x15\n\rnumberOfSeats\x18\x02 \x01(\x05\x12\x12\n\nexpiration\x18\x03 \x01(\x03\x12\x14\n\x0clicenseKeyId\x18\x04 \x01(\x05\x12\x15\n\rproductTypeId\x18\x05 \x01(\x05\x12\x0c\n\x04name\x18\x06 \x01(\t\x12\x1b\n\x13\x65nterpriseLicenseId\x18\x07 \x01(\x03\x12\x16\n\x0eseatsAllocated\x18\x08 \x01(\x05\x12\x14\n\x0cseatsPending\x18\t \x01(\x05\x12\x0c\n\x04tier\x18\n \x01(\x05\x12\x16\n\x0e\x66ilePlanTypeId\x18\x0b \x01(\x05\x12\x10\n\x08maxBytes\x18\x0c \x01(\x03\x12\x19\n\x11storageExpiration\x18\r \x01(\x03\x12\x15\n\rlicenseStatus\x18\x0e \x01(\t\x12$\n\x07mspPool\x18\x0f \x03(\x0b\x32\x13.Enterprise.MSPPool\x12)\n\tmanagedBy\x18\x10 \x01(\x0b\x32\x16.Enterprise.MSPContact\x12(\n\x06\x61\x64\x64Ons\x18\x11 \x03(\x0b\x32\x18.Enterprise.LicenseAddOn\x12\x17\n\x0fnextBillingDate\x18\x12 \x01(\x03\x12\x17\n\x0fhasMSPLegacyLog\x18\x13 \x01(\x08\x12*\n\nmspPermits\x18\x14 \x01(\x0b\x32\x16.Enterprise.MSPPermits\x12\x13\n\x0b\x64istributor\x18\x15 \x01(\x08\"n\n\x06\x42ridge\x12\x10\n\x08\x62ridgeId\x18\x01 \x01(\x03\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x18\n\x10wanIpEnforcement\x18\x03 \x01(\t\x12\x18\n\x10lanIpEnforcement\x18\x04 \x01(\t\x12\x0e\n\x06status\x18\x05 
\x01(\t\"t\n\x04Scim\x12\x0e\n\x06scimId\x18\x01 \x01(\x03\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\x12\n\nlastSynced\x18\x04 \x01(\x03\x12\x12\n\nrolePrefix\x18\x05 \x01(\t\x12\x14\n\x0cuniqueGroups\x18\x06 \x01(\x08\"L\n\x0e\x45mailProvision\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x0e\n\x06\x64omain\x18\x03 \x01(\t\x12\x0e\n\x06method\x18\x04 \x01(\t\"R\n\nQueuedTeam\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06nodeId\x18\x03 \x01(\x03\x12\x15\n\rencryptedData\x18\x04 \x01(\t\"0\n\x0eQueuedTeamUser\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\r\n\x05users\x18\x02 \x03(\x03\"\xa4\x01\n\x0eTeamsAddResult\x12\x34\n\x11successfulTeamAdd\x18\x01 \x03(\x0b\x32\x19.Enterprise.TeamAddResult\x12\x36\n\x13unsuccessfulTeamAdd\x18\x02 \x03(\x0b\x32\x19.Enterprise.TeamAddResult\x12\x0e\n\x06result\x18\x03 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x04 \x01(\t\"U\n\rTeamAddResult\x12\x1e\n\x04team\x18\x01 \x01(\x0b\x32\x10.Enterprise.Team\x12\x0e\n\x06result\x18\x02 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x03 \x01(\t\"\x91\x01\n\nSsoService\x12\x1c\n\x14ssoServiceProviderId\x18\x01 \x01(\x03\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x0e\n\x06sp_url\x18\x04 \x01(\t\x12\x16\n\x0einviteNewUsers\x18\x05 \x01(\x08\x12\x0e\n\x06\x61\x63tive\x18\x06 \x01(\x08\x12\x0f\n\x07isCloud\x18\x07 \x01(\x08\"1\n\x10ReportFilterUser\x12\x0e\n\x06userId\x18\x01 \x01(\x05\x12\r\n\x05\x65mail\x18\x02 \x01(\t\"\x97\x02\n\x1d\x44\x65viceRequestForAdminApproval\x12\x10\n\x08\x64\x65viceId\x18\x01 \x01(\x03\x12\x18\n\x10\x65nterpriseUserId\x18\x02 \x01(\x03\x12\x1c\n\x14\x65ncryptedDeviceToken\x18\x03 \x01(\x0c\x12\x17\n\x0f\x64\x65vicePublicKey\x18\x04 \x01(\x0c\x12\x12\n\ndeviceName\x18\x05 \x01(\t\x12\x15\n\rclientVersion\x18\x06 \x01(\t\x12\x12\n\ndeviceType\x18\x07 \x01(\t\x12\x0c\n\x04\x64\x61te\x18\x08 \x01(\x03\x12\x11\n\tipAddress\x18\t 
\x01(\t\x12\x10\n\x08location\x18\n \x01(\t\x12\r\n\x05\x65mail\x18\x0b \x01(\t\x12\x12\n\naccountUid\x18\x0c \x01(\x0c\"`\n\x0e\x45nterpriseData\x12\x30\n\x06\x65ntity\x18\x01 \x01(\x0e\x32 .Enterprise.EnterpriseDataEntity\x12\x0e\n\x06\x64\x65lete\x18\x02 \x01(\x08\x12\x0c\n\x04\x64\x61ta\x18\x03 \x03(\x0c\"\xd0\x01\n\x16\x45nterpriseDataResponse\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x0f\n\x07hasMore\x18\x02 \x01(\x08\x12,\n\x0b\x63\x61\x63heStatus\x18\x03 \x01(\x0e\x32\x17.Enterprise.CacheStatus\x12(\n\x04\x64\x61ta\x18\x04 \x03(\x0b\x32\x1a.Enterprise.EnterpriseData\x12\x32\n\x0bgeneralData\x18\x05 \x01(\x0b\x32\x1d.Enterprise.GeneralDataEntity\"*\n\rBackupRequest\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\"\x98\x01\n\x0c\x42\x61\x63kupRecord\x12\x0e\n\x06userId\x18\x01 \x01(\x05\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x0b\n\x03key\x18\x03 \x01(\x0c\x12*\n\x07keyType\x18\x04 \x01(\x0e\x32\x19.Enterprise.BackupKeyType\x12\x0f\n\x07version\x18\x05 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\x0c\x12\r\n\x05\x65xtra\x18\x07 \x01(\x0c\".\n\tBackupKey\x12\x0e\n\x06userId\x18\x01 \x01(\x05\x12\x11\n\tbackupKey\x18\x02 \x01(\x0c\"\x8d\x02\n\nBackupUser\x12\x0e\n\x06userId\x18\x01 \x01(\x05\x12\x10\n\x08userName\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x61taKey\x18\x03 \x01(\x0c\x12\x36\n\x0b\x64\x61taKeyType\x18\x04 \x01(\x0e\x32!.Enterprise.BackupUserDataKeyType\x12\x12\n\nprivateKey\x18\x05 \x01(\x0c\x12\x0f\n\x07treeKey\x18\x06 \x01(\x0c\x12.\n\x0btreeKeyType\x18\x07 \x01(\x0e\x32\x19.Enterprise.BackupKeyType\x12)\n\nbackupKeys\x18\x08 \x03(\x0b\x32\x15.Enterprise.BackupKey\x12\x14\n\x0cprivateECKey\x18\t \x01(\x0c\"\x9e\x01\n\x0e\x42\x61\x63kupResponse\x12\x1f\n\x17\x65nterpriseEccPrivateKey\x18\x01 \x01(\x0c\x12%\n\x05users\x18\x02 \x03(\x0b\x32\x16.Enterprise.BackupUser\x12)\n\x07records\x18\x03 \x03(\x0b\x32\x18.Enterprise.BackupRecord\x12\x19\n\x11\x63ontinuationToken\x18\x04 \x01(\x0c\"e\n\nBackupFile\x12\x0c\n\x04user\x18\x01 
\x01(\t\x12\x11\n\tbackupUid\x18\x02 \x01(\x0c\x12\x10\n\x08\x66ileName\x18\x03 \x01(\t\x12\x0f\n\x07\x63reated\x18\x04 \x01(\x03\x12\x13\n\x0b\x64ownloadUrl\x18\x05 \x01(\t\"8\n\x0f\x42\x61\x63kupsResponse\x12%\n\x05\x66iles\x18\x01 \x03(\x0b\x32\x16.Enterprise.BackupFile\".\n\x1cGetEnterpriseDataKeysRequest\x12\x0e\n\x06roleId\x18\x01 \x03(\x03\"\xff\x01\n\x1dGetEnterpriseDataKeysResponse\x12:\n\x12reEncryptedRoleKey\x18\x01 \x03(\x0b\x32\x1e.Enterprise.ReEncryptedRoleKey\x12$\n\x07roleKey\x18\x02 \x03(\x0b\x32\x13.Enterprise.RoleKey\x12\"\n\x06mspKey\x18\x03 \x01(\x0b\x32\x12.Enterprise.MspKey\x12\x32\n\x0e\x65nterpriseKeys\x18\x04 \x01(\x0b\x32\x1a.Enterprise.EnterpriseKeys\x12$\n\x07treeKey\x18\x05 \x01(\x0b\x32\x13.Enterprise.TreeKey\"^\n\x07RoleKey\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x14\n\x0c\x65ncryptedKey\x18\x02 \x01(\t\x12-\n\x07keyType\x18\x03 \x01(\x0e\x32\x1c.Enterprise.EncryptedKeyType\"d\n\x06MspKey\x12\x1b\n\x13\x65ncryptedMspTreeKey\x18\x01 \x01(\t\x12=\n\x17\x65ncryptedMspTreeKeyType\x18\x02 \x01(\x0e\x32\x1c.Enterprise.EncryptedKeyType\"|\n\x0e\x45nterpriseKeys\x12\x14\n\x0crsaPublicKey\x18\x01 \x01(\x0c\x12\x1e\n\x16rsaEncryptedPrivateKey\x18\x02 \x01(\x0c\x12\x14\n\x0c\x65\x63\x63PublicKey\x18\x03 \x01(\x0c\x12\x1e\n\x16\x65\x63\x63\x45ncryptedPrivateKey\x18\x04 \x01(\x0c\"H\n\x07TreeKey\x12\x0f\n\x07treeKey\x18\x01 \x01(\t\x12,\n\tkeyTypeId\x18\x02 \x01(\x0e\x32\x19.Enterprise.BackupKeyType\"E\n\x14SharedRecordResponse\x12-\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x1d.Enterprise.SharedRecordEvent\"p\n\x11SharedRecordEvent\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08userName\x18\x02 \x01(\t\x12\x0f\n\x07\x63\x61nEdit\x18\x03 \x01(\x08\x12\x12\n\ncanReshare\x18\x04 \x01(\x08\x12\x11\n\tshareFrom\x18\x05 \x01(\x05\".\n\x1cSetRestrictVisibilityRequest\x12\x0e\n\x06nodeId\x18\x01 \x01(\x03\"\xd0\x01\n\x0eUserAddRequest\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x0e\n\x06nodeId\x18\x02 
\x01(\x03\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12-\n\x07keyType\x18\x04 \x01(\x0e\x32\x1c.Enterprise.EncryptedKeyType\x12\x10\n\x08\x66ullName\x18\x05 \x01(\t\x12\x10\n\x08jobTitle\x18\x06 \x01(\t\x12\r\n\x05\x65mail\x18\x07 \x01(\t\x12\x1b\n\x13suppressEmailInvite\x18\x08 \x01(\x08\":\n\x11UserUpdateRequest\x12%\n\x05users\x18\x01 \x03(\x0b\x32\x16.Enterprise.UserUpdate\"\xaf\x01\n\nUserUpdate\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12-\n\x07keyType\x18\x04 \x01(\x0e\x32\x1c.Enterprise.EncryptedKeyType\x12\x10\n\x08\x66ullName\x18\x05 \x01(\t\x12\x10\n\x08jobTitle\x18\x06 \x01(\t\x12\r\n\x05\x65mail\x18\x07 \x01(\t\"A\n\x12UserUpdateResponse\x12+\n\x05users\x18\x01 \x03(\x0b\x32\x1c.Enterprise.UserUpdateResult\"Z\n\x10UserUpdateResult\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12,\n\x06status\x18\x02 \x01(\x0e\x32\x1c.Enterprise.UserUpdateStatus\"J\n\x1d\x43omplianceRecordOwnersRequest\x12\x0f\n\x07nodeIds\x18\x01 \x03(\x03\x12\x18\n\x10includeNonShared\x18\x02 \x01(\x08\"O\n\x1e\x43omplianceRecordOwnersResponse\x12-\n\x0crecordOwners\x18\x01 \x03(\x0b\x32\x17.Enterprise.RecordOwner\"7\n\x0bRecordOwner\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x0e\n\x06shared\x18\x02 \x01(\x08\"\xa6\x01\n PreliminaryComplianceDataRequest\x12\x19\n\x11\x65nterpriseUserIds\x18\x01 \x03(\x03\x12\x18\n\x10includeNonShared\x18\x02 \x01(\x08\x12\x19\n\x11\x63ontinuationToken\x18\x03 \x01(\x0c\x12\x32\n*includeTotalMatchingRecordsInFirstResponse\x18\x04 \x01(\x08\"\x9f\x01\n!PreliminaryComplianceDataResponse\x12\x30\n\rauditUserData\x18\x01 \x03(\x0b\x32\x19.Enterprise.AuditUserData\x12\x19\n\x11\x63ontinuationToken\x18\x02 \x01(\x0c\x12\x0f\n\x07hasMore\x18\x03 \x01(\x08\x12\x1c\n\x14totalMatchingRecords\x18\x04 \x01(\x05\"K\n\x0f\x41uditUserRecord\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x15\n\rencryptedData\x18\x02 \x01(\x0c\x12\x0e\n\x06shared\x18\x03 
\x01(\x08\"\x8d\x01\n\rAuditUserData\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x35\n\x10\x61uditUserRecords\x18\x02 \x03(\x0b\x32\x1b.Enterprise.AuditUserRecord\x12+\n\x06status\x18\x03 \x01(\x0e\x32\x1b.Enterprise.AuditUserStatus\"\x7f\n\x17\x43omplianceReportFilters\x12\x14\n\x0crecordTitles\x18\x01 \x03(\t\x12\x12\n\nrecordUids\x18\x02 \x03(\x0c\x12\x11\n\tjobTitles\x18\x03 \x03(\x03\x12\x0c\n\x04urls\x18\x04 \x03(\t\x12\x19\n\x11\x65nterpriseUserIds\x18\x05 \x03(\x03\"\x7f\n\x17\x43omplianceReportRequest\x12<\n\x13\x63omplianceReportRun\x18\x01 \x01(\x0b\x32\x1f.Enterprise.ComplianceReportRun\x12\x12\n\nreportName\x18\x02 \x01(\t\x12\x12\n\nsaveReport\x18\x03 \x01(\x08\"\x85\x01\n\x13\x43omplianceReportRun\x12N\n\x17reportCriteriaAndFilter\x18\x01 \x01(\x0b\x32-.Enterprise.ComplianceReportCriteriaAndFilter\x12\r\n\x05users\x18\x02 \x03(\x03\x12\x0f\n\x07records\x18\x03 \x03(\x0c\"\xfc\x01\n!ComplianceReportCriteriaAndFilter\x12\x0e\n\x06nodeId\x18\x01 \x01(\x03\x12\x13\n\x0b\x63riteriaUid\x18\x02 \x01(\x0c\x12\x14\n\x0c\x63riteriaName\x18\x03 \x01(\t\x12\x36\n\x08\x63riteria\x18\x04 \x01(\x0b\x32$.Enterprise.ComplianceReportCriteria\x12\x33\n\x07\x66ilters\x18\x05 \x03(\x0b\x32\".Enterprise.ComplianceReportFilter\x12\x14\n\x0clastModified\x18\x06 \x01(\x03\x12\x19\n\x11nodeEncryptedData\x18\x07 \x01(\x0c\"b\n\x18\x43omplianceReportCriteria\x12\x11\n\tjobTitles\x18\x01 \x03(\t\x12\x19\n\x11\x65nterpriseUserIds\x18\x02 \x03(\x03\x12\x18\n\x10includeNonShared\x18\x03 \x01(\x08\"x\n\x16\x43omplianceReportFilter\x12\x14\n\x0crecordTitles\x18\x01 \x03(\t\x12\x12\n\nrecordUids\x18\x02 \x03(\x0c\x12\x11\n\tjobTitles\x18\x03 \x03(\t\x12\x0c\n\x04urls\x18\x04 \x03(\t\x12\x13\n\x0brecordTypes\x18\x05 \x03(\t\"\xa1\x05\n\x18\x43omplianceReportResponse\x12\x15\n\rdateGenerated\x18\x01 \x01(\x03\x12\x15\n\rrunByUserName\x18\x02 \x01(\t\x12\x12\n\nreportName\x18\x03 \x01(\t\x12\x11\n\treportUid\x18\x04 \x01(\x0c\x12<\n\x13\x63omplianceReportRun\x18\x05 
\x01(\x0b\x32\x1f.Enterprise.ComplianceReportRun\x12-\n\x0cuserProfiles\x18\x06 \x03(\x0b\x32\x17.Enterprise.UserProfile\x12)\n\nauditTeams\x18\x07 \x03(\x0b\x32\x15.Enterprise.AuditTeam\x12-\n\x0c\x61uditRecords\x18\x08 \x03(\x0b\x32\x17.Enterprise.AuditRecord\x12+\n\x0buserRecords\x18\t \x03(\x0b\x32\x16.Enterprise.UserRecord\x12;\n\x13sharedFolderRecords\x18\n \x03(\x0b\x32\x1e.Enterprise.SharedFolderRecord\x12\x37\n\x11sharedFolderUsers\x18\x0b \x03(\x0b\x32\x1c.Enterprise.SharedFolderUser\x12\x37\n\x11sharedFolderTeams\x18\x0c \x03(\x0b\x32\x1c.Enterprise.SharedFolderTeam\x12\x31\n\x0e\x61uditTeamUsers\x18\r \x03(\x0b\x32\x19.Enterprise.AuditTeamUser\x12)\n\nauditRoles\x18\x0e \x03(\x0b\x32\x15.Enterprise.AuditRole\x12/\n\rlinkedRecords\x18\x0f \x03(\x0b\x32\x18.Enterprise.LinkedRecord\"\x81\x01\n\x0b\x41uditRecord\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x11\n\tauditData\x18\x02 \x01(\x0c\x12\x16\n\x0ehasAttachments\x18\x03 \x01(\x08\x12\x0f\n\x07inTrash\x18\x04 \x01(\x08\x12\x10\n\x08treeLeft\x18\x05 \x01(\x05\x12\x11\n\ttreeRight\x18\x06 \x01(\x05\"\x80\x02\n\tAuditRole\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x15\n\rencryptedData\x18\x02 \x01(\x0c\x12&\n\x1erestrictShareOutsideEnterprise\x18\x03 \x01(\x08\x12\x18\n\x10restrictShareAll\x18\x04 \x01(\x08\x12\"\n\x1arestrictShareOfAttachments\x18\x05 \x01(\x08\x12)\n!restrictMaskPasswordsWhileEditing\x18\x06 \x01(\x08\x12;\n\x13roleNodeManagements\x18\x07 \x03(\x0b\x32\x1e.Enterprise.RoleNodeManagement\"^\n\x12RoleNodeManagement\x12\x10\n\x08treeLeft\x18\x01 \x01(\x05\x12\x11\n\ttreeRight\x18\x02 \x01(\x05\x12\x0f\n\x07\x63\x61scade\x18\x03 \x01(\x08\x12\x12\n\nprivileges\x18\x04 \x01(\x05\"k\n\x0bUserProfile\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x10\n\x08\x66ullName\x18\x02 \x01(\t\x12\x10\n\x08jobTitle\x18\x03 \x01(\t\x12\r\n\x05\x65mail\x18\x04 \x01(\t\x12\x0f\n\x07roleIds\x18\x05 \x03(\x03\"=\n\x10RecordPermission\x12\x11\n\trecordUid\x18\x01 
\x01(\x0c\x12\x16\n\x0epermissionBits\x18\x02 \x01(\x05\"_\n\nUserRecord\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x37\n\x11recordPermissions\x18\x02 \x03(\x0b\x32\x1c.Enterprise.RecordPermission\"[\n\tAuditTeam\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x10\n\x08teamName\x18\x02 \x01(\t\x12\x14\n\x0crestrictEdit\x18\x03 \x01(\x08\x12\x15\n\rrestrictShare\x18\x04 \x01(\x08\";\n\rAuditTeamUser\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x19\n\x11\x65nterpriseUserIds\x18\x02 \x03(\x03\"\x9f\x01\n\x12SharedFolderRecord\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x37\n\x11recordPermissions\x18\x02 \x03(\x0b\x32\x1c.Enterprise.RecordPermission\x12\x37\n\x11shareAdminRecords\x18\x03 \x03(\x0b\x32\x1c.Enterprise.ShareAdminRecord\"M\n\x10ShareAdminRecord\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x1f\n\x17recordPermissionIndexes\x18\x02 \x03(\x05\"F\n\x10SharedFolderUser\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x19\n\x11\x65nterpriseUserIds\x18\x02 \x03(\x03\"=\n\x10SharedFolderTeam\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x10\n\x08teamUids\x18\x02 \x03(\x0c\"/\n\x1aGetComplianceReportRequest\x12\x11\n\treportUid\x18\x01 \x01(\x0c\"2\n\x1bGetComplianceReportResponse\x12\x13\n\x0b\x64ownloadUrl\x18\x01 \x01(\t\"6\n\x1f\x43omplianceReportCriteriaRequest\x12\x13\n\x0b\x63riteriaUid\x18\x01 \x01(\x0c\";\n$SaveComplianceReportCriteriaResponse\x12\x13\n\x0b\x63riteriaUid\x18\x01 \x01(\x0c\"4\n\x0cLinkedRecord\x12\x10\n\x08ownerUid\x18\x01 \x01(\x0c\x12\x12\n\nrecordUids\x18\x02 \x03(\x0c\"W\n\x17GetSharingAdminsRequest\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x10\n\x08username\x18\x03 \x01(\t\"\xe0\x01\n\x0eUserProfileExt\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x10\n\x08\x66ullName\x18\x02 \x01(\t\x12\x10\n\x08jobTitle\x18\x03 \x01(\t\x12\x14\n\x0cisMSPMCAdmin\x18\x04 \x01(\x08\x12\x18\n\x10isInSharedFolder\x18\x05 \x01(\x08\x12&\n\x1eisShareAdminForRequestedObject\x18\x06 
\x01(\x08\x12(\n isShareAdminForSharedFolderOwner\x18\x07 \x01(\x08\x12\x19\n\x11hasAccessToObject\x18\x08 \x01(\x08\"O\n\x18GetSharingAdminsResponse\x12\x33\n\x0fuserProfileExts\x18\x01 \x03(\x0b\x32\x1a.Enterprise.UserProfileExt\"_\n\x1eTeamsEnterpriseUsersAddRequest\x12=\n\x05teams\x18\x01 \x03(\x0b\x32..Enterprise.TeamsEnterpriseUsersAddTeamRequest\"t\n\"TeamsEnterpriseUsersAddTeamRequest\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12=\n\x05users\x18\x02 \x03(\x0b\x32..Enterprise.TeamsEnterpriseUsersAddUserRequest\"\xab\x01\n\"TeamsEnterpriseUsersAddUserRequest\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12*\n\x08userType\x18\x02 \x01(\x0e\x32\x18.Enterprise.TeamUserType\x12\x13\n\x07teamKey\x18\x03 \x01(\tB\x02\x18\x01\x12*\n\x0ctypedTeamKey\x18\x04 \x01(\x0b\x32\x14.Enterprise.TypedKey\"F\n\x08TypedKey\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12-\n\x07keyType\x18\x02 \x01(\x0e\x32\x1c.Enterprise.EncryptedKeyType\"s\n\x1fTeamsEnterpriseUsersAddResponse\x12>\n\x05teams\x18\x01 \x03(\x0b\x32/.Enterprise.TeamsEnterpriseUsersAddTeamResponse\x12\x10\n\x08revision\x18\x02 \x01(\x03\"\xc4\x01\n#TeamsEnterpriseUsersAddTeamResponse\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12>\n\x05users\x18\x02 \x03(\x0b\x32/.Enterprise.TeamsEnterpriseUsersAddUserResponse\x12\x0f\n\x07success\x18\x03 \x01(\x08\x12\x0f\n\x07message\x18\x04 \x01(\t\x12\x12\n\nresultCode\x18\x05 \x01(\t\x12\x16\n\x0e\x61\x64\x64itionalInfo\x18\x06 \x01(\t\"\x9f\x01\n#TeamsEnterpriseUsersAddUserResponse\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x0f\n\x07success\x18\x03 \x01(\x08\x12\x0f\n\x07message\x18\x04 \x01(\t\x12\x12\n\nresultCode\x18\x05 \x01(\t\x12\x16\n\x0e\x61\x64\x64itionalInfo\x18\x06 \x01(\t\"E\n\x18TeamEnterpriseUserRemove\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x18\n\x10\x65nterpriseUserId\x18\x02 \x01(\x03\"j\n TeamEnterpriseUserRemovesRequest\x12\x46\n\x18teamEnterpriseUserRemove\x18\x01 
\x03(\x0b\x32$.Enterprise.TeamEnterpriseUserRemove\"{\n!TeamEnterpriseUserRemovesResponse\x12V\n teamEnterpriseUserRemoveResponse\x18\x01 \x03(\x0b\x32,.Enterprise.TeamEnterpriseUserRemoveResponse\"\xb8\x01\n TeamEnterpriseUserRemoveResponse\x12\x46\n\x18teamEnterpriseUserRemove\x18\x01 \x01(\x0b\x32$.Enterprise.TeamEnterpriseUserRemove\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\nresultCode\x18\x03 \x01(\t\x12\x0f\n\x07message\x18\x04 \x01(\t\x12\x16\n\x0e\x61\x64\x64itionalInfo\x18\x05 \x01(\t\"M\n\x0b\x44omainAlias\x12\x0e\n\x06\x64omain\x18\x01 \x01(\t\x12\r\n\x05\x61lias\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\x05\x12\x0f\n\x07message\x18\x04 \x01(\t\"B\n\x12\x44omainAliasRequest\x12,\n\x0b\x64omainAlias\x18\x01 \x03(\x0b\x32\x17.Enterprise.DomainAlias\"C\n\x13\x44omainAliasResponse\x12,\n\x0b\x64omainAlias\x18\x01 \x03(\x0b\x32\x17.Enterprise.DomainAlias\"m\n\x1f\x45nterpriseUsersProvisionRequest\x12\x33\n\x05users\x18\x01 \x03(\x0b\x32$.Enterprise.EnterpriseUsersProvision\x12\x15\n\rclientVersion\x18\x02 \x01(\t\"\xb6\x03\n\x18\x45nterpriseUsersProvision\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x0e\n\x06nodeId\x18\x03 \x01(\x03\x12\x15\n\rencryptedData\x18\x04 \x01(\t\x12-\n\x07keyType\x18\x05 \x01(\x0e\x32\x1c.Enterprise.EncryptedKeyType\x12\x10\n\x08\x66ullName\x18\x06 \x01(\t\x12\x10\n\x08jobTitle\x18\x07 \x01(\t\x12\x1e\n\x16\x65nterpriseUsersDataKey\x18\x08 \x01(\x0c\x12\x14\n\x0c\x61uthVerifier\x18\t \x01(\x0c\x12\x18\n\x10\x65ncryptionParams\x18\n \x01(\x0c\x12\x14\n\x0crsaPublicKey\x18\x0b \x01(\x0c\x12\x1e\n\x16rsaEncryptedPrivateKey\x18\x0c \x01(\x0c\x12\x14\n\x0c\x65\x63\x63PublicKey\x18\r \x01(\x0c\x12\x1e\n\x16\x65\x63\x63\x45ncryptedPrivateKey\x18\x0e \x01(\x0c\x12\x1c\n\x14\x65ncryptedDeviceToken\x18\x0f \x01(\x0c\x12\x1a\n\x12\x65ncryptedClientKey\x18\x10 \x01(\x0c\"_\n EnterpriseUsersProvisionResponse\x12;\n\x07results\x18\x01 
\x03(\x0b\x32*.Enterprise.EnterpriseUsersProvisionResult\"q\n\x1e\x45nterpriseUsersProvisionResult\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x0c\n\x04\x63ode\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x16\n\x0e\x61\x64\x64itionalInfo\x18\x04 \x01(\t\"a\n\x19\x45nterpriseUsersAddRequest\x12-\n\x05users\x18\x01 \x03(\x0b\x32\x1e.Enterprise.EnterpriseUsersAdd\x12\x15\n\rclientVersion\x18\x02 \x01(\t\"\x8c\x02\n\x12\x45nterpriseUsersAdd\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x0e\n\x06nodeId\x18\x03 \x01(\x03\x12\x15\n\rencryptedData\x18\x04 \x01(\t\x12-\n\x07keyType\x18\x05 \x01(\x0e\x32\x1c.Enterprise.EncryptedKeyType\x12\x10\n\x08\x66ullName\x18\x06 \x01(\t\x12\x10\n\x08jobTitle\x18\x07 \x01(\t\x12\x1b\n\x13suppressEmailInvite\x18\x08 \x01(\x08\x12\x15\n\rinviteeLocale\x18\t \x01(\t\x12\x0c\n\x04move\x18\n \x01(\x08\x12\x0e\n\x06roleId\x18\x0b \x01(\x03\"\x9b\x01\n\x1a\x45nterpriseUsersAddResponse\x12\x35\n\x07results\x18\x01 \x03(\x0b\x32$.Enterprise.EnterpriseUsersAddResult\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x0c\n\x04\x63ode\x18\x03 \x01(\t\x12\x0f\n\x07message\x18\x04 \x01(\t\x12\x16\n\x0e\x61\x64\x64itionalInfo\x18\x05 \x01(\t\"\x96\x01\n\x18\x45nterpriseUsersAddResult\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x18\n\x10verificationCode\x18\x03 \x01(\t\x12\x0c\n\x04\x63ode\x18\x04 \x01(\t\x12\x0f\n\x07message\x18\x05 \x01(\t\x12\x16\n\x0e\x61\x64\x64itionalInfo\x18\x06 \x01(\t\"\xb9\x01\n\x17UpdateMSPPermitsRequest\x12\x17\n\x0fmspEnterpriseId\x18\x01 \x01(\x05\x12\x1a\n\x12maxAllowedLicenses\x18\x02 \x01(\x05\x12\x19\n\x11\x61llowedMcProducts\x18\x03 \x03(\t\x12\x15\n\rallowedAddOns\x18\x04 \x03(\t\x12\x17\n\x0fmaxFilePlanType\x18\x05 \x01(\t\x12\x1e\n\x16\x61llowUnlimitedLicenses\x18\x06 \x01(\x08\"9\n\x1c\x44\x65leteEnterpriseUsersRequest\x12\x19\n\x11\x65nterpriseUserIds\x18\x01 
\x03(\x03\"o\n\x1a\x44\x65leteEnterpriseUserStatus\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x37\n\x06status\x18\x02 \x01(\x0e\x32\'.Enterprise.DeleteEnterpriseUsersResult\"]\n\x1d\x44\x65leteEnterpriseUsersResponse\x12<\n\x0c\x64\x65leteStatus\x18\x01 \x03(\x0b\x32&.Enterprise.DeleteEnterpriseUserStatus\"w\n\x18\x43learSecurityDataRequest\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x03(\x03\x12\x10\n\x08\x61llUsers\x18\x02 \x01(\x08\x12/\n\x04type\x18\x03 \x01(\x0e\x32!.Enterprise.ClearSecurityDataType\"%\n\x13ListDomainsResponse\x12\x0e\n\x06\x64omain\x18\x01 \x03(\t\"d\n\x14ReserveDomainRequest\x12<\n\x13reserveDomainAction\x18\x01 \x01(\x0e\x32\x1f.Enterprise.ReserveDomainAction\x12\x0e\n\x06\x64omain\x18\x02 \x01(\t\"&\n\x15ReserveDomainResponse\x12\r\n\x05token\x18\x01 \x01(\t*\x1b\n\x07KeyType\x12\x07\n\x03RSA\x10\x00\x12\x07\n\x03\x45\x43\x43\x10\x01*\x9a\x02\n\x14RoleUserModifyStatus\x12\x0f\n\x0bROLE_EXISTS\x10\x00\x12\x14\n\x10MISSING_TREE_KEY\x10\x01\x12\x14\n\x10MISSING_ROLE_KEY\x10\x02\x12\x1e\n\x1aINVALID_ENTERPRISE_USER_ID\x10\x03\x12\x1b\n\x17PENDING_ENTERPRISE_USER\x10\x04\x12\x13\n\x0fINVALID_NODE_ID\x10\x05\x12!\n\x1dMAY_NOT_REMOVE_SELF_FROM_ROLE\x10\x06\x12\x1c\n\x18MUST_HAVE_ONE_USER_ADMIN\x10\x07\x12\x13\n\x0fINVALID_ROLE_ID\x10\x08\x12\x1d\n\x19PAM_LICENSE_SEAT_EXCEEDED\x10\t*=\n\x0e\x45nterpriseType\x12\x17\n\x13\x45NTERPRISE_STANDARD\x10\x00\x12\x12\n\x0e\x45NTERPRISE_MSP\x10\x01*s\n\x18TransferAcceptanceStatus\x12\r\n\tUNDEFINED\x10\x00\x12\x10\n\x0cNOT_REQUIRED\x10\x01\x12\x10\n\x0cNOT_ACCEPTED\x10\x02\x12\x16\n\x12PARTIALLY_ACCEPTED\x10\x03\x12\x0c\n\x08\x41\x43\x43\x45PTED\x10\x04*\xe1\x03\n\x14\x45nterpriseDataEntity\x12\x0b\n\x07UNKNOWN\x10\x00\x12\t\n\x05NODES\x10\x01\x12\t\n\x05ROLES\x10\x02\x12\t\n\x05USERS\x10\x03\x12\t\n\x05TEAMS\x10\x04\x12\x0e\n\nTEAM_USERS\x10\x05\x12\x0e\n\nROLE_USERS\x10\x06\x12\x13\n\x0fROLE_PRIVILEGES\x10\x07\x12\x15\n\x11ROLE_ENFORCEMENTS\x10\x08\x12\x0e\n\nROLE_TEAMS\x10\t\x12\x0c\n\x08LICENSE
S\x10\n\x12\x11\n\rMANAGED_NODES\x10\x0b\x12\x15\n\x11MANAGED_COMPANIES\x10\x0c\x12\x0b\n\x07\x42RIDGES\x10\r\x12\t\n\x05SCIMS\x10\x0e\x12\x13\n\x0f\x45MAIL_PROVISION\x10\x0f\x12\x10\n\x0cQUEUED_TEAMS\x10\x10\x12\x15\n\x11QUEUED_TEAM_USERS\x10\x11\x12\x10\n\x0cSSO_SERVICES\x10\x12\x12\x17\n\x13REPORT_FILTER_USERS\x10\x13\x12&\n\"DEVICES_REQUEST_FOR_ADMIN_APPROVAL\x10\x14\x12\x10\n\x0cUSER_ALIASES\x10\x15\x12)\n%COMPLIANCE_REPORT_CRITERIA_AND_FILTER\x10\x16\x12\x16\n\x12\x43OMPLIANCE_REPORTS\x10\x17*\"\n\x0b\x43\x61\x63heStatus\x12\x08\n\x04KEEP\x10\x00\x12\t\n\x05\x43LEAR\x10\x01*\x93\x01\n\rBackupKeyType\x12\n\n\x06NO_KEY\x10\x00\x12\x19\n\x15\x45NCRYPTED_BY_DATA_KEY\x10\x01\x12\x1b\n\x17\x45NCRYPTED_BY_PUBLIC_KEY\x10\x02\x12\x1d\n\x19\x45NCRYPTED_BY_DATA_KEY_GCM\x10\x03\x12\x1f\n\x1b\x45NCRYPTED_BY_PUBLIC_KEY_ECC\x10\x04*:\n\x15\x42\x61\x63kupUserDataKeyType\x12\x07\n\x03OWN\x10\x00\x12\x18\n\x14SHARED_TO_ENTERPRISE\x10\x01*\xa5\x01\n\x10\x45ncryptedKeyType\x12\r\n\tKT_NO_KEY\x10\x00\x12\x1c\n\x18KT_ENCRYPTED_BY_DATA_KEY\x10\x01\x12\x1e\n\x1aKT_ENCRYPTED_BY_PUBLIC_KEY\x10\x02\x12 
\n\x1cKT_ENCRYPTED_BY_DATA_KEY_GCM\x10\x03\x12\"\n\x1eKT_ENCRYPTED_BY_PUBLIC_KEY_ECC\x10\x04*\x8e\x02\n\x12\x45nterpriseFlagType\x12\x0b\n\x07INVALID\x10\x00\x12\x1a\n\x16\x41LLOW_PERSONAL_LICENSE\x10\x01\x12\x18\n\x14SPECIAL_PROVISIONING\x10\x02\x12\x10\n\x0cRECORD_TYPES\x10\x03\x12\x13\n\x0fSECRETS_MANAGER\x10\x04\x12\x15\n\x11\x45NTERPRISE_LOCKED\x10\x05\x12\x15\n\x11\x46ORBID_KEY_TYPE_2\x10\x06\x12\x15\n\x11\x43ONSOLE_ONBOARDED\x10\x07\x12\x1b\n\x17\x46ORBID_ACCOUNT_TRANSFER\x10\x08\x12\x15\n\x11NPS_POPUP_OPT_OUT\x10\t\x12\x15\n\x11SHOW_USER_ONBOARD\x10\n*E\n\x10UserUpdateStatus\x12\x12\n\x0eUSER_UPDATE_OK\x10\x00\x12\x1d\n\x19USER_UPDATE_ACCESS_DENIED\x10\x01*I\n\x0f\x41uditUserStatus\x12\x06\n\x02OK\x10\x00\x12\x11\n\rACCESS_DENIED\x10\x01\x12\x1b\n\x17NO_LONGER_IN_ENTERPRISE\x10\x02*3\n\x0cTeamUserType\x12\x08\n\x04USER\x10\x00\x12\t\n\x05\x41\x44MIN\x10\x01\x12\x0e\n\nADMIN_ONLY\x10\x02*x\n\rAppClientType\x12\x0c\n\x08NOT_USED\x10\x00\x12\x0b\n\x07GENERAL\x10\x01\x12%\n!DISCOVERY_AND_ROTATION_CONTROLLER\x10\x02\x12\x12\n\x0eKCM_CONTROLLER\x10\x03\x12\x11\n\rSELF_DESTRUCT\x10\x04*\x8f\x01\n\x1b\x44\x65leteEnterpriseUsersResult\x12\x0b\n\x07SUCCESS\x10\x00\x12\x1a\n\x16NOT_AN_ENTERPRISE_USER\x10\x01\x12\x16\n\x12\x43\x41NNOT_DELETE_SELF\x10\x02\x12$\n BRIDGE_CANNOT_DELETE_ACTIVE_USER\x10\x03\x12\t\n\x05\x45RROR\x10\x04*\x87\x01\n\x15\x43learSecurityDataType\x12\x1e\n\x1aRECALCULATE_SUMMARY_REPORT\x10\x00\x12\'\n#FORCE_CLIENT_CHECK_FOR_MISSING_DATA\x10\x01\x12%\n!FORCE_CLIENT_RESEND_SECURITY_DATA\x10\x02*J\n\x13ReserveDomainAction\x12\x10\n\x0c\x44OMAIN_TOKEN\x10\x00\x12\x0e\n\nDOMAIN_ADD\x10\x01\x12\x11\n\rDOMAIN_DELETE\x10\x02\x42&\n\x18\x63om.keepersecurity.protoB\nEnterpriseb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x65nterprise.proto\x12\nEnterprise\"\x84\x01\n\x18\x45nterpriseKeyPairRequest\x12\x1b\n\x13\x65nterprisePublicKey\x18\x01 \x01(\x0c\x12%\n\x1d\x65ncryptedEnterprisePrivateKey\x18\x02 
\x01(\x0c\x12$\n\x07keyType\x18\x03 \x01(\x0e\x32\x13.Enterprise.KeyType\"\'\n\x14GetTeamMemberRequest\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\"}\n\x0e\x45nterpriseUser\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\r\n\x05\x65mail\x18\x02 \x01(\t\x12\x1a\n\x12\x65nterpriseUsername\x18\x03 \x01(\t\x12\x14\n\x0cisShareAdmin\x18\x04 \x01(\x08\x12\x10\n\x08username\x18\x05 \x01(\t\"K\n\x15GetTeamMemberResponse\x12\x32\n\x0e\x65nterpriseUser\x18\x01 \x03(\x0b\x32\x1a.Enterprise.EnterpriseUser\"-\n\x11\x45nterpriseUserIds\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x03(\x03\"B\n\x19\x45nterprisePersonalAccount\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x16\n\x0eOBSOLETE_FIELD\x18\x02 \x01(\x0c\"S\n\x17\x45ncryptedTeamKeyRequest\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x18\n\x10\x65ncryptedTeamKey\x18\x02 \x01(\x0c\x12\r\n\x05\x66orce\x18\x03 \x01(\x08\"+\n\x0fReEncryptedData\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\t\"?\n\x12ReEncryptedRoleKey\x12\x0f\n\x07role_id\x18\x01 \x01(\x03\x12\x18\n\x10\x65ncryptedRoleKey\x18\x02 \x01(\x0c\"P\n\x16ReEncryptedUserDataKey\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x1c\n\x14userEncryptedDataKey\x18\x02 \x01(\x0c\"\xd8\x02\n\x1bNodeToManagedCompanyRequest\x12\x11\n\tcompanyId\x18\x01 \x01(\x05\x12*\n\x05nodes\x18\x02 \x03(\x0b\x32\x1b.Enterprise.ReEncryptedData\x12*\n\x05roles\x18\x03 \x03(\x0b\x32\x1b.Enterprise.ReEncryptedData\x12*\n\x05users\x18\x04 \x03(\x0b\x32\x1b.Enterprise.ReEncryptedData\x12\x30\n\x08roleKeys\x18\x05 \x03(\x0b\x32\x1e.Enterprise.ReEncryptedRoleKey\x12\x35\n\x08teamKeys\x18\x06 \x03(\x0b\x32#.Enterprise.EncryptedTeamKeyRequest\x12\x39\n\rusersDataKeys\x18\x07 \x03(\x0b\x32\".Enterprise.ReEncryptedUserDataKey\",\n\x08RoleTeam\x12\x0f\n\x07role_id\x18\x01 \x01(\x03\x12\x0f\n\x07teamUid\x18\x02 \x01(\x0c\"4\n\tRoleTeams\x12\'\n\trole_team\x18\x01 \x03(\x0b\x32\x14.Enterprise.RoleTeam\"/\n\x0bTeamsByRole\x12\x0f\n\x07role_id\x18\x01 
\x01(\x03\x12\x0f\n\x07teamUid\x18\x02 \x03(\x0c\"<\n\x12ManagedNodesByRole\x12\x0f\n\x07role_id\x18\x01 \x01(\x03\x12\x15\n\rmanagedNodeId\x18\x02 \x03(\x03\"R\n\x0fRoleUserAddKeys\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x0f\n\x07treeKey\x18\x02 \x01(\t\x12\x14\n\x0croleAdminKey\x18\x03 \x01(\t\"T\n\x0bRoleUserAdd\x12\x0f\n\x07role_id\x18\x01 \x01(\x03\x12\x34\n\x0froleUserAddKeys\x18\x02 \x03(\x0b\x32\x1b.Enterprise.RoleUserAddKeys\"D\n\x13RoleUsersAddRequest\x12-\n\x0croleUserAdds\x18\x01 \x03(\x0b\x32\x17.Enterprise.RoleUserAdd\"\x80\x01\n\x11RoleUserAddResult\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x18\n\x10\x65nterpriseUserId\x18\x02 \x01(\x03\x12\x30\n\x06status\x18\x03 \x01(\x0e\x32 .Enterprise.RoleUserModifyStatus\x12\x0f\n\x07message\x18\x04 \x01(\t\"F\n\x14RoleUsersAddResponse\x12.\n\x07results\x18\x01 \x03(\x0b\x32\x1d.Enterprise.RoleUserAddResult\"<\n\x0eRoleUserRemove\x12\x0f\n\x07role_id\x18\x01 \x01(\x03\x12\x19\n\x11\x65nterpriseUserIds\x18\x02 \x03(\x03\"M\n\x16RoleUsersRemoveRequest\x12\x33\n\x0froleUserRemoves\x18\x01 \x03(\x0b\x32\x1a.Enterprise.RoleUserRemove\"\x83\x01\n\x14RoleUserRemoveResult\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x18\n\x10\x65nterpriseUserId\x18\x02 \x01(\x03\x12\x30\n\x06status\x18\x03 \x01(\x0e\x32 .Enterprise.RoleUserModifyStatus\x12\x0f\n\x07message\x18\x04 \x01(\t\"L\n\x17RoleUsersRemoveResponse\x12\x31\n\x07results\x18\x01 \x03(\x0b\x32 .Enterprise.RoleUserRemoveResult\"\xa0\x04\n\x16\x45nterpriseRegistration\x12\x18\n\x10\x65ncryptedTreeKey\x18\x01 \x01(\x0c\x12\x16\n\x0e\x65nterpriseName\x18\x02 \x01(\t\x12\x14\n\x0crootNodeData\x18\x03 \x01(\x0c\x12\x15\n\radminUserData\x18\x04 \x01(\x0c\x12\x11\n\tadminName\x18\x05 \x01(\t\x12\x10\n\x08roleData\x18\x06 \x01(\x0c\x12\x38\n\nrsaKeyPair\x18\x07 \x01(\x0b\x32$.Enterprise.EnterpriseKeyPairRequest\x12\x13\n\x0bnumberSeats\x18\x08 \x01(\x05\x12\x32\n\x0e\x65nterpriseType\x18\t \x01(\x0e\x32\x1a.Enterprise.EnterpriseType\x12\x15\n\rrolePublicKey\x18\n 
\x01(\x0c\x12*\n\"rolePrivateKeyEncryptedWithRoleKey\x18\x0b \x01(\x0c\x12#\n\x1broleKeyEncryptedWithTreeKey\x18\x0c \x01(\x0c\x12\x38\n\neccKeyPair\x18\r \x01(\x0b\x32$.Enterprise.EnterpriseKeyPairRequest\x12\x18\n\x10\x61llUsersRoleData\x18\x0e \x01(\x0c\x12)\n!roleKeyEncryptedWithUserPublicKey\x18\x0f \x01(\x0c\x12\x18\n\x10\x61pproverRoleData\x18\x10 \x01(\x0c\"H\n\x1a\x44omainPasswordRulesRequest\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x18\n\x10verificationCode\x18\x02 \x01(\t\"\\\n\x19\x44omainPasswordRulesFields\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0f\n\x07minimum\x18\x02 \x01(\x05\x12\x0f\n\x07maximum\x18\x03 \x01(\x05\x12\x0f\n\x07\x61llowed\x18\x04 \x01(\x08\"E\n\x10LoginToMcRequest\x12\x16\n\x0emcEnterpriseId\x18\x01 \x01(\x05\x12\x19\n\x11messageSessionUid\x18\x02 \x01(\x0c\"d\n\x11LoginToMcResponse\x12\x1d\n\x15\x65ncryptedSessionToken\x18\x01 \x01(\x0c\x12\x18\n\x10\x65ncryptedTreeKey\x18\x02 \x01(\t\x12\x16\n\x0e\x66orbidKeyType2\x18\x04 \x01(\x08\"g\n\x1b\x44omainPasswordRulesResponse\x12H\n\x19\x64omainPasswordRulesFields\x18\x01 \x03(\x0b\x32%.Enterprise.DomainPasswordRulesFields\"\x88\x01\n\x18\x41pproveUserDeviceRequest\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x1c\n\x14\x65ncryptedDeviceToken\x18\x02 \x01(\x0c\x12\x1e\n\x16\x65ncryptedDeviceDataKey\x18\x03 \x01(\x0c\x12\x14\n\x0c\x64\x65nyApproval\x18\x04 \x01(\x08\"t\n\x19\x41pproveUserDeviceResponse\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x1c\n\x14\x65ncryptedDeviceToken\x18\x02 \x01(\x0c\x12\x0e\n\x06\x66\x61iled\x18\x03 \x01(\x08\x12\x0f\n\x07message\x18\x04 \x01(\t\"Y\n\x19\x41pproveUserDevicesRequest\x12<\n\x0e\x64\x65viceRequests\x18\x01 \x03(\x0b\x32$.Enterprise.ApproveUserDeviceRequest\"\\\n\x1a\x41pproveUserDevicesResponse\x12>\n\x0f\x64\x65viceResponses\x18\x01 \x03(\x0b\x32%.Enterprise.ApproveUserDeviceResponse\"\x87\x01\n\x15\x45nterpriseUserDataKey\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x1c\n\x14userEncryptedDataKey\x18\x02 
\x01(\x0c\x12\x11\n\tkeyTypeId\x18\x03 \x01(\x05\x12\x0f\n\x07roleKey\x18\x04 \x01(\x0c\x12\x12\n\nprivateKey\x18\x05 \x01(\x0c\"I\n\x16\x45nterpriseUserDataKeys\x12/\n\x04keys\x18\x01 \x03(\x0b\x32!.Enterprise.EnterpriseUserDataKey\"g\n\x1a\x45nterpriseUserDataKeyLight\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x1c\n\x14userEncryptedDataKey\x18\x02 \x01(\x0c\x12\x11\n\tkeyTypeId\x18\x03 \x01(\x05\"d\n\x1c\x45nterpriseUserDataKeysByNode\x12\x0e\n\x06nodeId\x18\x01 \x01(\x03\x12\x34\n\x04keys\x18\x02 \x03(\x0b\x32&.Enterprise.EnterpriseUserDataKeyLight\"^\n$EnterpriseUserDataKeysByNodeResponse\x12\x36\n\x04keys\x18\x01 \x03(\x0b\x32(.Enterprise.EnterpriseUserDataKeysByNode\"2\n\x15\x45nterpriseDataRequest\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\"0\n\x13SpecialProvisioning\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"\x84\x02\n\x11GeneralDataEntity\x12\x16\n\x0e\x65nterpriseName\x18\x01 \x01(\t\x12\x1a\n\x12restrictVisibility\x18\x02 \x01(\x08\x12<\n\x13specialProvisioning\x18\x04 \x01(\x0b\x32\x1f.Enterprise.SpecialProvisioning\x12\x30\n\ruserPrivilege\x18\x07 \x01(\x0b\x32\x19.Enterprise.UserPrivilege\x12\x13\n\x0b\x64istributor\x18\x08 \x01(\x08\x12\x1d\n\x15\x66orbidAccountTransfer\x18\t \x01(\x08\x12\x17\n\x0fshowUserOnboard\x18\n \x01(\x08\"\xfd\x01\n\x04Node\x12\x0e\n\x06nodeId\x18\x01 \x01(\x03\x12\x10\n\x08parentId\x18\x02 \x01(\x03\x12\x10\n\x08\x62ridgeId\x18\x03 \x01(\x03\x12\x0e\n\x06scimId\x18\x04 \x01(\x03\x12\x11\n\tlicenseId\x18\x05 \x01(\x03\x12\x15\n\rencryptedData\x18\x06 \x01(\t\x12\x12\n\nduoEnabled\x18\x07 \x01(\x08\x12\x12\n\nrsaEnabled\x18\x08 \x01(\x08\x12 \n\x14ssoServiceProviderId\x18\t \x01(\x03\x42\x02\x18\x01\x12\x1a\n\x12restrictVisibility\x18\n \x01(\x08\x12!\n\x15ssoServiceProviderIds\x18\x0b \x03(\x03\x42\x02\x10\x01\"\x8e\x01\n\x04Role\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x15\n\rencryptedData\x18\x03 \x01(\t\x12\x0f\n\x07keyType\x18\x04 
\x01(\t\x12\x14\n\x0cvisibleBelow\x18\x05 \x01(\x08\x12\x16\n\x0enewUserInherit\x18\x06 \x01(\x08\x12\x10\n\x08roleType\x18\x07 \x01(\t\"\xb8\x02\n\x04User\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x15\n\rencryptedData\x18\x03 \x01(\t\x12\x0f\n\x07keyType\x18\x04 \x01(\t\x12\x10\n\x08username\x18\x05 \x01(\t\x12\x0e\n\x06status\x18\x06 \x01(\t\x12\x0c\n\x04lock\x18\x07 \x01(\x05\x12\x0e\n\x06userId\x18\x08 \x01(\x05\x12\x1e\n\x16\x61\x63\x63ountShareExpiration\x18\t \x01(\x03\x12\x10\n\x08\x66ullName\x18\n \x01(\t\x12\x10\n\x08jobTitle\x18\x0b \x01(\t\x12\x12\n\ntfaEnabled\x18\x0c \x01(\x08\x12\x46\n\x18transferAcceptanceStatus\x18\r \x01(\x0e\x32$.Enterprise.TransferAcceptanceStatus\"7\n\tUserAlias\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x10\n\x08username\x18\x02 \x01(\t\"\xac\x01\n\x18\x43omplianceReportMetaData\x12\x11\n\treportUid\x18\x01 \x01(\x0c\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x12\n\nreportName\x18\x03 \x01(\t\x12\x15\n\rdateGenerated\x18\x04 \x01(\x03\x12\x11\n\trunByName\x18\x05 \x01(\t\x12\x16\n\x0enumberOfOwners\x18\x07 \x01(\x05\x12\x17\n\x0fnumberOfRecords\x18\x08 \x01(\x05\"S\n\x0bManagedNode\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x15\n\rmanagedNodeId\x18\x02 \x01(\x03\x12\x1d\n\x15\x63\x61scadeNodeManagement\x18\x03 \x01(\x08\"T\n\x0fUserManagedNode\x12\x0e\n\x06nodeId\x18\x01 \x01(\x03\x12\x1d\n\x15\x63\x61scadeNodeManagement\x18\x02 \x01(\x08\x12\x12\n\nprivileges\x18\x03 \x03(\t\"w\n\rUserPrivilege\x12\x35\n\x10userManagedNodes\x18\x01 \x03(\x0b\x32\x1b.Enterprise.UserManagedNode\x12\x18\n\x10\x65nterpriseUserId\x18\x02 \x01(\x03\x12\x15\n\rencryptedData\x18\x03 \x01(\t\"4\n\x08RoleUser\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x18\n\x10\x65nterpriseUserId\x18\x02 \x01(\x03\"M\n\rRolePrivilege\x12\x15\n\rmanagedNodeId\x18\x01 \x01(\x03\x12\x0e\n\x06roleId\x18\x02 \x01(\x03\x12\x15\n\rprivilegeType\x18\x03 
\x01(\t\"T\n\x17PrivilegesByManagedNode\x12\x15\n\rmanagedNodeId\x18\x01 \x01(\x03\x12\x0e\n\x06roleId\x18\x02 \x01(\x03\x12\x12\n\nprivileges\x18\x03 \x03(\t\"I\n\x0fRoleEnforcement\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x17\n\x0f\x65nforcementType\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\"\xa9\x01\n\x04Team\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06nodeId\x18\x03 \x01(\x03\x12\x14\n\x0crestrictEdit\x18\x04 \x01(\x08\x12\x15\n\rrestrictShare\x18\x05 \x01(\x08\x12\x14\n\x0crestrictView\x18\x06 \x01(\x08\x12\x15\n\rencryptedData\x18\x07 \x01(\t\x12\x18\n\x10\x65ncryptedTeamKey\x18\x08 \x01(\t\"G\n\x08TeamUser\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x18\n\x10\x65nterpriseUserId\x18\x02 \x01(\x03\x12\x10\n\x08userType\x18\x03 \x01(\t\"K\n\x1aGetDistributorInfoResponse\x12-\n\x0c\x64istributors\x18\x01 \x03(\x0b\x32\x17.Enterprise.Distributor\"B\n\x0b\x44istributor\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\x08mspInfos\x18\x02 \x03(\x0b\x32\x13.Enterprise.MspInfo\"\x9d\x02\n\x07MspInfo\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12\x16\n\x0e\x65nterpriseName\x18\x02 \x01(\t\x12\x19\n\x11\x61llocatedLicenses\x18\x03 \x01(\x05\x12\x19\n\x11\x61llowedMcProducts\x18\x04 \x03(\t\x12\x15\n\rallowedAddOns\x18\x05 \x03(\t\x12\x17\n\x0fmaxFilePlanType\x18\x06 \x01(\t\x12\x34\n\x10managedCompanies\x18\x07 \x03(\x0b\x32\x1a.Enterprise.ManagedCompany\x12\x1e\n\x16\x61llowUnlimitedLicenses\x18\x08 \x01(\x08\x12(\n\x06\x61\x64\x64Ons\x18\t \x03(\x0b\x32\x18.Enterprise.LicenseAddOn\"\x91\x02\n\x0eManagedCompany\x12\x16\n\x0emcEnterpriseId\x18\x01 \x01(\x05\x12\x18\n\x10mcEnterpriseName\x18\x02 \x01(\t\x12\x11\n\tmspNodeId\x18\x03 \x01(\x03\x12\x15\n\rnumberOfSeats\x18\x04 \x01(\x05\x12\x15\n\rnumberOfUsers\x18\x05 \x01(\x05\x12\x11\n\tproductId\x18\x06 \x01(\t\x12\x11\n\tisExpired\x18\x07 \x01(\x08\x12\x0f\n\x07treeKey\x18\x08 \x01(\t\x12\x15\n\rtree_key_role\x18\t \x01(\x03\x12\x14\n\x0c\x66ilePlanType\x18\n 
\x01(\t\x12(\n\x06\x61\x64\x64Ons\x18\x0b \x03(\x0b\x32\x18.Enterprise.LicenseAddOn\"R\n\x07MSPPool\x12\x11\n\tproductId\x18\x01 \x01(\t\x12\r\n\x05seats\x18\x02 \x01(\x05\x12\x16\n\x0e\x61vailableSeats\x18\x03 \x01(\x05\x12\r\n\x05stash\x18\x04 \x01(\x05\":\n\nMSPContact\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12\x16\n\x0e\x65nterpriseName\x18\x02 \x01(\t\"\xec\x01\n\x0cLicenseAddOn\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x65nabled\x18\x02 \x01(\x08\x12\x0f\n\x07isTrial\x18\x03 \x01(\x08\x12\x12\n\nexpiration\x18\x04 \x01(\x03\x12\x0f\n\x07\x63reated\x18\x05 \x01(\x03\x12\r\n\x05seats\x18\x06 \x01(\x05\x12\x16\n\x0e\x61\x63tivationTime\x18\x07 \x01(\x03\x12\x19\n\x11includedInProduct\x18\x08 \x01(\x08\x12\x14\n\x0c\x61piCallCount\x18\t \x01(\x05\x12\x17\n\x0ftierDescription\x18\n \x01(\t\x12\x16\n\x0eseatsAllocated\x18\x0b \x01(\x05\"s\n\tMCDefault\x12\x11\n\tmcProduct\x18\x01 \x01(\t\x12\x0e\n\x06\x61\x64\x64Ons\x18\x02 \x03(\t\x12\x14\n\x0c\x66ilePlanType\x18\x03 \x01(\t\x12\x13\n\x0bmaxLicenses\x18\x04 \x01(\x05\x12\x18\n\x10\x66ixedMaxLicenses\x18\x05 \x01(\x08\"\xd2\x01\n\nMSPPermits\x12\x12\n\nrestricted\x18\x01 \x01(\x08\x12\x1a\n\x12maxAllowedLicenses\x18\x02 \x01(\x05\x12\x19\n\x11\x61llowedMcProducts\x18\x03 \x03(\t\x12\x15\n\rallowedAddOns\x18\x04 \x03(\t\x12\x17\n\x0fmaxFilePlanType\x18\x05 \x01(\t\x12\x1e\n\x16\x61llowUnlimitedLicenses\x18\x06 \x01(\x08\x12)\n\nmcDefaults\x18\x07 \x03(\x0b\x32\x15.Enterprise.MCDefault\"\xa0\x04\n\x07License\x12\x0c\n\x04paid\x18\x01 \x01(\x08\x12\x15\n\rnumberOfSeats\x18\x02 \x01(\x05\x12\x12\n\nexpiration\x18\x03 \x01(\x03\x12\x14\n\x0clicenseKeyId\x18\x04 \x01(\x05\x12\x15\n\rproductTypeId\x18\x05 \x01(\x05\x12\x0c\n\x04name\x18\x06 \x01(\t\x12\x1b\n\x13\x65nterpriseLicenseId\x18\x07 \x01(\x03\x12\x16\n\x0eseatsAllocated\x18\x08 \x01(\x05\x12\x14\n\x0cseatsPending\x18\t \x01(\x05\x12\x0c\n\x04tier\x18\n \x01(\x05\x12\x16\n\x0e\x66ilePlanTypeId\x18\x0b \x01(\x05\x12\x10\n\x08maxBytes\x18\x0c 
\x01(\x03\x12\x19\n\x11storageExpiration\x18\r \x01(\x03\x12\x15\n\rlicenseStatus\x18\x0e \x01(\t\x12$\n\x07mspPool\x18\x0f \x03(\x0b\x32\x13.Enterprise.MSPPool\x12)\n\tmanagedBy\x18\x10 \x01(\x0b\x32\x16.Enterprise.MSPContact\x12(\n\x06\x61\x64\x64Ons\x18\x11 \x03(\x0b\x32\x18.Enterprise.LicenseAddOn\x12\x17\n\x0fnextBillingDate\x18\x12 \x01(\x03\x12\x17\n\x0fhasMSPLegacyLog\x18\x13 \x01(\x08\x12*\n\nmspPermits\x18\x14 \x01(\x0b\x32\x16.Enterprise.MSPPermits\x12\x13\n\x0b\x64istributor\x18\x15 \x01(\x08\"n\n\x06\x42ridge\x12\x10\n\x08\x62ridgeId\x18\x01 \x01(\x03\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x18\n\x10wanIpEnforcement\x18\x03 \x01(\t\x12\x18\n\x10lanIpEnforcement\x18\x04 \x01(\t\x12\x0e\n\x06status\x18\x05 \x01(\t\"t\n\x04Scim\x12\x0e\n\x06scimId\x18\x01 \x01(\x03\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\x12\n\nlastSynced\x18\x04 \x01(\x03\x12\x12\n\nrolePrefix\x18\x05 \x01(\t\x12\x14\n\x0cuniqueGroups\x18\x06 \x01(\x08\"L\n\x0e\x45mailProvision\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x0e\n\x06\x64omain\x18\x03 \x01(\t\x12\x0e\n\x06method\x18\x04 \x01(\t\"R\n\nQueuedTeam\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06nodeId\x18\x03 \x01(\x03\x12\x15\n\rencryptedData\x18\x04 \x01(\t\"0\n\x0eQueuedTeamUser\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\r\n\x05users\x18\x02 \x03(\x03\"\xa4\x01\n\x0eTeamsAddResult\x12\x34\n\x11successfulTeamAdd\x18\x01 \x03(\x0b\x32\x19.Enterprise.TeamAddResult\x12\x36\n\x13unsuccessfulTeamAdd\x18\x02 \x03(\x0b\x32\x19.Enterprise.TeamAddResult\x12\x0e\n\x06result\x18\x03 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x04 \x01(\t\"U\n\rTeamAddResult\x12\x1e\n\x04team\x18\x01 \x01(\x0b\x32\x10.Enterprise.Team\x12\x0e\n\x06result\x18\x02 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x03 \x01(\t\"\x91\x01\n\nSsoService\x12\x1c\n\x14ssoServiceProviderId\x18\x01 \x01(\x03\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x0c\n\x04name\x18\x03 
\x01(\t\x12\x0e\n\x06sp_url\x18\x04 \x01(\t\x12\x16\n\x0einviteNewUsers\x18\x05 \x01(\x08\x12\x0e\n\x06\x61\x63tive\x18\x06 \x01(\x08\x12\x0f\n\x07isCloud\x18\x07 \x01(\x08\"1\n\x10ReportFilterUser\x12\x0e\n\x06userId\x18\x01 \x01(\x05\x12\r\n\x05\x65mail\x18\x02 \x01(\t\"\x97\x02\n\x1d\x44\x65viceRequestForAdminApproval\x12\x10\n\x08\x64\x65viceId\x18\x01 \x01(\x03\x12\x18\n\x10\x65nterpriseUserId\x18\x02 \x01(\x03\x12\x1c\n\x14\x65ncryptedDeviceToken\x18\x03 \x01(\x0c\x12\x17\n\x0f\x64\x65vicePublicKey\x18\x04 \x01(\x0c\x12\x12\n\ndeviceName\x18\x05 \x01(\t\x12\x15\n\rclientVersion\x18\x06 \x01(\t\x12\x12\n\ndeviceType\x18\x07 \x01(\t\x12\x0c\n\x04\x64\x61te\x18\x08 \x01(\x03\x12\x11\n\tipAddress\x18\t \x01(\t\x12\x10\n\x08location\x18\n \x01(\t\x12\r\n\x05\x65mail\x18\x0b \x01(\t\x12\x12\n\naccountUid\x18\x0c \x01(\x0c\"`\n\x0e\x45nterpriseData\x12\x30\n\x06\x65ntity\x18\x01 \x01(\x0e\x32 .Enterprise.EnterpriseDataEntity\x12\x0e\n\x06\x64\x65lete\x18\x02 \x01(\x08\x12\x0c\n\x04\x64\x61ta\x18\x03 \x03(\x0c\"\xd0\x01\n\x16\x45nterpriseDataResponse\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x0f\n\x07hasMore\x18\x02 \x01(\x08\x12,\n\x0b\x63\x61\x63heStatus\x18\x03 \x01(\x0e\x32\x17.Enterprise.CacheStatus\x12(\n\x04\x64\x61ta\x18\x04 \x03(\x0b\x32\x1a.Enterprise.EnterpriseData\x12\x32\n\x0bgeneralData\x18\x05 \x01(\x0b\x32\x1d.Enterprise.GeneralDataEntity\"*\n\rBackupRequest\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\"\x98\x01\n\x0c\x42\x61\x63kupRecord\x12\x0e\n\x06userId\x18\x01 \x01(\x05\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x0b\n\x03key\x18\x03 \x01(\x0c\x12*\n\x07keyType\x18\x04 \x01(\x0e\x32\x19.Enterprise.BackupKeyType\x12\x0f\n\x07version\x18\x05 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\x0c\x12\r\n\x05\x65xtra\x18\x07 \x01(\x0c\".\n\tBackupKey\x12\x0e\n\x06userId\x18\x01 \x01(\x05\x12\x11\n\tbackupKey\x18\x02 \x01(\x0c\"\x8d\x02\n\nBackupUser\x12\x0e\n\x06userId\x18\x01 \x01(\x05\x12\x10\n\x08userName\x18\x02 
\x01(\t\x12\x0f\n\x07\x64\x61taKey\x18\x03 \x01(\x0c\x12\x36\n\x0b\x64\x61taKeyType\x18\x04 \x01(\x0e\x32!.Enterprise.BackupUserDataKeyType\x12\x12\n\nprivateKey\x18\x05 \x01(\x0c\x12\x0f\n\x07treeKey\x18\x06 \x01(\x0c\x12.\n\x0btreeKeyType\x18\x07 \x01(\x0e\x32\x19.Enterprise.BackupKeyType\x12)\n\nbackupKeys\x18\x08 \x03(\x0b\x32\x15.Enterprise.BackupKey\x12\x14\n\x0cprivateECKey\x18\t \x01(\x0c\"\x9e\x01\n\x0e\x42\x61\x63kupResponse\x12\x1f\n\x17\x65nterpriseEccPrivateKey\x18\x01 \x01(\x0c\x12%\n\x05users\x18\x02 \x03(\x0b\x32\x16.Enterprise.BackupUser\x12)\n\x07records\x18\x03 \x03(\x0b\x32\x18.Enterprise.BackupRecord\x12\x19\n\x11\x63ontinuationToken\x18\x04 \x01(\x0c\"e\n\nBackupFile\x12\x0c\n\x04user\x18\x01 \x01(\t\x12\x11\n\tbackupUid\x18\x02 \x01(\x0c\x12\x10\n\x08\x66ileName\x18\x03 \x01(\t\x12\x0f\n\x07\x63reated\x18\x04 \x01(\x03\x12\x13\n\x0b\x64ownloadUrl\x18\x05 \x01(\t\"8\n\x0f\x42\x61\x63kupsResponse\x12%\n\x05\x66iles\x18\x01 \x03(\x0b\x32\x16.Enterprise.BackupFile\".\n\x1cGetEnterpriseDataKeysRequest\x12\x0e\n\x06roleId\x18\x01 \x03(\x03\"\xff\x01\n\x1dGetEnterpriseDataKeysResponse\x12:\n\x12reEncryptedRoleKey\x18\x01 \x03(\x0b\x32\x1e.Enterprise.ReEncryptedRoleKey\x12$\n\x07roleKey\x18\x02 \x03(\x0b\x32\x13.Enterprise.RoleKey\x12\"\n\x06mspKey\x18\x03 \x01(\x0b\x32\x12.Enterprise.MspKey\x12\x32\n\x0e\x65nterpriseKeys\x18\x04 \x01(\x0b\x32\x1a.Enterprise.EnterpriseKeys\x12$\n\x07treeKey\x18\x05 \x01(\x0b\x32\x13.Enterprise.TreeKey\"^\n\x07RoleKey\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x14\n\x0c\x65ncryptedKey\x18\x02 \x01(\t\x12-\n\x07keyType\x18\x03 \x01(\x0e\x32\x1c.Enterprise.EncryptedKeyType\"d\n\x06MspKey\x12\x1b\n\x13\x65ncryptedMspTreeKey\x18\x01 \x01(\t\x12=\n\x17\x65ncryptedMspTreeKeyType\x18\x02 \x01(\x0e\x32\x1c.Enterprise.EncryptedKeyType\"|\n\x0e\x45nterpriseKeys\x12\x14\n\x0crsaPublicKey\x18\x01 \x01(\x0c\x12\x1e\n\x16rsaEncryptedPrivateKey\x18\x02 \x01(\x0c\x12\x14\n\x0c\x65\x63\x63PublicKey\x18\x03 
\x01(\x0c\x12\x1e\n\x16\x65\x63\x63\x45ncryptedPrivateKey\x18\x04 \x01(\x0c\"H\n\x07TreeKey\x12\x0f\n\x07treeKey\x18\x01 \x01(\t\x12,\n\tkeyTypeId\x18\x02 \x01(\x0e\x32\x19.Enterprise.BackupKeyType\"E\n\x14SharedRecordResponse\x12-\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x1d.Enterprise.SharedRecordEvent\"p\n\x11SharedRecordEvent\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08userName\x18\x02 \x01(\t\x12\x0f\n\x07\x63\x61nEdit\x18\x03 \x01(\x08\x12\x12\n\ncanReshare\x18\x04 \x01(\x08\x12\x11\n\tshareFrom\x18\x05 \x01(\x05\".\n\x1cSetRestrictVisibilityRequest\x12\x0e\n\x06nodeId\x18\x01 \x01(\x03\"\xd0\x01\n\x0eUserAddRequest\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12-\n\x07keyType\x18\x04 \x01(\x0e\x32\x1c.Enterprise.EncryptedKeyType\x12\x10\n\x08\x66ullName\x18\x05 \x01(\t\x12\x10\n\x08jobTitle\x18\x06 \x01(\t\x12\r\n\x05\x65mail\x18\x07 \x01(\t\x12\x1b\n\x13suppressEmailInvite\x18\x08 \x01(\x08\":\n\x11UserUpdateRequest\x12%\n\x05users\x18\x01 \x03(\x0b\x32\x16.Enterprise.UserUpdate\"\xaf\x01\n\nUserUpdate\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x0e\n\x06nodeId\x18\x02 \x01(\x03\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12-\n\x07keyType\x18\x04 \x01(\x0e\x32\x1c.Enterprise.EncryptedKeyType\x12\x10\n\x08\x66ullName\x18\x05 \x01(\t\x12\x10\n\x08jobTitle\x18\x06 \x01(\t\x12\r\n\x05\x65mail\x18\x07 \x01(\t\"A\n\x12UserUpdateResponse\x12+\n\x05users\x18\x01 \x03(\x0b\x32\x1c.Enterprise.UserUpdateResult\"Z\n\x10UserUpdateResult\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12,\n\x06status\x18\x02 \x01(\x0e\x32\x1c.Enterprise.UserUpdateStatus\"J\n\x1d\x43omplianceRecordOwnersRequest\x12\x0f\n\x07nodeIds\x18\x01 \x03(\x03\x12\x18\n\x10includeNonShared\x18\x02 \x01(\x08\"O\n\x1e\x43omplianceRecordOwnersResponse\x12-\n\x0crecordOwners\x18\x01 \x03(\x0b\x32\x17.Enterprise.RecordOwner\"7\n\x0bRecordOwner\x12\x18\n\x10\x65nterpriseUserId\x18\x01 
\x01(\x03\x12\x0e\n\x06shared\x18\x02 \x01(\x08\"\xa6\x01\n PreliminaryComplianceDataRequest\x12\x19\n\x11\x65nterpriseUserIds\x18\x01 \x03(\x03\x12\x18\n\x10includeNonShared\x18\x02 \x01(\x08\x12\x19\n\x11\x63ontinuationToken\x18\x03 \x01(\x0c\x12\x32\n*includeTotalMatchingRecordsInFirstResponse\x18\x04 \x01(\x08\"\x9f\x01\n!PreliminaryComplianceDataResponse\x12\x30\n\rauditUserData\x18\x01 \x03(\x0b\x32\x19.Enterprise.AuditUserData\x12\x19\n\x11\x63ontinuationToken\x18\x02 \x01(\x0c\x12\x0f\n\x07hasMore\x18\x03 \x01(\x08\x12\x1c\n\x14totalMatchingRecords\x18\x04 \x01(\x05\"K\n\x0f\x41uditUserRecord\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x15\n\rencryptedData\x18\x02 \x01(\x0c\x12\x0e\n\x06shared\x18\x03 \x01(\x08\"\x8d\x01\n\rAuditUserData\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x35\n\x10\x61uditUserRecords\x18\x02 \x03(\x0b\x32\x1b.Enterprise.AuditUserRecord\x12+\n\x06status\x18\x03 \x01(\x0e\x32\x1b.Enterprise.AuditUserStatus\"\x7f\n\x17\x43omplianceReportFilters\x12\x14\n\x0crecordTitles\x18\x01 \x03(\t\x12\x12\n\nrecordUids\x18\x02 \x03(\x0c\x12\x11\n\tjobTitles\x18\x03 \x03(\x03\x12\x0c\n\x04urls\x18\x04 \x03(\t\x12\x19\n\x11\x65nterpriseUserIds\x18\x05 \x03(\x03\"\x7f\n\x17\x43omplianceReportRequest\x12<\n\x13\x63omplianceReportRun\x18\x01 \x01(\x0b\x32\x1f.Enterprise.ComplianceReportRun\x12\x12\n\nreportName\x18\x02 \x01(\t\x12\x12\n\nsaveReport\x18\x03 \x01(\x08\"\x85\x01\n\x13\x43omplianceReportRun\x12N\n\x17reportCriteriaAndFilter\x18\x01 \x01(\x0b\x32-.Enterprise.ComplianceReportCriteriaAndFilter\x12\r\n\x05users\x18\x02 \x03(\x03\x12\x0f\n\x07records\x18\x03 \x03(\x0c\"\xfc\x01\n!ComplianceReportCriteriaAndFilter\x12\x0e\n\x06nodeId\x18\x01 \x01(\x03\x12\x13\n\x0b\x63riteriaUid\x18\x02 \x01(\x0c\x12\x14\n\x0c\x63riteriaName\x18\x03 \x01(\t\x12\x36\n\x08\x63riteria\x18\x04 \x01(\x0b\x32$.Enterprise.ComplianceReportCriteria\x12\x33\n\x07\x66ilters\x18\x05 
\x03(\x0b\x32\".Enterprise.ComplianceReportFilter\x12\x14\n\x0clastModified\x18\x06 \x01(\x03\x12\x19\n\x11nodeEncryptedData\x18\x07 \x01(\x0c\"b\n\x18\x43omplianceReportCriteria\x12\x11\n\tjobTitles\x18\x01 \x03(\t\x12\x19\n\x11\x65nterpriseUserIds\x18\x02 \x03(\x03\x12\x18\n\x10includeNonShared\x18\x03 \x01(\x08\"x\n\x16\x43omplianceReportFilter\x12\x14\n\x0crecordTitles\x18\x01 \x03(\t\x12\x12\n\nrecordUids\x18\x02 \x03(\x0c\x12\x11\n\tjobTitles\x18\x03 \x03(\t\x12\x0c\n\x04urls\x18\x04 \x03(\t\x12\x13\n\x0brecordTypes\x18\x05 \x03(\t\"\xa1\x05\n\x18\x43omplianceReportResponse\x12\x15\n\rdateGenerated\x18\x01 \x01(\x03\x12\x15\n\rrunByUserName\x18\x02 \x01(\t\x12\x12\n\nreportName\x18\x03 \x01(\t\x12\x11\n\treportUid\x18\x04 \x01(\x0c\x12<\n\x13\x63omplianceReportRun\x18\x05 \x01(\x0b\x32\x1f.Enterprise.ComplianceReportRun\x12-\n\x0cuserProfiles\x18\x06 \x03(\x0b\x32\x17.Enterprise.UserProfile\x12)\n\nauditTeams\x18\x07 \x03(\x0b\x32\x15.Enterprise.AuditTeam\x12-\n\x0c\x61uditRecords\x18\x08 \x03(\x0b\x32\x17.Enterprise.AuditRecord\x12+\n\x0buserRecords\x18\t \x03(\x0b\x32\x16.Enterprise.UserRecord\x12;\n\x13sharedFolderRecords\x18\n \x03(\x0b\x32\x1e.Enterprise.SharedFolderRecord\x12\x37\n\x11sharedFolderUsers\x18\x0b \x03(\x0b\x32\x1c.Enterprise.SharedFolderUser\x12\x37\n\x11sharedFolderTeams\x18\x0c \x03(\x0b\x32\x1c.Enterprise.SharedFolderTeam\x12\x31\n\x0e\x61uditTeamUsers\x18\r \x03(\x0b\x32\x19.Enterprise.AuditTeamUser\x12)\n\nauditRoles\x18\x0e \x03(\x0b\x32\x15.Enterprise.AuditRole\x12/\n\rlinkedRecords\x18\x0f \x03(\x0b\x32\x18.Enterprise.LinkedRecord\"\x81\x01\n\x0b\x41uditRecord\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x11\n\tauditData\x18\x02 \x01(\x0c\x12\x16\n\x0ehasAttachments\x18\x03 \x01(\x08\x12\x0f\n\x07inTrash\x18\x04 \x01(\x08\x12\x10\n\x08treeLeft\x18\x05 \x01(\x05\x12\x11\n\ttreeRight\x18\x06 \x01(\x05\"\x80\x02\n\tAuditRole\x12\x0e\n\x06roleId\x18\x01 \x01(\x03\x12\x15\n\rencryptedData\x18\x02 
\x01(\x0c\x12&\n\x1erestrictShareOutsideEnterprise\x18\x03 \x01(\x08\x12\x18\n\x10restrictShareAll\x18\x04 \x01(\x08\x12\"\n\x1arestrictShareOfAttachments\x18\x05 \x01(\x08\x12)\n!restrictMaskPasswordsWhileEditing\x18\x06 \x01(\x08\x12;\n\x13roleNodeManagements\x18\x07 \x03(\x0b\x32\x1e.Enterprise.RoleNodeManagement\"^\n\x12RoleNodeManagement\x12\x10\n\x08treeLeft\x18\x01 \x01(\x05\x12\x11\n\ttreeRight\x18\x02 \x01(\x05\x12\x0f\n\x07\x63\x61scade\x18\x03 \x01(\x08\x12\x12\n\nprivileges\x18\x04 \x01(\x05\"k\n\x0bUserProfile\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x10\n\x08\x66ullName\x18\x02 \x01(\t\x12\x10\n\x08jobTitle\x18\x03 \x01(\t\x12\r\n\x05\x65mail\x18\x04 \x01(\t\x12\x0f\n\x07roleIds\x18\x05 \x03(\x03\"=\n\x10RecordPermission\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x16\n\x0epermissionBits\x18\x02 \x01(\x05\"_\n\nUserRecord\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x37\n\x11recordPermissions\x18\x02 \x03(\x0b\x32\x1c.Enterprise.RecordPermission\"[\n\tAuditTeam\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x10\n\x08teamName\x18\x02 \x01(\t\x12\x14\n\x0crestrictEdit\x18\x03 \x01(\x08\x12\x15\n\rrestrictShare\x18\x04 \x01(\x08\";\n\rAuditTeamUser\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x19\n\x11\x65nterpriseUserIds\x18\x02 \x03(\x03\"\x9f\x01\n\x12SharedFolderRecord\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x37\n\x11recordPermissions\x18\x02 \x03(\x0b\x32\x1c.Enterprise.RecordPermission\x12\x37\n\x11shareAdminRecords\x18\x03 \x03(\x0b\x32\x1c.Enterprise.ShareAdminRecord\"M\n\x10ShareAdminRecord\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x1f\n\x17recordPermissionIndexes\x18\x02 \x03(\x05\"F\n\x10SharedFolderUser\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x19\n\x11\x65nterpriseUserIds\x18\x02 \x03(\x03\"=\n\x10SharedFolderTeam\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x10\n\x08teamUids\x18\x02 \x03(\x0c\"/\n\x1aGetComplianceReportRequest\x12\x11\n\treportUid\x18\x01 
\x01(\x0c\"2\n\x1bGetComplianceReportResponse\x12\x13\n\x0b\x64ownloadUrl\x18\x01 \x01(\t\"6\n\x1f\x43omplianceReportCriteriaRequest\x12\x13\n\x0b\x63riteriaUid\x18\x01 \x01(\x0c\";\n$SaveComplianceReportCriteriaResponse\x12\x13\n\x0b\x63riteriaUid\x18\x01 \x01(\x0c\"4\n\x0cLinkedRecord\x12\x10\n\x08ownerUid\x18\x01 \x01(\x0c\x12\x12\n\nrecordUids\x18\x02 \x03(\x0c\"W\n\x17GetSharingAdminsRequest\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x10\n\x08username\x18\x03 \x01(\t\"\xe0\x01\n\x0eUserProfileExt\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x10\n\x08\x66ullName\x18\x02 \x01(\t\x12\x10\n\x08jobTitle\x18\x03 \x01(\t\x12\x14\n\x0cisMSPMCAdmin\x18\x04 \x01(\x08\x12\x18\n\x10isInSharedFolder\x18\x05 \x01(\x08\x12&\n\x1eisShareAdminForRequestedObject\x18\x06 \x01(\x08\x12(\n isShareAdminForSharedFolderOwner\x18\x07 \x01(\x08\x12\x19\n\x11hasAccessToObject\x18\x08 \x01(\x08\"O\n\x18GetSharingAdminsResponse\x12\x33\n\x0fuserProfileExts\x18\x01 \x03(\x0b\x32\x1a.Enterprise.UserProfileExt\"_\n\x1eTeamsEnterpriseUsersAddRequest\x12=\n\x05teams\x18\x01 \x03(\x0b\x32..Enterprise.TeamsEnterpriseUsersAddTeamRequest\"t\n\"TeamsEnterpriseUsersAddTeamRequest\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12=\n\x05users\x18\x02 \x03(\x0b\x32..Enterprise.TeamsEnterpriseUsersAddUserRequest\"\xab\x01\n\"TeamsEnterpriseUsersAddUserRequest\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12*\n\x08userType\x18\x02 \x01(\x0e\x32\x18.Enterprise.TeamUserType\x12\x13\n\x07teamKey\x18\x03 \x01(\tB\x02\x18\x01\x12*\n\x0ctypedTeamKey\x18\x04 \x01(\x0b\x32\x14.Enterprise.TypedKey\"F\n\x08TypedKey\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12-\n\x07keyType\x18\x02 \x01(\x0e\x32\x1c.Enterprise.EncryptedKeyType\"s\n\x1fTeamsEnterpriseUsersAddResponse\x12>\n\x05teams\x18\x01 \x03(\x0b\x32/.Enterprise.TeamsEnterpriseUsersAddTeamResponse\x12\x10\n\x08revision\x18\x02 \x01(\x03\"\xc4\x01\n#TeamsEnterpriseUsersAddTeamResponse\x12\x0f\n\x07teamUid\x18\x01 
\x01(\x0c\x12>\n\x05users\x18\x02 \x03(\x0b\x32/.Enterprise.TeamsEnterpriseUsersAddUserResponse\x12\x0f\n\x07success\x18\x03 \x01(\x08\x12\x0f\n\x07message\x18\x04 \x01(\t\x12\x12\n\nresultCode\x18\x05 \x01(\t\x12\x16\n\x0e\x61\x64\x64itionalInfo\x18\x06 \x01(\t\"\x9f\x01\n#TeamsEnterpriseUsersAddUserResponse\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x0f\n\x07success\x18\x03 \x01(\x08\x12\x0f\n\x07message\x18\x04 \x01(\t\x12\x12\n\nresultCode\x18\x05 \x01(\t\x12\x16\n\x0e\x61\x64\x64itionalInfo\x18\x06 \x01(\t\"E\n\x18TeamEnterpriseUserRemove\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x18\n\x10\x65nterpriseUserId\x18\x02 \x01(\x03\"j\n TeamEnterpriseUserRemovesRequest\x12\x46\n\x18teamEnterpriseUserRemove\x18\x01 \x03(\x0b\x32$.Enterprise.TeamEnterpriseUserRemove\"{\n!TeamEnterpriseUserRemovesResponse\x12V\n teamEnterpriseUserRemoveResponse\x18\x01 \x03(\x0b\x32,.Enterprise.TeamEnterpriseUserRemoveResponse\"\xb8\x01\n TeamEnterpriseUserRemoveResponse\x12\x46\n\x18teamEnterpriseUserRemove\x18\x01 \x01(\x0b\x32$.Enterprise.TeamEnterpriseUserRemove\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\nresultCode\x18\x03 \x01(\t\x12\x0f\n\x07message\x18\x04 \x01(\t\x12\x16\n\x0e\x61\x64\x64itionalInfo\x18\x05 \x01(\t\"M\n\x0b\x44omainAlias\x12\x0e\n\x06\x64omain\x18\x01 \x01(\t\x12\r\n\x05\x61lias\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\x05\x12\x0f\n\x07message\x18\x04 \x01(\t\"B\n\x12\x44omainAliasRequest\x12,\n\x0b\x64omainAlias\x18\x01 \x03(\x0b\x32\x17.Enterprise.DomainAlias\"C\n\x13\x44omainAliasResponse\x12,\n\x0b\x64omainAlias\x18\x01 \x03(\x0b\x32\x17.Enterprise.DomainAlias\"m\n\x1f\x45nterpriseUsersProvisionRequest\x12\x33\n\x05users\x18\x01 \x03(\x0b\x32$.Enterprise.EnterpriseUsersProvision\x12\x15\n\rclientVersion\x18\x02 \x01(\t\"\xb6\x03\n\x18\x45nterpriseUsersProvision\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x0e\n\x06nodeId\x18\x03 
\x01(\x03\x12\x15\n\rencryptedData\x18\x04 \x01(\t\x12-\n\x07keyType\x18\x05 \x01(\x0e\x32\x1c.Enterprise.EncryptedKeyType\x12\x10\n\x08\x66ullName\x18\x06 \x01(\t\x12\x10\n\x08jobTitle\x18\x07 \x01(\t\x12\x1e\n\x16\x65nterpriseUsersDataKey\x18\x08 \x01(\x0c\x12\x14\n\x0c\x61uthVerifier\x18\t \x01(\x0c\x12\x18\n\x10\x65ncryptionParams\x18\n \x01(\x0c\x12\x14\n\x0crsaPublicKey\x18\x0b \x01(\x0c\x12\x1e\n\x16rsaEncryptedPrivateKey\x18\x0c \x01(\x0c\x12\x14\n\x0c\x65\x63\x63PublicKey\x18\r \x01(\x0c\x12\x1e\n\x16\x65\x63\x63\x45ncryptedPrivateKey\x18\x0e \x01(\x0c\x12\x1c\n\x14\x65ncryptedDeviceToken\x18\x0f \x01(\x0c\x12\x1a\n\x12\x65ncryptedClientKey\x18\x10 \x01(\x0c\"_\n EnterpriseUsersProvisionResponse\x12;\n\x07results\x18\x01 \x03(\x0b\x32*.Enterprise.EnterpriseUsersProvisionResult\"q\n\x1e\x45nterpriseUsersProvisionResult\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x0c\n\x04\x63ode\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x16\n\x0e\x61\x64\x64itionalInfo\x18\x04 \x01(\t\"a\n\x19\x45nterpriseUsersAddRequest\x12-\n\x05users\x18\x01 \x03(\x0b\x32\x1e.Enterprise.EnterpriseUsersAdd\x12\x15\n\rclientVersion\x18\x02 \x01(\t\"\x8c\x02\n\x12\x45nterpriseUsersAdd\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x0e\n\x06nodeId\x18\x03 \x01(\x03\x12\x15\n\rencryptedData\x18\x04 \x01(\t\x12-\n\x07keyType\x18\x05 \x01(\x0e\x32\x1c.Enterprise.EncryptedKeyType\x12\x10\n\x08\x66ullName\x18\x06 \x01(\t\x12\x10\n\x08jobTitle\x18\x07 \x01(\t\x12\x1b\n\x13suppressEmailInvite\x18\x08 \x01(\x08\x12\x15\n\rinviteeLocale\x18\t \x01(\t\x12\x0c\n\x04move\x18\n \x01(\x08\x12\x0e\n\x06roleId\x18\x0b \x01(\x03\"\x9b\x01\n\x1a\x45nterpriseUsersAddResponse\x12\x35\n\x07results\x18\x01 \x03(\x0b\x32$.Enterprise.EnterpriseUsersAddResult\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x0c\n\x04\x63ode\x18\x03 \x01(\t\x12\x0f\n\x07message\x18\x04 \x01(\t\x12\x16\n\x0e\x61\x64\x64itionalInfo\x18\x05 
\x01(\t\"\x96\x01\n\x18\x45nterpriseUsersAddResult\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x18\n\x10verificationCode\x18\x03 \x01(\t\x12\x0c\n\x04\x63ode\x18\x04 \x01(\t\x12\x0f\n\x07message\x18\x05 \x01(\t\x12\x16\n\x0e\x61\x64\x64itionalInfo\x18\x06 \x01(\t\"\xb9\x01\n\x17UpdateMSPPermitsRequest\x12\x17\n\x0fmspEnterpriseId\x18\x01 \x01(\x05\x12\x1a\n\x12maxAllowedLicenses\x18\x02 \x01(\x05\x12\x19\n\x11\x61llowedMcProducts\x18\x03 \x03(\t\x12\x15\n\rallowedAddOns\x18\x04 \x03(\t\x12\x17\n\x0fmaxFilePlanType\x18\x05 \x01(\t\x12\x1e\n\x16\x61llowUnlimitedLicenses\x18\x06 \x01(\x08\"9\n\x1c\x44\x65leteEnterpriseUsersRequest\x12\x19\n\x11\x65nterpriseUserIds\x18\x01 \x03(\x03\"o\n\x1a\x44\x65leteEnterpriseUserStatus\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12\x37\n\x06status\x18\x02 \x01(\x0e\x32\'.Enterprise.DeleteEnterpriseUsersResult\"]\n\x1d\x44\x65leteEnterpriseUsersResponse\x12<\n\x0c\x64\x65leteStatus\x18\x01 \x03(\x0b\x32&.Enterprise.DeleteEnterpriseUserStatus\"w\n\x18\x43learSecurityDataRequest\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x03(\x03\x12\x10\n\x08\x61llUsers\x18\x02 \x01(\x08\x12/\n\x04type\x18\x03 \x01(\x0e\x32!.Enterprise.ClearSecurityDataType\"%\n\x13ListDomainsResponse\x12\x0e\n\x06\x64omain\x18\x01 \x03(\t\"d\n\x14ReserveDomainRequest\x12<\n\x13reserveDomainAction\x18\x01 \x01(\x0e\x32\x1f.Enterprise.ReserveDomainAction\x12\x0e\n\x06\x64omain\x18\x02 \x01(\t\"&\n\x15ReserveDomainResponse\x12\r\n\x05token\x18\x01 \x01(\t\".\n\x0bRolesByTeam\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x0e\n\x06roleId\x18\x02 \x03(\x03\"\x8d\x01\n\x10LockUsersRequest\x12\x1d\n\x15lockEnterpriseUserIds\x18\x01 \x03(\x03\x12 \n\x18\x64isableEnterpriseUserIds\x18\x02 \x03(\x03\x12\x1f\n\x17unlockEnterpriseUserIds\x18\x03 \x03(\x03\x12\x17\n\x0f\x64\x65leteIfPending\x18\x04 \x01(\x08\"C\n\x11LockUsersResponse\x12.\n\x08response\x18\x01 
\x03(\x0b\x32\x1c.Enterprise.LockUserResponse\"n\n\x10LockUserResponse\x12\x18\n\x10\x65nterpriseUserId\x18\x01 \x01(\x03\x12*\n\x06status\x18\x02 \x01(\x0e\x32\x1a.Enterprise.UserLockStatus\x12\x14\n\x0c\x65rrorMessage\x18\x03 \x01(\t*\x1b\n\x07KeyType\x12\x07\n\x03RSA\x10\x00\x12\x07\n\x03\x45\x43\x43\x10\x01*\x9a\x02\n\x14RoleUserModifyStatus\x12\x0f\n\x0bROLE_EXISTS\x10\x00\x12\x14\n\x10MISSING_TREE_KEY\x10\x01\x12\x14\n\x10MISSING_ROLE_KEY\x10\x02\x12\x1e\n\x1aINVALID_ENTERPRISE_USER_ID\x10\x03\x12\x1b\n\x17PENDING_ENTERPRISE_USER\x10\x04\x12\x13\n\x0fINVALID_NODE_ID\x10\x05\x12!\n\x1dMAY_NOT_REMOVE_SELF_FROM_ROLE\x10\x06\x12\x1c\n\x18MUST_HAVE_ONE_USER_ADMIN\x10\x07\x12\x13\n\x0fINVALID_ROLE_ID\x10\x08\x12\x1d\n\x19PAM_LICENSE_SEAT_EXCEEDED\x10\t*=\n\x0e\x45nterpriseType\x12\x17\n\x13\x45NTERPRISE_STANDARD\x10\x00\x12\x12\n\x0e\x45NTERPRISE_MSP\x10\x01*s\n\x18TransferAcceptanceStatus\x12\r\n\tUNDEFINED\x10\x00\x12\x10\n\x0cNOT_REQUIRED\x10\x01\x12\x10\n\x0cNOT_ACCEPTED\x10\x02\x12\x16\n\x12PARTIALLY_ACCEPTED\x10\x03\x12\x0c\n\x08\x41\x43\x43\x45PTED\x10\x04*\xe1\x03\n\x14\x45nterpriseDataEntity\x12\x0b\n\x07UNKNOWN\x10\x00\x12\t\n\x05NODES\x10\x01\x12\t\n\x05ROLES\x10\x02\x12\t\n\x05USERS\x10\x03\x12\t\n\x05TEAMS\x10\x04\x12\x0e\n\nTEAM_USERS\x10\x05\x12\x0e\n\nROLE_USERS\x10\x06\x12\x13\n\x0fROLE_PRIVILEGES\x10\x07\x12\x15\n\x11ROLE_ENFORCEMENTS\x10\x08\x12\x0e\n\nROLE_TEAMS\x10\t\x12\x0c\n\x08LICENSES\x10\n\x12\x11\n\rMANAGED_NODES\x10\x0b\x12\x15\n\x11MANAGED_COMPANIES\x10\x0c\x12\x0b\n\x07\x42RIDGES\x10\r\x12\t\n\x05SCIMS\x10\x0e\x12\x13\n\x0f\x45MAIL_PROVISION\x10\x0f\x12\x10\n\x0cQUEUED_TEAMS\x10\x10\x12\x15\n\x11QUEUED_TEAM_USERS\x10\x11\x12\x10\n\x0cSSO_SERVICES\x10\x12\x12\x17\n\x13REPORT_FILTER_USERS\x10\x13\x12&\n\"DEVICES_REQUEST_FOR_ADMIN_APPROVAL\x10\x14\x12\x10\n\x0cUSER_ALIASES\x10\x15\x12)\n%COMPLIANCE_REPORT_CRITERIA_AND_FILTER\x10\x16\x12\x16\n\x12\x43OMPLIANCE_REPORTS\x10\x17*\"\n\x0b\x43\x61\x63heStatus\x12\x08\n\x04KEEP\x10\x00\x12\t\n\x0
5\x43LEAR\x10\x01*\x93\x01\n\rBackupKeyType\x12\n\n\x06NO_KEY\x10\x00\x12\x19\n\x15\x45NCRYPTED_BY_DATA_KEY\x10\x01\x12\x1b\n\x17\x45NCRYPTED_BY_PUBLIC_KEY\x10\x02\x12\x1d\n\x19\x45NCRYPTED_BY_DATA_KEY_GCM\x10\x03\x12\x1f\n\x1b\x45NCRYPTED_BY_PUBLIC_KEY_ECC\x10\x04*:\n\x15\x42\x61\x63kupUserDataKeyType\x12\x07\n\x03OWN\x10\x00\x12\x18\n\x14SHARED_TO_ENTERPRISE\x10\x01*\xa5\x01\n\x10\x45ncryptedKeyType\x12\r\n\tKT_NO_KEY\x10\x00\x12\x1c\n\x18KT_ENCRYPTED_BY_DATA_KEY\x10\x01\x12\x1e\n\x1aKT_ENCRYPTED_BY_PUBLIC_KEY\x10\x02\x12 \n\x1cKT_ENCRYPTED_BY_DATA_KEY_GCM\x10\x03\x12\"\n\x1eKT_ENCRYPTED_BY_PUBLIC_KEY_ECC\x10\x04*\xb7\x02\n\x12\x45nterpriseFlagType\x12\x0b\n\x07INVALID\x10\x00\x12\x1a\n\x16\x41LLOW_PERSONAL_LICENSE\x10\x01\x12\x18\n\x14SPECIAL_PROVISIONING\x10\x02\x12\x10\n\x0cRECORD_TYPES\x10\x03\x12\x13\n\x0fSECRETS_MANAGER\x10\x04\x12\x15\n\x11\x45NTERPRISE_LOCKED\x10\x05\x12\x15\n\x11\x46ORBID_KEY_TYPE_2\x10\x06\x12\x15\n\x11\x43ONSOLE_ONBOARDED\x10\x07\x12\x1b\n\x17\x46ORBID_ACCOUNT_TRANSFER\x10\x08\x12\x15\n\x11NPS_POPUP_OPT_OUT\x10\t\x12\x15\n\x11SHOW_USER_ONBOARD\x10\n\x12\x15\n\x11\x46ORBID_KEY_TYPE_1\x10\x0b\x12\x10\n\x0cKEEPER_DRIVE\x10\x0c*E\n\x10UserUpdateStatus\x12\x12\n\x0eUSER_UPDATE_OK\x10\x00\x12\x1d\n\x19USER_UPDATE_ACCESS_DENIED\x10\x01*I\n\x0f\x41uditUserStatus\x12\x06\n\x02OK\x10\x00\x12\x11\n\rACCESS_DENIED\x10\x01\x12\x1b\n\x17NO_LONGER_IN_ENTERPRISE\x10\x02*3\n\x0cTeamUserType\x12\x08\n\x04USER\x10\x00\x12\t\n\x05\x41\x44MIN\x10\x01\x12\x0e\n\nADMIN_ONLY\x10\x02*x\n\rAppClientType\x12\x0c\n\x08NOT_USED\x10\x00\x12\x0b\n\x07GENERAL\x10\x01\x12%\n!DISCOVERY_AND_ROTATION_CONTROLLER\x10\x02\x12\x12\n\x0eKCM_CONTROLLER\x10\x03\x12\x11\n\rSELF_DESTRUCT\x10\x04*\x8f\x01\n\x1b\x44\x65leteEnterpriseUsersResult\x12\x0b\n\x07SUCCESS\x10\x00\x12\x1a\n\x16NOT_AN_ENTERPRISE_USER\x10\x01\x12\x16\n\x12\x43\x41NNOT_DELETE_SELF\x10\x02\x12$\n 
BRIDGE_CANNOT_DELETE_ACTIVE_USER\x10\x03\x12\t\n\x05\x45RROR\x10\x04*\x87\x01\n\x15\x43learSecurityDataType\x12\x1e\n\x1aRECALCULATE_SUMMARY_REPORT\x10\x00\x12\'\n#FORCE_CLIENT_CHECK_FOR_MISSING_DATA\x10\x01\x12%\n!FORCE_CLIENT_RESEND_SECURITY_DATA\x10\x02*J\n\x13ReserveDomainAction\x12\x10\n\x0c\x44OMAIN_TOKEN\x10\x00\x12\x0e\n\nDOMAIN_ADD\x10\x01\x12\x11\n\rDOMAIN_DELETE\x10\x02*s\n\x0eUserLockStatus\x12\x17\n\x13UNKNOWN_LOCK_STATUS\x10\x00\x12\n\n\x06LOCKED\x10\x01\x12\x0c\n\x08\x44ISABLED\x10\x02\x12\x0c\n\x08UNLOCKED\x10\x03\x12\x0b\n\x07\x44\x45LETED\x10\x04\x12\x13\n\x0f\x43\x41NT_BE_PENDING\x10\x05\x42&\n\x18\x63om.keepersecurity.protoB\nEnterpriseb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -27,40 +27,42 @@ _globals['_NODE'].fields_by_name['ssoServiceProviderIds']._serialized_options = b'\020\001' _globals['_TEAMSENTERPRISEUSERSADDUSERREQUEST'].fields_by_name['teamKey']._loaded_options = None _globals['_TEAMSENTERPRISEUSERSADDUSERREQUEST'].fields_by_name['teamKey']._serialized_options = b'\030\001' - _globals['_KEYTYPE']._serialized_start=20100 - _globals['_KEYTYPE']._serialized_end=20127 - _globals['_ROLEUSERMODIFYSTATUS']._serialized_start=20130 - _globals['_ROLEUSERMODIFYSTATUS']._serialized_end=20412 - _globals['_ENTERPRISETYPE']._serialized_start=20414 - _globals['_ENTERPRISETYPE']._serialized_end=20475 - _globals['_TRANSFERACCEPTANCESTATUS']._serialized_start=20477 - _globals['_TRANSFERACCEPTANCESTATUS']._serialized_end=20592 - _globals['_ENTERPRISEDATAENTITY']._serialized_start=20595 - _globals['_ENTERPRISEDATAENTITY']._serialized_end=21076 - _globals['_CACHESTATUS']._serialized_start=21078 - _globals['_CACHESTATUS']._serialized_end=21112 - _globals['_BACKUPKEYTYPE']._serialized_start=21115 - _globals['_BACKUPKEYTYPE']._serialized_end=21262 - _globals['_BACKUPUSERDATAKEYTYPE']._serialized_start=21264 - _globals['_BACKUPUSERDATAKEYTYPE']._serialized_end=21322 - 
_globals['_ENCRYPTEDKEYTYPE']._serialized_start=21325 - _globals['_ENCRYPTEDKEYTYPE']._serialized_end=21490 - _globals['_ENTERPRISEFLAGTYPE']._serialized_start=21493 - _globals['_ENTERPRISEFLAGTYPE']._serialized_end=21763 - _globals['_USERUPDATESTATUS']._serialized_start=21765 - _globals['_USERUPDATESTATUS']._serialized_end=21834 - _globals['_AUDITUSERSTATUS']._serialized_start=21836 - _globals['_AUDITUSERSTATUS']._serialized_end=21909 - _globals['_TEAMUSERTYPE']._serialized_start=21911 - _globals['_TEAMUSERTYPE']._serialized_end=21962 - _globals['_APPCLIENTTYPE']._serialized_start=21964 - _globals['_APPCLIENTTYPE']._serialized_end=22084 - _globals['_DELETEENTERPRISEUSERSRESULT']._serialized_start=22087 - _globals['_DELETEENTERPRISEUSERSRESULT']._serialized_end=22230 - _globals['_CLEARSECURITYDATATYPE']._serialized_start=22233 - _globals['_CLEARSECURITYDATATYPE']._serialized_end=22368 - _globals['_RESERVEDOMAINACTION']._serialized_start=22370 - _globals['_RESERVEDOMAINACTION']._serialized_end=22444 + _globals['_KEYTYPE']._serialized_start=20583 + _globals['_KEYTYPE']._serialized_end=20610 + _globals['_ROLEUSERMODIFYSTATUS']._serialized_start=20613 + _globals['_ROLEUSERMODIFYSTATUS']._serialized_end=20895 + _globals['_ENTERPRISETYPE']._serialized_start=20897 + _globals['_ENTERPRISETYPE']._serialized_end=20958 + _globals['_TRANSFERACCEPTANCESTATUS']._serialized_start=20960 + _globals['_TRANSFERACCEPTANCESTATUS']._serialized_end=21075 + _globals['_ENTERPRISEDATAENTITY']._serialized_start=21078 + _globals['_ENTERPRISEDATAENTITY']._serialized_end=21559 + _globals['_CACHESTATUS']._serialized_start=21561 + _globals['_CACHESTATUS']._serialized_end=21595 + _globals['_BACKUPKEYTYPE']._serialized_start=21598 + _globals['_BACKUPKEYTYPE']._serialized_end=21745 + _globals['_BACKUPUSERDATAKEYTYPE']._serialized_start=21747 + _globals['_BACKUPUSERDATAKEYTYPE']._serialized_end=21805 + _globals['_ENCRYPTEDKEYTYPE']._serialized_start=21808 + 
_globals['_ENCRYPTEDKEYTYPE']._serialized_end=21973 + _globals['_ENTERPRISEFLAGTYPE']._serialized_start=21976 + _globals['_ENTERPRISEFLAGTYPE']._serialized_end=22287 + _globals['_USERUPDATESTATUS']._serialized_start=22289 + _globals['_USERUPDATESTATUS']._serialized_end=22358 + _globals['_AUDITUSERSTATUS']._serialized_start=22360 + _globals['_AUDITUSERSTATUS']._serialized_end=22433 + _globals['_TEAMUSERTYPE']._serialized_start=22435 + _globals['_TEAMUSERTYPE']._serialized_end=22486 + _globals['_APPCLIENTTYPE']._serialized_start=22488 + _globals['_APPCLIENTTYPE']._serialized_end=22608 + _globals['_DELETEENTERPRISEUSERSRESULT']._serialized_start=22611 + _globals['_DELETEENTERPRISEUSERSRESULT']._serialized_end=22754 + _globals['_CLEARSECURITYDATATYPE']._serialized_start=22757 + _globals['_CLEARSECURITYDATATYPE']._serialized_end=22892 + _globals['_RESERVEDOMAINACTION']._serialized_start=22894 + _globals['_RESERVEDOMAINACTION']._serialized_end=22968 + _globals['_USERLOCKSTATUS']._serialized_start=22970 + _globals['_USERLOCKSTATUS']._serialized_end=23085 _globals['_ENTERPRISEKEYPAIRREQUEST']._serialized_start=33 _globals['_ENTERPRISEKEYPAIRREQUEST']._serialized_end=165 _globals['_GETTEAMMEMBERREQUEST']._serialized_start=167 @@ -118,271 +120,281 @@ _globals['_LOGINTOMCREQUEST']._serialized_start=2873 _globals['_LOGINTOMCREQUEST']._serialized_end=2942 _globals['_LOGINTOMCRESPONSE']._serialized_start=2944 - _globals['_LOGINTOMCRESPONSE']._serialized_end=3020 - _globals['_DOMAINPASSWORDRULESRESPONSE']._serialized_start=3022 - _globals['_DOMAINPASSWORDRULESRESPONSE']._serialized_end=3125 - _globals['_APPROVEUSERDEVICEREQUEST']._serialized_start=3128 - _globals['_APPROVEUSERDEVICEREQUEST']._serialized_end=3264 - _globals['_APPROVEUSERDEVICERESPONSE']._serialized_start=3266 - _globals['_APPROVEUSERDEVICERESPONSE']._serialized_end=3382 - _globals['_APPROVEUSERDEVICESREQUEST']._serialized_start=3384 - _globals['_APPROVEUSERDEVICESREQUEST']._serialized_end=3473 - 
_globals['_APPROVEUSERDEVICESRESPONSE']._serialized_start=3475 - _globals['_APPROVEUSERDEVICESRESPONSE']._serialized_end=3567 - _globals['_ENTERPRISEUSERDATAKEY']._serialized_start=3570 - _globals['_ENTERPRISEUSERDATAKEY']._serialized_end=3705 - _globals['_ENTERPRISEUSERDATAKEYS']._serialized_start=3707 - _globals['_ENTERPRISEUSERDATAKEYS']._serialized_end=3780 - _globals['_ENTERPRISEUSERDATAKEYLIGHT']._serialized_start=3782 - _globals['_ENTERPRISEUSERDATAKEYLIGHT']._serialized_end=3885 - _globals['_ENTERPRISEUSERDATAKEYSBYNODE']._serialized_start=3887 - _globals['_ENTERPRISEUSERDATAKEYSBYNODE']._serialized_end=3987 - _globals['_ENTERPRISEUSERDATAKEYSBYNODERESPONSE']._serialized_start=3989 - _globals['_ENTERPRISEUSERDATAKEYSBYNODERESPONSE']._serialized_end=4083 - _globals['_ENTERPRISEDATAREQUEST']._serialized_start=4085 - _globals['_ENTERPRISEDATAREQUEST']._serialized_end=4135 - _globals['_SPECIALPROVISIONING']._serialized_start=4137 - _globals['_SPECIALPROVISIONING']._serialized_end=4185 - _globals['_GENERALDATAENTITY']._serialized_start=4188 - _globals['_GENERALDATAENTITY']._serialized_end=4448 - _globals['_NODE']._serialized_start=4451 - _globals['_NODE']._serialized_end=4704 - _globals['_ROLE']._serialized_start=4707 - _globals['_ROLE']._serialized_end=4849 - _globals['_USER']._serialized_start=4852 - _globals['_USER']._serialized_end=5164 - _globals['_USERALIAS']._serialized_start=5166 - _globals['_USERALIAS']._serialized_end=5221 - _globals['_COMPLIANCEREPORTMETADATA']._serialized_start=5224 - _globals['_COMPLIANCEREPORTMETADATA']._serialized_end=5396 - _globals['_MANAGEDNODE']._serialized_start=5398 - _globals['_MANAGEDNODE']._serialized_end=5481 - _globals['_USERMANAGEDNODE']._serialized_start=5483 - _globals['_USERMANAGEDNODE']._serialized_end=5567 - _globals['_USERPRIVILEGE']._serialized_start=5569 - _globals['_USERPRIVILEGE']._serialized_end=5688 - _globals['_ROLEUSER']._serialized_start=5690 - _globals['_ROLEUSER']._serialized_end=5742 - 
_globals['_ROLEPRIVILEGE']._serialized_start=5744 - _globals['_ROLEPRIVILEGE']._serialized_end=5821 - _globals['_ROLEENFORCEMENT']._serialized_start=5823 - _globals['_ROLEENFORCEMENT']._serialized_end=5896 - _globals['_TEAM']._serialized_start=5899 - _globals['_TEAM']._serialized_end=6068 - _globals['_TEAMUSER']._serialized_start=6070 - _globals['_TEAMUSER']._serialized_end=6141 - _globals['_GETDISTRIBUTORINFORESPONSE']._serialized_start=6143 - _globals['_GETDISTRIBUTORINFORESPONSE']._serialized_end=6218 - _globals['_DISTRIBUTOR']._serialized_start=6220 - _globals['_DISTRIBUTOR']._serialized_end=6286 - _globals['_MSPINFO']._serialized_start=6289 - _globals['_MSPINFO']._serialized_end=6574 - _globals['_MANAGEDCOMPANY']._serialized_start=6577 - _globals['_MANAGEDCOMPANY']._serialized_end=6850 - _globals['_MSPPOOL']._serialized_start=6852 - _globals['_MSPPOOL']._serialized_end=6934 - _globals['_MSPCONTACT']._serialized_start=6936 - _globals['_MSPCONTACT']._serialized_end=6994 - _globals['_LICENSEADDON']._serialized_start=6997 - _globals['_LICENSEADDON']._serialized_end=7233 - _globals['_MCDEFAULT']._serialized_start=7235 - _globals['_MCDEFAULT']._serialized_end=7350 - _globals['_MSPPERMITS']._serialized_start=7353 - _globals['_MSPPERMITS']._serialized_end=7563 - _globals['_LICENSE']._serialized_start=7566 - _globals['_LICENSE']._serialized_end=8110 - _globals['_BRIDGE']._serialized_start=8112 - _globals['_BRIDGE']._serialized_end=8222 - _globals['_SCIM']._serialized_start=8224 - _globals['_SCIM']._serialized_end=8340 - _globals['_EMAILPROVISION']._serialized_start=8342 - _globals['_EMAILPROVISION']._serialized_end=8418 - _globals['_QUEUEDTEAM']._serialized_start=8420 - _globals['_QUEUEDTEAM']._serialized_end=8502 - _globals['_QUEUEDTEAMUSER']._serialized_start=8504 - _globals['_QUEUEDTEAMUSER']._serialized_end=8552 - _globals['_TEAMSADDRESULT']._serialized_start=8555 - _globals['_TEAMSADDRESULT']._serialized_end=8719 - _globals['_TEAMADDRESULT']._serialized_start=8721 
- _globals['_TEAMADDRESULT']._serialized_end=8806 - _globals['_SSOSERVICE']._serialized_start=8809 - _globals['_SSOSERVICE']._serialized_end=8954 - _globals['_REPORTFILTERUSER']._serialized_start=8956 - _globals['_REPORTFILTERUSER']._serialized_end=9005 - _globals['_DEVICEREQUESTFORADMINAPPROVAL']._serialized_start=9008 - _globals['_DEVICEREQUESTFORADMINAPPROVAL']._serialized_end=9287 - _globals['_ENTERPRISEDATA']._serialized_start=9289 - _globals['_ENTERPRISEDATA']._serialized_end=9385 - _globals['_ENTERPRISEDATARESPONSE']._serialized_start=9388 - _globals['_ENTERPRISEDATARESPONSE']._serialized_end=9596 - _globals['_BACKUPREQUEST']._serialized_start=9598 - _globals['_BACKUPREQUEST']._serialized_end=9640 - _globals['_BACKUPRECORD']._serialized_start=9643 - _globals['_BACKUPRECORD']._serialized_end=9795 - _globals['_BACKUPKEY']._serialized_start=9797 - _globals['_BACKUPKEY']._serialized_end=9843 - _globals['_BACKUPUSER']._serialized_start=9846 - _globals['_BACKUPUSER']._serialized_end=10115 - _globals['_BACKUPRESPONSE']._serialized_start=10118 - _globals['_BACKUPRESPONSE']._serialized_end=10276 - _globals['_BACKUPFILE']._serialized_start=10278 - _globals['_BACKUPFILE']._serialized_end=10379 - _globals['_BACKUPSRESPONSE']._serialized_start=10381 - _globals['_BACKUPSRESPONSE']._serialized_end=10437 - _globals['_GETENTERPRISEDATAKEYSREQUEST']._serialized_start=10439 - _globals['_GETENTERPRISEDATAKEYSREQUEST']._serialized_end=10485 - _globals['_GETENTERPRISEDATAKEYSRESPONSE']._serialized_start=10488 - _globals['_GETENTERPRISEDATAKEYSRESPONSE']._serialized_end=10743 - _globals['_ROLEKEY']._serialized_start=10745 - _globals['_ROLEKEY']._serialized_end=10839 - _globals['_MSPKEY']._serialized_start=10841 - _globals['_MSPKEY']._serialized_end=10941 - _globals['_ENTERPRISEKEYS']._serialized_start=10943 - _globals['_ENTERPRISEKEYS']._serialized_end=11067 - _globals['_TREEKEY']._serialized_start=11069 - _globals['_TREEKEY']._serialized_end=11141 - 
_globals['_SHAREDRECORDRESPONSE']._serialized_start=11143 - _globals['_SHAREDRECORDRESPONSE']._serialized_end=11212 - _globals['_SHAREDRECORDEVENT']._serialized_start=11214 - _globals['_SHAREDRECORDEVENT']._serialized_end=11326 - _globals['_SETRESTRICTVISIBILITYREQUEST']._serialized_start=11328 - _globals['_SETRESTRICTVISIBILITYREQUEST']._serialized_end=11374 - _globals['_USERADDREQUEST']._serialized_start=11377 - _globals['_USERADDREQUEST']._serialized_end=11585 - _globals['_USERUPDATEREQUEST']._serialized_start=11587 - _globals['_USERUPDATEREQUEST']._serialized_end=11645 - _globals['_USERUPDATE']._serialized_start=11648 - _globals['_USERUPDATE']._serialized_end=11823 - _globals['_USERUPDATERESPONSE']._serialized_start=11825 - _globals['_USERUPDATERESPONSE']._serialized_end=11890 - _globals['_USERUPDATERESULT']._serialized_start=11892 - _globals['_USERUPDATERESULT']._serialized_end=11982 - _globals['_COMPLIANCERECORDOWNERSREQUEST']._serialized_start=11984 - _globals['_COMPLIANCERECORDOWNERSREQUEST']._serialized_end=12058 - _globals['_COMPLIANCERECORDOWNERSRESPONSE']._serialized_start=12060 - _globals['_COMPLIANCERECORDOWNERSRESPONSE']._serialized_end=12139 - _globals['_RECORDOWNER']._serialized_start=12141 - _globals['_RECORDOWNER']._serialized_end=12196 - _globals['_PRELIMINARYCOMPLIANCEDATAREQUEST']._serialized_start=12199 - _globals['_PRELIMINARYCOMPLIANCEDATAREQUEST']._serialized_end=12365 - _globals['_PRELIMINARYCOMPLIANCEDATARESPONSE']._serialized_start=12368 - _globals['_PRELIMINARYCOMPLIANCEDATARESPONSE']._serialized_end=12527 - _globals['_AUDITUSERRECORD']._serialized_start=12529 - _globals['_AUDITUSERRECORD']._serialized_end=12604 - _globals['_AUDITUSERDATA']._serialized_start=12607 - _globals['_AUDITUSERDATA']._serialized_end=12748 - _globals['_COMPLIANCEREPORTFILTERS']._serialized_start=12750 - _globals['_COMPLIANCEREPORTFILTERS']._serialized_end=12877 - _globals['_COMPLIANCEREPORTREQUEST']._serialized_start=12879 - 
_globals['_COMPLIANCEREPORTREQUEST']._serialized_end=13006 - _globals['_COMPLIANCEREPORTRUN']._serialized_start=13009 - _globals['_COMPLIANCEREPORTRUN']._serialized_end=13142 - _globals['_COMPLIANCEREPORTCRITERIAANDFILTER']._serialized_start=13145 - _globals['_COMPLIANCEREPORTCRITERIAANDFILTER']._serialized_end=13397 - _globals['_COMPLIANCEREPORTCRITERIA']._serialized_start=13399 - _globals['_COMPLIANCEREPORTCRITERIA']._serialized_end=13497 - _globals['_COMPLIANCEREPORTFILTER']._serialized_start=13499 - _globals['_COMPLIANCEREPORTFILTER']._serialized_end=13619 - _globals['_COMPLIANCEREPORTRESPONSE']._serialized_start=13622 - _globals['_COMPLIANCEREPORTRESPONSE']._serialized_end=14295 - _globals['_AUDITRECORD']._serialized_start=14298 - _globals['_AUDITRECORD']._serialized_end=14427 - _globals['_AUDITROLE']._serialized_start=14430 - _globals['_AUDITROLE']._serialized_end=14686 - _globals['_ROLENODEMANAGEMENT']._serialized_start=14688 - _globals['_ROLENODEMANAGEMENT']._serialized_end=14782 - _globals['_USERPROFILE']._serialized_start=14784 - _globals['_USERPROFILE']._serialized_end=14891 - _globals['_RECORDPERMISSION']._serialized_start=14893 - _globals['_RECORDPERMISSION']._serialized_end=14954 - _globals['_USERRECORD']._serialized_start=14956 - _globals['_USERRECORD']._serialized_end=15051 - _globals['_AUDITTEAM']._serialized_start=15053 - _globals['_AUDITTEAM']._serialized_end=15144 - _globals['_AUDITTEAMUSER']._serialized_start=15146 - _globals['_AUDITTEAMUSER']._serialized_end=15205 - _globals['_SHAREDFOLDERRECORD']._serialized_start=15208 - _globals['_SHAREDFOLDERRECORD']._serialized_end=15367 - _globals['_SHAREADMINRECORD']._serialized_start=15369 - _globals['_SHAREADMINRECORD']._serialized_end=15446 - _globals['_SHAREDFOLDERUSER']._serialized_start=15448 - _globals['_SHAREDFOLDERUSER']._serialized_end=15518 - _globals['_SHAREDFOLDERTEAM']._serialized_start=15520 - _globals['_SHAREDFOLDERTEAM']._serialized_end=15581 - 
_globals['_GETCOMPLIANCEREPORTREQUEST']._serialized_start=15583 - _globals['_GETCOMPLIANCEREPORTREQUEST']._serialized_end=15630 - _globals['_GETCOMPLIANCEREPORTRESPONSE']._serialized_start=15632 - _globals['_GETCOMPLIANCEREPORTRESPONSE']._serialized_end=15682 - _globals['_COMPLIANCEREPORTCRITERIAREQUEST']._serialized_start=15684 - _globals['_COMPLIANCEREPORTCRITERIAREQUEST']._serialized_end=15738 - _globals['_SAVECOMPLIANCEREPORTCRITERIARESPONSE']._serialized_start=15740 - _globals['_SAVECOMPLIANCEREPORTCRITERIARESPONSE']._serialized_end=15799 - _globals['_LINKEDRECORD']._serialized_start=15801 - _globals['_LINKEDRECORD']._serialized_end=15853 - _globals['_GETSHARINGADMINSREQUEST']._serialized_start=15855 - _globals['_GETSHARINGADMINSREQUEST']._serialized_end=15942 - _globals['_USERPROFILEEXT']._serialized_start=15945 - _globals['_USERPROFILEEXT']._serialized_end=16169 - _globals['_GETSHARINGADMINSRESPONSE']._serialized_start=16171 - _globals['_GETSHARINGADMINSRESPONSE']._serialized_end=16250 - _globals['_TEAMSENTERPRISEUSERSADDREQUEST']._serialized_start=16252 - _globals['_TEAMSENTERPRISEUSERSADDREQUEST']._serialized_end=16347 - _globals['_TEAMSENTERPRISEUSERSADDTEAMREQUEST']._serialized_start=16349 - _globals['_TEAMSENTERPRISEUSERSADDTEAMREQUEST']._serialized_end=16465 - _globals['_TEAMSENTERPRISEUSERSADDUSERREQUEST']._serialized_start=16468 - _globals['_TEAMSENTERPRISEUSERSADDUSERREQUEST']._serialized_end=16639 - _globals['_TYPEDKEY']._serialized_start=16641 - _globals['_TYPEDKEY']._serialized_end=16711 - _globals['_TEAMSENTERPRISEUSERSADDRESPONSE']._serialized_start=16713 - _globals['_TEAMSENTERPRISEUSERSADDRESPONSE']._serialized_end=16828 - _globals['_TEAMSENTERPRISEUSERSADDTEAMRESPONSE']._serialized_start=16831 - _globals['_TEAMSENTERPRISEUSERSADDTEAMRESPONSE']._serialized_end=17027 - _globals['_TEAMSENTERPRISEUSERSADDUSERRESPONSE']._serialized_start=17030 - _globals['_TEAMSENTERPRISEUSERSADDUSERRESPONSE']._serialized_end=17189 - 
_globals['_TEAMENTERPRISEUSERREMOVE']._serialized_start=17191 - _globals['_TEAMENTERPRISEUSERREMOVE']._serialized_end=17260 - _globals['_TEAMENTERPRISEUSERREMOVESREQUEST']._serialized_start=17262 - _globals['_TEAMENTERPRISEUSERREMOVESREQUEST']._serialized_end=17368 - _globals['_TEAMENTERPRISEUSERREMOVESRESPONSE']._serialized_start=17370 - _globals['_TEAMENTERPRISEUSERREMOVESRESPONSE']._serialized_end=17493 - _globals['_TEAMENTERPRISEUSERREMOVERESPONSE']._serialized_start=17496 - _globals['_TEAMENTERPRISEUSERREMOVERESPONSE']._serialized_end=17680 - _globals['_DOMAINALIAS']._serialized_start=17682 - _globals['_DOMAINALIAS']._serialized_end=17759 - _globals['_DOMAINALIASREQUEST']._serialized_start=17761 - _globals['_DOMAINALIASREQUEST']._serialized_end=17827 - _globals['_DOMAINALIASRESPONSE']._serialized_start=17829 - _globals['_DOMAINALIASRESPONSE']._serialized_end=17896 - _globals['_ENTERPRISEUSERSPROVISIONREQUEST']._serialized_start=17898 - _globals['_ENTERPRISEUSERSPROVISIONREQUEST']._serialized_end=18007 - _globals['_ENTERPRISEUSERSPROVISION']._serialized_start=18010 - _globals['_ENTERPRISEUSERSPROVISION']._serialized_end=18448 - _globals['_ENTERPRISEUSERSPROVISIONRESPONSE']._serialized_start=18450 - _globals['_ENTERPRISEUSERSPROVISIONRESPONSE']._serialized_end=18545 - _globals['_ENTERPRISEUSERSPROVISIONRESULT']._serialized_start=18547 - _globals['_ENTERPRISEUSERSPROVISIONRESULT']._serialized_end=18660 - _globals['_ENTERPRISEUSERSADDREQUEST']._serialized_start=18662 - _globals['_ENTERPRISEUSERSADDREQUEST']._serialized_end=18759 - _globals['_ENTERPRISEUSERSADD']._serialized_start=18762 - _globals['_ENTERPRISEUSERSADD']._serialized_end=19030 - _globals['_ENTERPRISEUSERSADDRESPONSE']._serialized_start=19033 - _globals['_ENTERPRISEUSERSADDRESPONSE']._serialized_end=19188 - _globals['_ENTERPRISEUSERSADDRESULT']._serialized_start=19191 - _globals['_ENTERPRISEUSERSADDRESULT']._serialized_end=19341 - _globals['_UPDATEMSPPERMITSREQUEST']._serialized_start=19344 - 
_globals['_UPDATEMSPPERMITSREQUEST']._serialized_end=19529 - _globals['_DELETEENTERPRISEUSERSREQUEST']._serialized_start=19531 - _globals['_DELETEENTERPRISEUSERSREQUEST']._serialized_end=19588 - _globals['_DELETEENTERPRISEUSERSTATUS']._serialized_start=19590 - _globals['_DELETEENTERPRISEUSERSTATUS']._serialized_end=19701 - _globals['_DELETEENTERPRISEUSERSRESPONSE']._serialized_start=19703 - _globals['_DELETEENTERPRISEUSERSRESPONSE']._serialized_end=19796 - _globals['_CLEARSECURITYDATAREQUEST']._serialized_start=19798 - _globals['_CLEARSECURITYDATAREQUEST']._serialized_end=19917 - _globals['_LISTDOMAINSRESPONSE']._serialized_start=19919 - _globals['_LISTDOMAINSRESPONSE']._serialized_end=19956 - _globals['_RESERVEDOMAINREQUEST']._serialized_start=19958 - _globals['_RESERVEDOMAINREQUEST']._serialized_end=20058 - _globals['_RESERVEDOMAINRESPONSE']._serialized_start=20060 - _globals['_RESERVEDOMAINRESPONSE']._serialized_end=20098 + _globals['_LOGINTOMCRESPONSE']._serialized_end=3044 + _globals['_DOMAINPASSWORDRULESRESPONSE']._serialized_start=3046 + _globals['_DOMAINPASSWORDRULESRESPONSE']._serialized_end=3149 + _globals['_APPROVEUSERDEVICEREQUEST']._serialized_start=3152 + _globals['_APPROVEUSERDEVICEREQUEST']._serialized_end=3288 + _globals['_APPROVEUSERDEVICERESPONSE']._serialized_start=3290 + _globals['_APPROVEUSERDEVICERESPONSE']._serialized_end=3406 + _globals['_APPROVEUSERDEVICESREQUEST']._serialized_start=3408 + _globals['_APPROVEUSERDEVICESREQUEST']._serialized_end=3497 + _globals['_APPROVEUSERDEVICESRESPONSE']._serialized_start=3499 + _globals['_APPROVEUSERDEVICESRESPONSE']._serialized_end=3591 + _globals['_ENTERPRISEUSERDATAKEY']._serialized_start=3594 + _globals['_ENTERPRISEUSERDATAKEY']._serialized_end=3729 + _globals['_ENTERPRISEUSERDATAKEYS']._serialized_start=3731 + _globals['_ENTERPRISEUSERDATAKEYS']._serialized_end=3804 + _globals['_ENTERPRISEUSERDATAKEYLIGHT']._serialized_start=3806 + _globals['_ENTERPRISEUSERDATAKEYLIGHT']._serialized_end=3909 + 
_globals['_ENTERPRISEUSERDATAKEYSBYNODE']._serialized_start=3911 + _globals['_ENTERPRISEUSERDATAKEYSBYNODE']._serialized_end=4011 + _globals['_ENTERPRISEUSERDATAKEYSBYNODERESPONSE']._serialized_start=4013 + _globals['_ENTERPRISEUSERDATAKEYSBYNODERESPONSE']._serialized_end=4107 + _globals['_ENTERPRISEDATAREQUEST']._serialized_start=4109 + _globals['_ENTERPRISEDATAREQUEST']._serialized_end=4159 + _globals['_SPECIALPROVISIONING']._serialized_start=4161 + _globals['_SPECIALPROVISIONING']._serialized_end=4209 + _globals['_GENERALDATAENTITY']._serialized_start=4212 + _globals['_GENERALDATAENTITY']._serialized_end=4472 + _globals['_NODE']._serialized_start=4475 + _globals['_NODE']._serialized_end=4728 + _globals['_ROLE']._serialized_start=4731 + _globals['_ROLE']._serialized_end=4873 + _globals['_USER']._serialized_start=4876 + _globals['_USER']._serialized_end=5188 + _globals['_USERALIAS']._serialized_start=5190 + _globals['_USERALIAS']._serialized_end=5245 + _globals['_COMPLIANCEREPORTMETADATA']._serialized_start=5248 + _globals['_COMPLIANCEREPORTMETADATA']._serialized_end=5420 + _globals['_MANAGEDNODE']._serialized_start=5422 + _globals['_MANAGEDNODE']._serialized_end=5505 + _globals['_USERMANAGEDNODE']._serialized_start=5507 + _globals['_USERMANAGEDNODE']._serialized_end=5591 + _globals['_USERPRIVILEGE']._serialized_start=5593 + _globals['_USERPRIVILEGE']._serialized_end=5712 + _globals['_ROLEUSER']._serialized_start=5714 + _globals['_ROLEUSER']._serialized_end=5766 + _globals['_ROLEPRIVILEGE']._serialized_start=5768 + _globals['_ROLEPRIVILEGE']._serialized_end=5845 + _globals['_PRIVILEGESBYMANAGEDNODE']._serialized_start=5847 + _globals['_PRIVILEGESBYMANAGEDNODE']._serialized_end=5931 + _globals['_ROLEENFORCEMENT']._serialized_start=5933 + _globals['_ROLEENFORCEMENT']._serialized_end=6006 + _globals['_TEAM']._serialized_start=6009 + _globals['_TEAM']._serialized_end=6178 + _globals['_TEAMUSER']._serialized_start=6180 + _globals['_TEAMUSER']._serialized_end=6251 + 
_globals['_GETDISTRIBUTORINFORESPONSE']._serialized_start=6253 + _globals['_GETDISTRIBUTORINFORESPONSE']._serialized_end=6328 + _globals['_DISTRIBUTOR']._serialized_start=6330 + _globals['_DISTRIBUTOR']._serialized_end=6396 + _globals['_MSPINFO']._serialized_start=6399 + _globals['_MSPINFO']._serialized_end=6684 + _globals['_MANAGEDCOMPANY']._serialized_start=6687 + _globals['_MANAGEDCOMPANY']._serialized_end=6960 + _globals['_MSPPOOL']._serialized_start=6962 + _globals['_MSPPOOL']._serialized_end=7044 + _globals['_MSPCONTACT']._serialized_start=7046 + _globals['_MSPCONTACT']._serialized_end=7104 + _globals['_LICENSEADDON']._serialized_start=7107 + _globals['_LICENSEADDON']._serialized_end=7343 + _globals['_MCDEFAULT']._serialized_start=7345 + _globals['_MCDEFAULT']._serialized_end=7460 + _globals['_MSPPERMITS']._serialized_start=7463 + _globals['_MSPPERMITS']._serialized_end=7673 + _globals['_LICENSE']._serialized_start=7676 + _globals['_LICENSE']._serialized_end=8220 + _globals['_BRIDGE']._serialized_start=8222 + _globals['_BRIDGE']._serialized_end=8332 + _globals['_SCIM']._serialized_start=8334 + _globals['_SCIM']._serialized_end=8450 + _globals['_EMAILPROVISION']._serialized_start=8452 + _globals['_EMAILPROVISION']._serialized_end=8528 + _globals['_QUEUEDTEAM']._serialized_start=8530 + _globals['_QUEUEDTEAM']._serialized_end=8612 + _globals['_QUEUEDTEAMUSER']._serialized_start=8614 + _globals['_QUEUEDTEAMUSER']._serialized_end=8662 + _globals['_TEAMSADDRESULT']._serialized_start=8665 + _globals['_TEAMSADDRESULT']._serialized_end=8829 + _globals['_TEAMADDRESULT']._serialized_start=8831 + _globals['_TEAMADDRESULT']._serialized_end=8916 + _globals['_SSOSERVICE']._serialized_start=8919 + _globals['_SSOSERVICE']._serialized_end=9064 + _globals['_REPORTFILTERUSER']._serialized_start=9066 + _globals['_REPORTFILTERUSER']._serialized_end=9115 + _globals['_DEVICEREQUESTFORADMINAPPROVAL']._serialized_start=9118 + 
_globals['_DEVICEREQUESTFORADMINAPPROVAL']._serialized_end=9397 + _globals['_ENTERPRISEDATA']._serialized_start=9399 + _globals['_ENTERPRISEDATA']._serialized_end=9495 + _globals['_ENTERPRISEDATARESPONSE']._serialized_start=9498 + _globals['_ENTERPRISEDATARESPONSE']._serialized_end=9706 + _globals['_BACKUPREQUEST']._serialized_start=9708 + _globals['_BACKUPREQUEST']._serialized_end=9750 + _globals['_BACKUPRECORD']._serialized_start=9753 + _globals['_BACKUPRECORD']._serialized_end=9905 + _globals['_BACKUPKEY']._serialized_start=9907 + _globals['_BACKUPKEY']._serialized_end=9953 + _globals['_BACKUPUSER']._serialized_start=9956 + _globals['_BACKUPUSER']._serialized_end=10225 + _globals['_BACKUPRESPONSE']._serialized_start=10228 + _globals['_BACKUPRESPONSE']._serialized_end=10386 + _globals['_BACKUPFILE']._serialized_start=10388 + _globals['_BACKUPFILE']._serialized_end=10489 + _globals['_BACKUPSRESPONSE']._serialized_start=10491 + _globals['_BACKUPSRESPONSE']._serialized_end=10547 + _globals['_GETENTERPRISEDATAKEYSREQUEST']._serialized_start=10549 + _globals['_GETENTERPRISEDATAKEYSREQUEST']._serialized_end=10595 + _globals['_GETENTERPRISEDATAKEYSRESPONSE']._serialized_start=10598 + _globals['_GETENTERPRISEDATAKEYSRESPONSE']._serialized_end=10853 + _globals['_ROLEKEY']._serialized_start=10855 + _globals['_ROLEKEY']._serialized_end=10949 + _globals['_MSPKEY']._serialized_start=10951 + _globals['_MSPKEY']._serialized_end=11051 + _globals['_ENTERPRISEKEYS']._serialized_start=11053 + _globals['_ENTERPRISEKEYS']._serialized_end=11177 + _globals['_TREEKEY']._serialized_start=11179 + _globals['_TREEKEY']._serialized_end=11251 + _globals['_SHAREDRECORDRESPONSE']._serialized_start=11253 + _globals['_SHAREDRECORDRESPONSE']._serialized_end=11322 + _globals['_SHAREDRECORDEVENT']._serialized_start=11324 + _globals['_SHAREDRECORDEVENT']._serialized_end=11436 + _globals['_SETRESTRICTVISIBILITYREQUEST']._serialized_start=11438 + 
_globals['_SETRESTRICTVISIBILITYREQUEST']._serialized_end=11484 + _globals['_USERADDREQUEST']._serialized_start=11487 + _globals['_USERADDREQUEST']._serialized_end=11695 + _globals['_USERUPDATEREQUEST']._serialized_start=11697 + _globals['_USERUPDATEREQUEST']._serialized_end=11755 + _globals['_USERUPDATE']._serialized_start=11758 + _globals['_USERUPDATE']._serialized_end=11933 + _globals['_USERUPDATERESPONSE']._serialized_start=11935 + _globals['_USERUPDATERESPONSE']._serialized_end=12000 + _globals['_USERUPDATERESULT']._serialized_start=12002 + _globals['_USERUPDATERESULT']._serialized_end=12092 + _globals['_COMPLIANCERECORDOWNERSREQUEST']._serialized_start=12094 + _globals['_COMPLIANCERECORDOWNERSREQUEST']._serialized_end=12168 + _globals['_COMPLIANCERECORDOWNERSRESPONSE']._serialized_start=12170 + _globals['_COMPLIANCERECORDOWNERSRESPONSE']._serialized_end=12249 + _globals['_RECORDOWNER']._serialized_start=12251 + _globals['_RECORDOWNER']._serialized_end=12306 + _globals['_PRELIMINARYCOMPLIANCEDATAREQUEST']._serialized_start=12309 + _globals['_PRELIMINARYCOMPLIANCEDATAREQUEST']._serialized_end=12475 + _globals['_PRELIMINARYCOMPLIANCEDATARESPONSE']._serialized_start=12478 + _globals['_PRELIMINARYCOMPLIANCEDATARESPONSE']._serialized_end=12637 + _globals['_AUDITUSERRECORD']._serialized_start=12639 + _globals['_AUDITUSERRECORD']._serialized_end=12714 + _globals['_AUDITUSERDATA']._serialized_start=12717 + _globals['_AUDITUSERDATA']._serialized_end=12858 + _globals['_COMPLIANCEREPORTFILTERS']._serialized_start=12860 + _globals['_COMPLIANCEREPORTFILTERS']._serialized_end=12987 + _globals['_COMPLIANCEREPORTREQUEST']._serialized_start=12989 + _globals['_COMPLIANCEREPORTREQUEST']._serialized_end=13116 + _globals['_COMPLIANCEREPORTRUN']._serialized_start=13119 + _globals['_COMPLIANCEREPORTRUN']._serialized_end=13252 + _globals['_COMPLIANCEREPORTCRITERIAANDFILTER']._serialized_start=13255 + _globals['_COMPLIANCEREPORTCRITERIAANDFILTER']._serialized_end=13507 + 
_globals['_COMPLIANCEREPORTCRITERIA']._serialized_start=13509 + _globals['_COMPLIANCEREPORTCRITERIA']._serialized_end=13607 + _globals['_COMPLIANCEREPORTFILTER']._serialized_start=13609 + _globals['_COMPLIANCEREPORTFILTER']._serialized_end=13729 + _globals['_COMPLIANCEREPORTRESPONSE']._serialized_start=13732 + _globals['_COMPLIANCEREPORTRESPONSE']._serialized_end=14405 + _globals['_AUDITRECORD']._serialized_start=14408 + _globals['_AUDITRECORD']._serialized_end=14537 + _globals['_AUDITROLE']._serialized_start=14540 + _globals['_AUDITROLE']._serialized_end=14796 + _globals['_ROLENODEMANAGEMENT']._serialized_start=14798 + _globals['_ROLENODEMANAGEMENT']._serialized_end=14892 + _globals['_USERPROFILE']._serialized_start=14894 + _globals['_USERPROFILE']._serialized_end=15001 + _globals['_RECORDPERMISSION']._serialized_start=15003 + _globals['_RECORDPERMISSION']._serialized_end=15064 + _globals['_USERRECORD']._serialized_start=15066 + _globals['_USERRECORD']._serialized_end=15161 + _globals['_AUDITTEAM']._serialized_start=15163 + _globals['_AUDITTEAM']._serialized_end=15254 + _globals['_AUDITTEAMUSER']._serialized_start=15256 + _globals['_AUDITTEAMUSER']._serialized_end=15315 + _globals['_SHAREDFOLDERRECORD']._serialized_start=15318 + _globals['_SHAREDFOLDERRECORD']._serialized_end=15477 + _globals['_SHAREADMINRECORD']._serialized_start=15479 + _globals['_SHAREADMINRECORD']._serialized_end=15556 + _globals['_SHAREDFOLDERUSER']._serialized_start=15558 + _globals['_SHAREDFOLDERUSER']._serialized_end=15628 + _globals['_SHAREDFOLDERTEAM']._serialized_start=15630 + _globals['_SHAREDFOLDERTEAM']._serialized_end=15691 + _globals['_GETCOMPLIANCEREPORTREQUEST']._serialized_start=15693 + _globals['_GETCOMPLIANCEREPORTREQUEST']._serialized_end=15740 + _globals['_GETCOMPLIANCEREPORTRESPONSE']._serialized_start=15742 + _globals['_GETCOMPLIANCEREPORTRESPONSE']._serialized_end=15792 + _globals['_COMPLIANCEREPORTCRITERIAREQUEST']._serialized_start=15794 + 
_globals['_COMPLIANCEREPORTCRITERIAREQUEST']._serialized_end=15848 + _globals['_SAVECOMPLIANCEREPORTCRITERIARESPONSE']._serialized_start=15850 + _globals['_SAVECOMPLIANCEREPORTCRITERIARESPONSE']._serialized_end=15909 + _globals['_LINKEDRECORD']._serialized_start=15911 + _globals['_LINKEDRECORD']._serialized_end=15963 + _globals['_GETSHARINGADMINSREQUEST']._serialized_start=15965 + _globals['_GETSHARINGADMINSREQUEST']._serialized_end=16052 + _globals['_USERPROFILEEXT']._serialized_start=16055 + _globals['_USERPROFILEEXT']._serialized_end=16279 + _globals['_GETSHARINGADMINSRESPONSE']._serialized_start=16281 + _globals['_GETSHARINGADMINSRESPONSE']._serialized_end=16360 + _globals['_TEAMSENTERPRISEUSERSADDREQUEST']._serialized_start=16362 + _globals['_TEAMSENTERPRISEUSERSADDREQUEST']._serialized_end=16457 + _globals['_TEAMSENTERPRISEUSERSADDTEAMREQUEST']._serialized_start=16459 + _globals['_TEAMSENTERPRISEUSERSADDTEAMREQUEST']._serialized_end=16575 + _globals['_TEAMSENTERPRISEUSERSADDUSERREQUEST']._serialized_start=16578 + _globals['_TEAMSENTERPRISEUSERSADDUSERREQUEST']._serialized_end=16749 + _globals['_TYPEDKEY']._serialized_start=16751 + _globals['_TYPEDKEY']._serialized_end=16821 + _globals['_TEAMSENTERPRISEUSERSADDRESPONSE']._serialized_start=16823 + _globals['_TEAMSENTERPRISEUSERSADDRESPONSE']._serialized_end=16938 + _globals['_TEAMSENTERPRISEUSERSADDTEAMRESPONSE']._serialized_start=16941 + _globals['_TEAMSENTERPRISEUSERSADDTEAMRESPONSE']._serialized_end=17137 + _globals['_TEAMSENTERPRISEUSERSADDUSERRESPONSE']._serialized_start=17140 + _globals['_TEAMSENTERPRISEUSERSADDUSERRESPONSE']._serialized_end=17299 + _globals['_TEAMENTERPRISEUSERREMOVE']._serialized_start=17301 + _globals['_TEAMENTERPRISEUSERREMOVE']._serialized_end=17370 + _globals['_TEAMENTERPRISEUSERREMOVESREQUEST']._serialized_start=17372 + _globals['_TEAMENTERPRISEUSERREMOVESREQUEST']._serialized_end=17478 + _globals['_TEAMENTERPRISEUSERREMOVESRESPONSE']._serialized_start=17480 + 
_globals['_TEAMENTERPRISEUSERREMOVESRESPONSE']._serialized_end=17603 + _globals['_TEAMENTERPRISEUSERREMOVERESPONSE']._serialized_start=17606 + _globals['_TEAMENTERPRISEUSERREMOVERESPONSE']._serialized_end=17790 + _globals['_DOMAINALIAS']._serialized_start=17792 + _globals['_DOMAINALIAS']._serialized_end=17869 + _globals['_DOMAINALIASREQUEST']._serialized_start=17871 + _globals['_DOMAINALIASREQUEST']._serialized_end=17937 + _globals['_DOMAINALIASRESPONSE']._serialized_start=17939 + _globals['_DOMAINALIASRESPONSE']._serialized_end=18006 + _globals['_ENTERPRISEUSERSPROVISIONREQUEST']._serialized_start=18008 + _globals['_ENTERPRISEUSERSPROVISIONREQUEST']._serialized_end=18117 + _globals['_ENTERPRISEUSERSPROVISION']._serialized_start=18120 + _globals['_ENTERPRISEUSERSPROVISION']._serialized_end=18558 + _globals['_ENTERPRISEUSERSPROVISIONRESPONSE']._serialized_start=18560 + _globals['_ENTERPRISEUSERSPROVISIONRESPONSE']._serialized_end=18655 + _globals['_ENTERPRISEUSERSPROVISIONRESULT']._serialized_start=18657 + _globals['_ENTERPRISEUSERSPROVISIONRESULT']._serialized_end=18770 + _globals['_ENTERPRISEUSERSADDREQUEST']._serialized_start=18772 + _globals['_ENTERPRISEUSERSADDREQUEST']._serialized_end=18869 + _globals['_ENTERPRISEUSERSADD']._serialized_start=18872 + _globals['_ENTERPRISEUSERSADD']._serialized_end=19140 + _globals['_ENTERPRISEUSERSADDRESPONSE']._serialized_start=19143 + _globals['_ENTERPRISEUSERSADDRESPONSE']._serialized_end=19298 + _globals['_ENTERPRISEUSERSADDRESULT']._serialized_start=19301 + _globals['_ENTERPRISEUSERSADDRESULT']._serialized_end=19451 + _globals['_UPDATEMSPPERMITSREQUEST']._serialized_start=19454 + _globals['_UPDATEMSPPERMITSREQUEST']._serialized_end=19639 + _globals['_DELETEENTERPRISEUSERSREQUEST']._serialized_start=19641 + _globals['_DELETEENTERPRISEUSERSREQUEST']._serialized_end=19698 + _globals['_DELETEENTERPRISEUSERSTATUS']._serialized_start=19700 + _globals['_DELETEENTERPRISEUSERSTATUS']._serialized_end=19811 + 
_globals['_DELETEENTERPRISEUSERSRESPONSE']._serialized_start=19813 + _globals['_DELETEENTERPRISEUSERSRESPONSE']._serialized_end=19906 + _globals['_CLEARSECURITYDATAREQUEST']._serialized_start=19908 + _globals['_CLEARSECURITYDATAREQUEST']._serialized_end=20027 + _globals['_LISTDOMAINSRESPONSE']._serialized_start=20029 + _globals['_LISTDOMAINSRESPONSE']._serialized_end=20066 + _globals['_RESERVEDOMAINREQUEST']._serialized_start=20068 + _globals['_RESERVEDOMAINREQUEST']._serialized_end=20168 + _globals['_RESERVEDOMAINRESPONSE']._serialized_start=20170 + _globals['_RESERVEDOMAINRESPONSE']._serialized_end=20208 + _globals['_ROLESBYTEAM']._serialized_start=20210 + _globals['_ROLESBYTEAM']._serialized_end=20256 + _globals['_LOCKUSERSREQUEST']._serialized_start=20259 + _globals['_LOCKUSERSREQUEST']._serialized_end=20400 + _globals['_LOCKUSERSRESPONSE']._serialized_start=20402 + _globals['_LOCKUSERSRESPONSE']._serialized_end=20469 + _globals['_LOCKUSERRESPONSE']._serialized_start=20471 + _globals['_LOCKUSERRESPONSE']._serialized_end=20581 # @@protoc_insertion_point(module_scope) diff --git a/keepercommander/proto/enterprise_pb2.pyi b/keepercommander/proto/enterprise_pb2.pyi index 5de2bd3e2..c09888285 100644 --- a/keepercommander/proto/enterprise_pb2.pyi +++ b/keepercommander/proto/enterprise_pb2.pyi @@ -2,7 +2,8 @@ from google.protobuf.internal import containers as _containers from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union +from collections.abc import Iterable as _Iterable, Mapping as _Mapping +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union DESCRIPTOR: _descriptor.FileDescriptor @@ -103,6 +104,8 @@ class EnterpriseFlagType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): 
FORBID_ACCOUNT_TRANSFER: _ClassVar[EnterpriseFlagType] NPS_POPUP_OPT_OUT: _ClassVar[EnterpriseFlagType] SHOW_USER_ONBOARD: _ClassVar[EnterpriseFlagType] + FORBID_KEY_TYPE_1: _ClassVar[EnterpriseFlagType] + KEEPER_DRIVE: _ClassVar[EnterpriseFlagType] class UserUpdateStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = () @@ -148,6 +151,15 @@ class ReserveDomainAction(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): DOMAIN_TOKEN: _ClassVar[ReserveDomainAction] DOMAIN_ADD: _ClassVar[ReserveDomainAction] DOMAIN_DELETE: _ClassVar[ReserveDomainAction] + +class UserLockStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + UNKNOWN_LOCK_STATUS: _ClassVar[UserLockStatus] + LOCKED: _ClassVar[UserLockStatus] + DISABLED: _ClassVar[UserLockStatus] + UNLOCKED: _ClassVar[UserLockStatus] + DELETED: _ClassVar[UserLockStatus] + CANT_BE_PENDING: _ClassVar[UserLockStatus] RSA: KeyType ECC: KeyType ROLE_EXISTS: RoleUserModifyStatus @@ -216,6 +228,8 @@ CONSOLE_ONBOARDED: EnterpriseFlagType FORBID_ACCOUNT_TRANSFER: EnterpriseFlagType NPS_POPUP_OPT_OUT: EnterpriseFlagType SHOW_USER_ONBOARD: EnterpriseFlagType +FORBID_KEY_TYPE_1: EnterpriseFlagType +KEEPER_DRIVE: EnterpriseFlagType USER_UPDATE_OK: UserUpdateStatus USER_UPDATE_ACCESS_DENIED: UserUpdateStatus OK: AuditUserStatus @@ -240,6 +254,12 @@ FORCE_CLIENT_RESEND_SECURITY_DATA: ClearSecurityDataType DOMAIN_TOKEN: ReserveDomainAction DOMAIN_ADD: ReserveDomainAction DOMAIN_DELETE: ReserveDomainAction +UNKNOWN_LOCK_STATUS: UserLockStatus +LOCKED: UserLockStatus +DISABLED: UserLockStatus +UNLOCKED: UserLockStatus +DELETED: UserLockStatus +CANT_BE_PENDING: UserLockStatus class EnterpriseKeyPairRequest(_message.Message): __slots__ = ("enterprisePublicKey", "encryptedEnterprisePrivateKey", "keyType") @@ -269,7 +289,7 @@ class EnterpriseUser(_message.Message): enterpriseUsername: str isShareAdmin: bool username: str - def __init__(self, enterpriseUserId: _Optional[int] = ..., email: _Optional[str] = 
..., enterpriseUsername: _Optional[str] = ..., isShareAdmin: bool = ..., username: _Optional[str] = ...) -> None: ... + def __init__(self, enterpriseUserId: _Optional[int] = ..., email: _Optional[str] = ..., enterpriseUsername: _Optional[str] = ..., isShareAdmin: _Optional[bool] = ..., username: _Optional[str] = ...) -> None: ... class GetTeamMemberResponse(_message.Message): __slots__ = ("enterpriseUser",) @@ -299,7 +319,7 @@ class EncryptedTeamKeyRequest(_message.Message): teamUid: bytes encryptedTeamKey: bytes force: bool - def __init__(self, teamUid: _Optional[bytes] = ..., encryptedTeamKey: _Optional[bytes] = ..., force: bool = ...) -> None: ... + def __init__(self, teamUid: _Optional[bytes] = ..., encryptedTeamKey: _Optional[bytes] = ..., force: _Optional[bool] = ...) -> None: ... class ReEncryptedData(_message.Message): __slots__ = ("id", "data") @@ -501,7 +521,7 @@ class DomainPasswordRulesFields(_message.Message): minimum: int maximum: int allowed: bool - def __init__(self, type: _Optional[str] = ..., minimum: _Optional[int] = ..., maximum: _Optional[int] = ..., allowed: bool = ...) -> None: ... + def __init__(self, type: _Optional[str] = ..., minimum: _Optional[int] = ..., maximum: _Optional[int] = ..., allowed: _Optional[bool] = ...) -> None: ... class LoginToMcRequest(_message.Message): __slots__ = ("mcEnterpriseId", "messageSessionUid") @@ -512,12 +532,14 @@ class LoginToMcRequest(_message.Message): def __init__(self, mcEnterpriseId: _Optional[int] = ..., messageSessionUid: _Optional[bytes] = ...) -> None: ... 
class LoginToMcResponse(_message.Message): - __slots__ = ("encryptedSessionToken", "encryptedTreeKey") + __slots__ = ("encryptedSessionToken", "encryptedTreeKey", "forbidKeyType2") ENCRYPTEDSESSIONTOKEN_FIELD_NUMBER: _ClassVar[int] ENCRYPTEDTREEKEY_FIELD_NUMBER: _ClassVar[int] + FORBIDKEYTYPE2_FIELD_NUMBER: _ClassVar[int] encryptedSessionToken: bytes encryptedTreeKey: str - def __init__(self, encryptedSessionToken: _Optional[bytes] = ..., encryptedTreeKey: _Optional[str] = ...) -> None: ... + forbidKeyType2: bool + def __init__(self, encryptedSessionToken: _Optional[bytes] = ..., encryptedTreeKey: _Optional[str] = ..., forbidKeyType2: _Optional[bool] = ...) -> None: ... class DomainPasswordRulesResponse(_message.Message): __slots__ = ("domainPasswordRulesFields",) @@ -535,7 +557,7 @@ class ApproveUserDeviceRequest(_message.Message): encryptedDeviceToken: bytes encryptedDeviceDataKey: bytes denyApproval: bool - def __init__(self, enterpriseUserId: _Optional[int] = ..., encryptedDeviceToken: _Optional[bytes] = ..., encryptedDeviceDataKey: _Optional[bytes] = ..., denyApproval: bool = ...) -> None: ... + def __init__(self, enterpriseUserId: _Optional[int] = ..., encryptedDeviceToken: _Optional[bytes] = ..., encryptedDeviceDataKey: _Optional[bytes] = ..., denyApproval: _Optional[bool] = ...) -> None: ... class ApproveUserDeviceResponse(_message.Message): __slots__ = ("enterpriseUserId", "encryptedDeviceToken", "failed", "message") @@ -547,7 +569,7 @@ class ApproveUserDeviceResponse(_message.Message): encryptedDeviceToken: bytes failed: bool message: str - def __init__(self, enterpriseUserId: _Optional[int] = ..., encryptedDeviceToken: _Optional[bytes] = ..., failed: bool = ..., message: _Optional[str] = ...) -> None: ... + def __init__(self, enterpriseUserId: _Optional[int] = ..., encryptedDeviceToken: _Optional[bytes] = ..., failed: _Optional[bool] = ..., message: _Optional[str] = ...) -> None: ... 
class ApproveUserDevicesRequest(_message.Message): __slots__ = ("deviceRequests",) @@ -635,7 +657,7 @@ class GeneralDataEntity(_message.Message): distributor: bool forbidAccountTransfer: bool showUserOnboard: bool - def __init__(self, enterpriseName: _Optional[str] = ..., restrictVisibility: bool = ..., specialProvisioning: _Optional[_Union[SpecialProvisioning, _Mapping]] = ..., userPrivilege: _Optional[_Union[UserPrivilege, _Mapping]] = ..., distributor: bool = ..., forbidAccountTransfer: bool = ..., showUserOnboard: bool = ...) -> None: ... + def __init__(self, enterpriseName: _Optional[str] = ..., restrictVisibility: _Optional[bool] = ..., specialProvisioning: _Optional[_Union[SpecialProvisioning, _Mapping]] = ..., userPrivilege: _Optional[_Union[UserPrivilege, _Mapping]] = ..., distributor: _Optional[bool] = ..., forbidAccountTransfer: _Optional[bool] = ..., showUserOnboard: _Optional[bool] = ...) -> None: ... class Node(_message.Message): __slots__ = ("nodeId", "parentId", "bridgeId", "scimId", "licenseId", "encryptedData", "duoEnabled", "rsaEnabled", "ssoServiceProviderId", "restrictVisibility", "ssoServiceProviderIds") @@ -661,7 +683,7 @@ class Node(_message.Message): ssoServiceProviderId: int restrictVisibility: bool ssoServiceProviderIds: _containers.RepeatedScalarFieldContainer[int] - def __init__(self, nodeId: _Optional[int] = ..., parentId: _Optional[int] = ..., bridgeId: _Optional[int] = ..., scimId: _Optional[int] = ..., licenseId: _Optional[int] = ..., encryptedData: _Optional[str] = ..., duoEnabled: bool = ..., rsaEnabled: bool = ..., ssoServiceProviderId: _Optional[int] = ..., restrictVisibility: bool = ..., ssoServiceProviderIds: _Optional[_Iterable[int]] = ...) -> None: ... 
+ def __init__(self, nodeId: _Optional[int] = ..., parentId: _Optional[int] = ..., bridgeId: _Optional[int] = ..., scimId: _Optional[int] = ..., licenseId: _Optional[int] = ..., encryptedData: _Optional[str] = ..., duoEnabled: _Optional[bool] = ..., rsaEnabled: _Optional[bool] = ..., ssoServiceProviderId: _Optional[int] = ..., restrictVisibility: _Optional[bool] = ..., ssoServiceProviderIds: _Optional[_Iterable[int]] = ...) -> None: ... class Role(_message.Message): __slots__ = ("roleId", "nodeId", "encryptedData", "keyType", "visibleBelow", "newUserInherit", "roleType") @@ -679,7 +701,7 @@ class Role(_message.Message): visibleBelow: bool newUserInherit: bool roleType: str - def __init__(self, roleId: _Optional[int] = ..., nodeId: _Optional[int] = ..., encryptedData: _Optional[str] = ..., keyType: _Optional[str] = ..., visibleBelow: bool = ..., newUserInherit: bool = ..., roleType: _Optional[str] = ...) -> None: ... + def __init__(self, roleId: _Optional[int] = ..., nodeId: _Optional[int] = ..., encryptedData: _Optional[str] = ..., keyType: _Optional[str] = ..., visibleBelow: _Optional[bool] = ..., newUserInherit: _Optional[bool] = ..., roleType: _Optional[str] = ...) -> None: ... 
class User(_message.Message): __slots__ = ("enterpriseUserId", "nodeId", "encryptedData", "keyType", "username", "status", "lock", "userId", "accountShareExpiration", "fullName", "jobTitle", "tfaEnabled", "transferAcceptanceStatus") @@ -709,7 +731,7 @@ class User(_message.Message): jobTitle: str tfaEnabled: bool transferAcceptanceStatus: TransferAcceptanceStatus - def __init__(self, enterpriseUserId: _Optional[int] = ..., nodeId: _Optional[int] = ..., encryptedData: _Optional[str] = ..., keyType: _Optional[str] = ..., username: _Optional[str] = ..., status: _Optional[str] = ..., lock: _Optional[int] = ..., userId: _Optional[int] = ..., accountShareExpiration: _Optional[int] = ..., fullName: _Optional[str] = ..., jobTitle: _Optional[str] = ..., tfaEnabled: bool = ..., transferAcceptanceStatus: _Optional[_Union[TransferAcceptanceStatus, str]] = ...) -> None: ... + def __init__(self, enterpriseUserId: _Optional[int] = ..., nodeId: _Optional[int] = ..., encryptedData: _Optional[str] = ..., keyType: _Optional[str] = ..., username: _Optional[str] = ..., status: _Optional[str] = ..., lock: _Optional[int] = ..., userId: _Optional[int] = ..., accountShareExpiration: _Optional[int] = ..., fullName: _Optional[str] = ..., jobTitle: _Optional[str] = ..., tfaEnabled: _Optional[bool] = ..., transferAcceptanceStatus: _Optional[_Union[TransferAcceptanceStatus, str]] = ...) -> None: ... class UserAlias(_message.Message): __slots__ = ("enterpriseUserId", "username") @@ -745,7 +767,7 @@ class ManagedNode(_message.Message): roleId: int managedNodeId: int cascadeNodeManagement: bool - def __init__(self, roleId: _Optional[int] = ..., managedNodeId: _Optional[int] = ..., cascadeNodeManagement: bool = ...) -> None: ... + def __init__(self, roleId: _Optional[int] = ..., managedNodeId: _Optional[int] = ..., cascadeNodeManagement: _Optional[bool] = ...) -> None: ... 
class UserManagedNode(_message.Message): __slots__ = ("nodeId", "cascadeNodeManagement", "privileges") @@ -755,7 +777,7 @@ class UserManagedNode(_message.Message): nodeId: int cascadeNodeManagement: bool privileges: _containers.RepeatedScalarFieldContainer[str] - def __init__(self, nodeId: _Optional[int] = ..., cascadeNodeManagement: bool = ..., privileges: _Optional[_Iterable[str]] = ...) -> None: ... + def __init__(self, nodeId: _Optional[int] = ..., cascadeNodeManagement: _Optional[bool] = ..., privileges: _Optional[_Iterable[str]] = ...) -> None: ... class UserPrivilege(_message.Message): __slots__ = ("userManagedNodes", "enterpriseUserId", "encryptedData") @@ -785,6 +807,16 @@ class RolePrivilege(_message.Message): privilegeType: str def __init__(self, managedNodeId: _Optional[int] = ..., roleId: _Optional[int] = ..., privilegeType: _Optional[str] = ...) -> None: ... +class PrivilegesByManagedNode(_message.Message): + __slots__ = ("managedNodeId", "roleId", "privileges") + MANAGEDNODEID_FIELD_NUMBER: _ClassVar[int] + ROLEID_FIELD_NUMBER: _ClassVar[int] + PRIVILEGES_FIELD_NUMBER: _ClassVar[int] + managedNodeId: int + roleId: int + privileges: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, managedNodeId: _Optional[int] = ..., roleId: _Optional[int] = ..., privileges: _Optional[_Iterable[str]] = ...) -> None: ... + class RoleEnforcement(_message.Message): __slots__ = ("roleId", "enforcementType", "value") ROLEID_FIELD_NUMBER: _ClassVar[int] @@ -813,7 +845,7 @@ class Team(_message.Message): restrictView: bool encryptedData: str encryptedTeamKey: str - def __init__(self, teamUid: _Optional[bytes] = ..., name: _Optional[str] = ..., nodeId: _Optional[int] = ..., restrictEdit: bool = ..., restrictShare: bool = ..., restrictView: bool = ..., encryptedData: _Optional[str] = ..., encryptedTeamKey: _Optional[str] = ...) -> None: ... 
+ def __init__(self, teamUid: _Optional[bytes] = ..., name: _Optional[str] = ..., nodeId: _Optional[int] = ..., restrictEdit: _Optional[bool] = ..., restrictShare: _Optional[bool] = ..., restrictView: _Optional[bool] = ..., encryptedData: _Optional[str] = ..., encryptedTeamKey: _Optional[str] = ...) -> None: ... class TeamUser(_message.Message): __slots__ = ("teamUid", "enterpriseUserId", "userType") @@ -859,7 +891,7 @@ class MspInfo(_message.Message): managedCompanies: _containers.RepeatedCompositeFieldContainer[ManagedCompany] allowUnlimitedLicenses: bool addOns: _containers.RepeatedCompositeFieldContainer[LicenseAddOn] - def __init__(self, enterpriseId: _Optional[int] = ..., enterpriseName: _Optional[str] = ..., allocatedLicenses: _Optional[int] = ..., allowedMcProducts: _Optional[_Iterable[str]] = ..., allowedAddOns: _Optional[_Iterable[str]] = ..., maxFilePlanType: _Optional[str] = ..., managedCompanies: _Optional[_Iterable[_Union[ManagedCompany, _Mapping]]] = ..., allowUnlimitedLicenses: bool = ..., addOns: _Optional[_Iterable[_Union[LicenseAddOn, _Mapping]]] = ...) -> None: ... + def __init__(self, enterpriseId: _Optional[int] = ..., enterpriseName: _Optional[str] = ..., allocatedLicenses: _Optional[int] = ..., allowedMcProducts: _Optional[_Iterable[str]] = ..., allowedAddOns: _Optional[_Iterable[str]] = ..., maxFilePlanType: _Optional[str] = ..., managedCompanies: _Optional[_Iterable[_Union[ManagedCompany, _Mapping]]] = ..., allowUnlimitedLicenses: _Optional[bool] = ..., addOns: _Optional[_Iterable[_Union[LicenseAddOn, _Mapping]]] = ...) -> None: ... 
class ManagedCompany(_message.Message): __slots__ = ("mcEnterpriseId", "mcEnterpriseName", "mspNodeId", "numberOfSeats", "numberOfUsers", "productId", "isExpired", "treeKey", "tree_key_role", "filePlanType", "addOns") @@ -885,7 +917,7 @@ class ManagedCompany(_message.Message): tree_key_role: int filePlanType: str addOns: _containers.RepeatedCompositeFieldContainer[LicenseAddOn] - def __init__(self, mcEnterpriseId: _Optional[int] = ..., mcEnterpriseName: _Optional[str] = ..., mspNodeId: _Optional[int] = ..., numberOfSeats: _Optional[int] = ..., numberOfUsers: _Optional[int] = ..., productId: _Optional[str] = ..., isExpired: bool = ..., treeKey: _Optional[str] = ..., tree_key_role: _Optional[int] = ..., filePlanType: _Optional[str] = ..., addOns: _Optional[_Iterable[_Union[LicenseAddOn, _Mapping]]] = ...) -> None: ... + def __init__(self, mcEnterpriseId: _Optional[int] = ..., mcEnterpriseName: _Optional[str] = ..., mspNodeId: _Optional[int] = ..., numberOfSeats: _Optional[int] = ..., numberOfUsers: _Optional[int] = ..., productId: _Optional[str] = ..., isExpired: _Optional[bool] = ..., treeKey: _Optional[str] = ..., tree_key_role: _Optional[int] = ..., filePlanType: _Optional[str] = ..., addOns: _Optional[_Iterable[_Union[LicenseAddOn, _Mapping]]] = ...) -> None: ... class MSPPool(_message.Message): __slots__ = ("productId", "seats", "availableSeats", "stash") @@ -931,7 +963,7 @@ class LicenseAddOn(_message.Message): apiCallCount: int tierDescription: str seatsAllocated: int - def __init__(self, name: _Optional[str] = ..., enabled: bool = ..., isTrial: bool = ..., expiration: _Optional[int] = ..., created: _Optional[int] = ..., seats: _Optional[int] = ..., activationTime: _Optional[int] = ..., includedInProduct: bool = ..., apiCallCount: _Optional[int] = ..., tierDescription: _Optional[str] = ..., seatsAllocated: _Optional[int] = ...) -> None: ... 
+ def __init__(self, name: _Optional[str] = ..., enabled: _Optional[bool] = ..., isTrial: _Optional[bool] = ..., expiration: _Optional[int] = ..., created: _Optional[int] = ..., seats: _Optional[int] = ..., activationTime: _Optional[int] = ..., includedInProduct: _Optional[bool] = ..., apiCallCount: _Optional[int] = ..., tierDescription: _Optional[str] = ..., seatsAllocated: _Optional[int] = ...) -> None: ... class MCDefault(_message.Message): __slots__ = ("mcProduct", "addOns", "filePlanType", "maxLicenses", "fixedMaxLicenses") @@ -945,7 +977,7 @@ class MCDefault(_message.Message): filePlanType: str maxLicenses: int fixedMaxLicenses: bool - def __init__(self, mcProduct: _Optional[str] = ..., addOns: _Optional[_Iterable[str]] = ..., filePlanType: _Optional[str] = ..., maxLicenses: _Optional[int] = ..., fixedMaxLicenses: bool = ...) -> None: ... + def __init__(self, mcProduct: _Optional[str] = ..., addOns: _Optional[_Iterable[str]] = ..., filePlanType: _Optional[str] = ..., maxLicenses: _Optional[int] = ..., fixedMaxLicenses: _Optional[bool] = ...) -> None: ... class MSPPermits(_message.Message): __slots__ = ("restricted", "maxAllowedLicenses", "allowedMcProducts", "allowedAddOns", "maxFilePlanType", "allowUnlimitedLicenses", "mcDefaults") @@ -963,7 +995,7 @@ class MSPPermits(_message.Message): maxFilePlanType: str allowUnlimitedLicenses: bool mcDefaults: _containers.RepeatedCompositeFieldContainer[MCDefault] - def __init__(self, restricted: bool = ..., maxAllowedLicenses: _Optional[int] = ..., allowedMcProducts: _Optional[_Iterable[str]] = ..., allowedAddOns: _Optional[_Iterable[str]] = ..., maxFilePlanType: _Optional[str] = ..., allowUnlimitedLicenses: bool = ..., mcDefaults: _Optional[_Iterable[_Union[MCDefault, _Mapping]]] = ...) -> None: ... 
+ def __init__(self, restricted: _Optional[bool] = ..., maxAllowedLicenses: _Optional[int] = ..., allowedMcProducts: _Optional[_Iterable[str]] = ..., allowedAddOns: _Optional[_Iterable[str]] = ..., maxFilePlanType: _Optional[str] = ..., allowUnlimitedLicenses: _Optional[bool] = ..., mcDefaults: _Optional[_Iterable[_Union[MCDefault, _Mapping]]] = ...) -> None: ... class License(_message.Message): __slots__ = ("paid", "numberOfSeats", "expiration", "licenseKeyId", "productTypeId", "name", "enterpriseLicenseId", "seatsAllocated", "seatsPending", "tier", "filePlanTypeId", "maxBytes", "storageExpiration", "licenseStatus", "mspPool", "managedBy", "addOns", "nextBillingDate", "hasMSPLegacyLog", "mspPermits", "distributor") @@ -1009,7 +1041,7 @@ class License(_message.Message): hasMSPLegacyLog: bool mspPermits: MSPPermits distributor: bool - def __init__(self, paid: bool = ..., numberOfSeats: _Optional[int] = ..., expiration: _Optional[int] = ..., licenseKeyId: _Optional[int] = ..., productTypeId: _Optional[int] = ..., name: _Optional[str] = ..., enterpriseLicenseId: _Optional[int] = ..., seatsAllocated: _Optional[int] = ..., seatsPending: _Optional[int] = ..., tier: _Optional[int] = ..., filePlanTypeId: _Optional[int] = ..., maxBytes: _Optional[int] = ..., storageExpiration: _Optional[int] = ..., licenseStatus: _Optional[str] = ..., mspPool: _Optional[_Iterable[_Union[MSPPool, _Mapping]]] = ..., managedBy: _Optional[_Union[MSPContact, _Mapping]] = ..., addOns: _Optional[_Iterable[_Union[LicenseAddOn, _Mapping]]] = ..., nextBillingDate: _Optional[int] = ..., hasMSPLegacyLog: bool = ..., mspPermits: _Optional[_Union[MSPPermits, _Mapping]] = ..., distributor: bool = ...) -> None: ... 
+ def __init__(self, paid: _Optional[bool] = ..., numberOfSeats: _Optional[int] = ..., expiration: _Optional[int] = ..., licenseKeyId: _Optional[int] = ..., productTypeId: _Optional[int] = ..., name: _Optional[str] = ..., enterpriseLicenseId: _Optional[int] = ..., seatsAllocated: _Optional[int] = ..., seatsPending: _Optional[int] = ..., tier: _Optional[int] = ..., filePlanTypeId: _Optional[int] = ..., maxBytes: _Optional[int] = ..., storageExpiration: _Optional[int] = ..., licenseStatus: _Optional[str] = ..., mspPool: _Optional[_Iterable[_Union[MSPPool, _Mapping]]] = ..., managedBy: _Optional[_Union[MSPContact, _Mapping]] = ..., addOns: _Optional[_Iterable[_Union[LicenseAddOn, _Mapping]]] = ..., nextBillingDate: _Optional[int] = ..., hasMSPLegacyLog: _Optional[bool] = ..., mspPermits: _Optional[_Union[MSPPermits, _Mapping]] = ..., distributor: _Optional[bool] = ...) -> None: ... class Bridge(_message.Message): __slots__ = ("bridgeId", "nodeId", "wanIpEnforcement", "lanIpEnforcement", "status") @@ -1039,7 +1071,7 @@ class Scim(_message.Message): lastSynced: int rolePrefix: str uniqueGroups: bool - def __init__(self, scimId: _Optional[int] = ..., nodeId: _Optional[int] = ..., status: _Optional[str] = ..., lastSynced: _Optional[int] = ..., rolePrefix: _Optional[str] = ..., uniqueGroups: bool = ...) -> None: ... + def __init__(self, scimId: _Optional[int] = ..., nodeId: _Optional[int] = ..., status: _Optional[str] = ..., lastSynced: _Optional[int] = ..., rolePrefix: _Optional[str] = ..., uniqueGroups: _Optional[bool] = ...) -> None: ... class EmailProvision(_message.Message): __slots__ = ("id", "nodeId", "domain", "method") @@ -1111,7 +1143,7 @@ class SsoService(_message.Message): inviteNewUsers: bool active: bool isCloud: bool - def __init__(self, ssoServiceProviderId: _Optional[int] = ..., nodeId: _Optional[int] = ..., name: _Optional[str] = ..., sp_url: _Optional[str] = ..., inviteNewUsers: bool = ..., active: bool = ..., isCloud: bool = ...) -> None: ... 
+ def __init__(self, ssoServiceProviderId: _Optional[int] = ..., nodeId: _Optional[int] = ..., name: _Optional[str] = ..., sp_url: _Optional[str] = ..., inviteNewUsers: _Optional[bool] = ..., active: _Optional[bool] = ..., isCloud: _Optional[bool] = ...) -> None: ... class ReportFilterUser(_message.Message): __slots__ = ("userId", "email") @@ -1157,7 +1189,7 @@ class EnterpriseData(_message.Message): entity: EnterpriseDataEntity delete: bool data: _containers.RepeatedScalarFieldContainer[bytes] - def __init__(self, entity: _Optional[_Union[EnterpriseDataEntity, str]] = ..., delete: bool = ..., data: _Optional[_Iterable[bytes]] = ...) -> None: ... + def __init__(self, entity: _Optional[_Union[EnterpriseDataEntity, str]] = ..., delete: _Optional[bool] = ..., data: _Optional[_Iterable[bytes]] = ...) -> None: ... class EnterpriseDataResponse(_message.Message): __slots__ = ("continuationToken", "hasMore", "cacheStatus", "data", "generalData") @@ -1171,7 +1203,7 @@ class EnterpriseDataResponse(_message.Message): cacheStatus: CacheStatus data: _containers.RepeatedCompositeFieldContainer[EnterpriseData] generalData: GeneralDataEntity - def __init__(self, continuationToken: _Optional[bytes] = ..., hasMore: bool = ..., cacheStatus: _Optional[_Union[CacheStatus, str]] = ..., data: _Optional[_Iterable[_Union[EnterpriseData, _Mapping]]] = ..., generalData: _Optional[_Union[GeneralDataEntity, _Mapping]] = ...) -> None: ... + def __init__(self, continuationToken: _Optional[bytes] = ..., hasMore: _Optional[bool] = ..., cacheStatus: _Optional[_Union[CacheStatus, str]] = ..., data: _Optional[_Iterable[_Union[EnterpriseData, _Mapping]]] = ..., generalData: _Optional[_Union[GeneralDataEntity, _Mapping]] = ...) -> None: ... 
class BackupRequest(_message.Message): __slots__ = ("continuationToken",) @@ -1335,7 +1367,7 @@ class SharedRecordEvent(_message.Message): canEdit: bool canReshare: bool shareFrom: int - def __init__(self, recordUid: _Optional[bytes] = ..., userName: _Optional[str] = ..., canEdit: bool = ..., canReshare: bool = ..., shareFrom: _Optional[int] = ...) -> None: ... + def __init__(self, recordUid: _Optional[bytes] = ..., userName: _Optional[str] = ..., canEdit: _Optional[bool] = ..., canReshare: _Optional[bool] = ..., shareFrom: _Optional[int] = ...) -> None: ... class SetRestrictVisibilityRequest(_message.Message): __slots__ = ("nodeId",) @@ -1361,7 +1393,7 @@ class UserAddRequest(_message.Message): jobTitle: str email: str suppressEmailInvite: bool - def __init__(self, enterpriseUserId: _Optional[int] = ..., nodeId: _Optional[int] = ..., encryptedData: _Optional[bytes] = ..., keyType: _Optional[_Union[EncryptedKeyType, str]] = ..., fullName: _Optional[str] = ..., jobTitle: _Optional[str] = ..., email: _Optional[str] = ..., suppressEmailInvite: bool = ...) -> None: ... + def __init__(self, enterpriseUserId: _Optional[int] = ..., nodeId: _Optional[int] = ..., encryptedData: _Optional[bytes] = ..., keyType: _Optional[_Union[EncryptedKeyType, str]] = ..., fullName: _Optional[str] = ..., jobTitle: _Optional[str] = ..., email: _Optional[str] = ..., suppressEmailInvite: _Optional[bool] = ...) -> None: ... class UserUpdateRequest(_message.Message): __slots__ = ("users",) @@ -1407,7 +1439,7 @@ class ComplianceRecordOwnersRequest(_message.Message): INCLUDENONSHARED_FIELD_NUMBER: _ClassVar[int] nodeIds: _containers.RepeatedScalarFieldContainer[int] includeNonShared: bool - def __init__(self, nodeIds: _Optional[_Iterable[int]] = ..., includeNonShared: bool = ...) -> None: ... + def __init__(self, nodeIds: _Optional[_Iterable[int]] = ..., includeNonShared: _Optional[bool] = ...) -> None: ... 
class ComplianceRecordOwnersResponse(_message.Message): __slots__ = ("recordOwners",) @@ -1421,7 +1453,7 @@ class RecordOwner(_message.Message): SHARED_FIELD_NUMBER: _ClassVar[int] enterpriseUserId: int shared: bool - def __init__(self, enterpriseUserId: _Optional[int] = ..., shared: bool = ...) -> None: ... + def __init__(self, enterpriseUserId: _Optional[int] = ..., shared: _Optional[bool] = ...) -> None: ... class PreliminaryComplianceDataRequest(_message.Message): __slots__ = ("enterpriseUserIds", "includeNonShared", "continuationToken", "includeTotalMatchingRecordsInFirstResponse") @@ -1433,7 +1465,7 @@ class PreliminaryComplianceDataRequest(_message.Message): includeNonShared: bool continuationToken: bytes includeTotalMatchingRecordsInFirstResponse: bool - def __init__(self, enterpriseUserIds: _Optional[_Iterable[int]] = ..., includeNonShared: bool = ..., continuationToken: _Optional[bytes] = ..., includeTotalMatchingRecordsInFirstResponse: bool = ...) -> None: ... + def __init__(self, enterpriseUserIds: _Optional[_Iterable[int]] = ..., includeNonShared: _Optional[bool] = ..., continuationToken: _Optional[bytes] = ..., includeTotalMatchingRecordsInFirstResponse: _Optional[bool] = ...) -> None: ... class PreliminaryComplianceDataResponse(_message.Message): __slots__ = ("auditUserData", "continuationToken", "hasMore", "totalMatchingRecords") @@ -1445,7 +1477,7 @@ class PreliminaryComplianceDataResponse(_message.Message): continuationToken: bytes hasMore: bool totalMatchingRecords: int - def __init__(self, auditUserData: _Optional[_Iterable[_Union[AuditUserData, _Mapping]]] = ..., continuationToken: _Optional[bytes] = ..., hasMore: bool = ..., totalMatchingRecords: _Optional[int] = ...) -> None: ... + def __init__(self, auditUserData: _Optional[_Iterable[_Union[AuditUserData, _Mapping]]] = ..., continuationToken: _Optional[bytes] = ..., hasMore: _Optional[bool] = ..., totalMatchingRecords: _Optional[int] = ...) -> None: ... 
class AuditUserRecord(_message.Message): __slots__ = ("recordUid", "encryptedData", "shared") @@ -1455,7 +1487,7 @@ class AuditUserRecord(_message.Message): recordUid: bytes encryptedData: bytes shared: bool - def __init__(self, recordUid: _Optional[bytes] = ..., encryptedData: _Optional[bytes] = ..., shared: bool = ...) -> None: ... + def __init__(self, recordUid: _Optional[bytes] = ..., encryptedData: _Optional[bytes] = ..., shared: _Optional[bool] = ...) -> None: ... class AuditUserData(_message.Message): __slots__ = ("enterpriseUserId", "auditUserRecords", "status") @@ -1489,7 +1521,7 @@ class ComplianceReportRequest(_message.Message): complianceReportRun: ComplianceReportRun reportName: str saveReport: bool - def __init__(self, complianceReportRun: _Optional[_Union[ComplianceReportRun, _Mapping]] = ..., reportName: _Optional[str] = ..., saveReport: bool = ...) -> None: ... + def __init__(self, complianceReportRun: _Optional[_Union[ComplianceReportRun, _Mapping]] = ..., reportName: _Optional[str] = ..., saveReport: _Optional[bool] = ...) -> None: ... class ComplianceReportRun(_message.Message): __slots__ = ("reportCriteriaAndFilter", "users", "records") @@ -1527,7 +1559,7 @@ class ComplianceReportCriteria(_message.Message): jobTitles: _containers.RepeatedScalarFieldContainer[str] enterpriseUserIds: _containers.RepeatedScalarFieldContainer[int] includeNonShared: bool - def __init__(self, jobTitles: _Optional[_Iterable[str]] = ..., enterpriseUserIds: _Optional[_Iterable[int]] = ..., includeNonShared: bool = ...) -> None: ... + def __init__(self, jobTitles: _Optional[_Iterable[str]] = ..., enterpriseUserIds: _Optional[_Iterable[int]] = ..., includeNonShared: _Optional[bool] = ...) -> None: ... 
class ComplianceReportFilter(_message.Message): __slots__ = ("recordTitles", "recordUids", "jobTitles", "urls", "recordTypes") @@ -1591,7 +1623,7 @@ class AuditRecord(_message.Message): inTrash: bool treeLeft: int treeRight: int - def __init__(self, recordUid: _Optional[bytes] = ..., auditData: _Optional[bytes] = ..., hasAttachments: bool = ..., inTrash: bool = ..., treeLeft: _Optional[int] = ..., treeRight: _Optional[int] = ...) -> None: ... + def __init__(self, recordUid: _Optional[bytes] = ..., auditData: _Optional[bytes] = ..., hasAttachments: _Optional[bool] = ..., inTrash: _Optional[bool] = ..., treeLeft: _Optional[int] = ..., treeRight: _Optional[int] = ...) -> None: ... class AuditRole(_message.Message): __slots__ = ("roleId", "encryptedData", "restrictShareOutsideEnterprise", "restrictShareAll", "restrictShareOfAttachments", "restrictMaskPasswordsWhileEditing", "roleNodeManagements") @@ -1609,7 +1641,7 @@ class AuditRole(_message.Message): restrictShareOfAttachments: bool restrictMaskPasswordsWhileEditing: bool roleNodeManagements: _containers.RepeatedCompositeFieldContainer[RoleNodeManagement] - def __init__(self, roleId: _Optional[int] = ..., encryptedData: _Optional[bytes] = ..., restrictShareOutsideEnterprise: bool = ..., restrictShareAll: bool = ..., restrictShareOfAttachments: bool = ..., restrictMaskPasswordsWhileEditing: bool = ..., roleNodeManagements: _Optional[_Iterable[_Union[RoleNodeManagement, _Mapping]]] = ...) -> None: ... + def __init__(self, roleId: _Optional[int] = ..., encryptedData: _Optional[bytes] = ..., restrictShareOutsideEnterprise: _Optional[bool] = ..., restrictShareAll: _Optional[bool] = ..., restrictShareOfAttachments: _Optional[bool] = ..., restrictMaskPasswordsWhileEditing: _Optional[bool] = ..., roleNodeManagements: _Optional[_Iterable[_Union[RoleNodeManagement, _Mapping]]] = ...) -> None: ... 
class RoleNodeManagement(_message.Message): __slots__ = ("treeLeft", "treeRight", "cascade", "privileges") @@ -1621,7 +1653,7 @@ class RoleNodeManagement(_message.Message): treeRight: int cascade: bool privileges: int - def __init__(self, treeLeft: _Optional[int] = ..., treeRight: _Optional[int] = ..., cascade: bool = ..., privileges: _Optional[int] = ...) -> None: ... + def __init__(self, treeLeft: _Optional[int] = ..., treeRight: _Optional[int] = ..., cascade: _Optional[bool] = ..., privileges: _Optional[int] = ...) -> None: ... class UserProfile(_message.Message): __slots__ = ("enterpriseUserId", "fullName", "jobTitle", "email", "roleIds") @@ -1663,7 +1695,7 @@ class AuditTeam(_message.Message): teamName: str restrictEdit: bool restrictShare: bool - def __init__(self, teamUid: _Optional[bytes] = ..., teamName: _Optional[str] = ..., restrictEdit: bool = ..., restrictShare: bool = ...) -> None: ... + def __init__(self, teamUid: _Optional[bytes] = ..., teamName: _Optional[str] = ..., restrictEdit: _Optional[bool] = ..., restrictShare: _Optional[bool] = ...) -> None: ... class AuditTeamUser(_message.Message): __slots__ = ("teamUid", "enterpriseUserIds") @@ -1767,7 +1799,7 @@ class UserProfileExt(_message.Message): isShareAdminForRequestedObject: bool isShareAdminForSharedFolderOwner: bool hasAccessToObject: bool - def __init__(self, email: _Optional[str] = ..., fullName: _Optional[str] = ..., jobTitle: _Optional[str] = ..., isMSPMCAdmin: bool = ..., isInSharedFolder: bool = ..., isShareAdminForRequestedObject: bool = ..., isShareAdminForSharedFolderOwner: bool = ..., hasAccessToObject: bool = ...) -> None: ... + def __init__(self, email: _Optional[str] = ..., fullName: _Optional[str] = ..., jobTitle: _Optional[str] = ..., isMSPMCAdmin: _Optional[bool] = ..., isInSharedFolder: _Optional[bool] = ..., isShareAdminForRequestedObject: _Optional[bool] = ..., isShareAdminForSharedFolderOwner: _Optional[bool] = ..., hasAccessToObject: _Optional[bool] = ...) -> None: ... 
class GetSharingAdminsResponse(_message.Message): __slots__ = ("userProfileExts",) @@ -1831,7 +1863,7 @@ class TeamsEnterpriseUsersAddTeamResponse(_message.Message): message: str resultCode: str additionalInfo: str - def __init__(self, teamUid: _Optional[bytes] = ..., users: _Optional[_Iterable[_Union[TeamsEnterpriseUsersAddUserResponse, _Mapping]]] = ..., success: bool = ..., message: _Optional[str] = ..., resultCode: _Optional[str] = ..., additionalInfo: _Optional[str] = ...) -> None: ... + def __init__(self, teamUid: _Optional[bytes] = ..., users: _Optional[_Iterable[_Union[TeamsEnterpriseUsersAddUserResponse, _Mapping]]] = ..., success: _Optional[bool] = ..., message: _Optional[str] = ..., resultCode: _Optional[str] = ..., additionalInfo: _Optional[str] = ...) -> None: ... class TeamsEnterpriseUsersAddUserResponse(_message.Message): __slots__ = ("enterpriseUserId", "revision", "success", "message", "resultCode", "additionalInfo") @@ -1847,7 +1879,7 @@ class TeamsEnterpriseUsersAddUserResponse(_message.Message): message: str resultCode: str additionalInfo: str - def __init__(self, enterpriseUserId: _Optional[int] = ..., revision: _Optional[int] = ..., success: bool = ..., message: _Optional[str] = ..., resultCode: _Optional[str] = ..., additionalInfo: _Optional[str] = ...) -> None: ... + def __init__(self, enterpriseUserId: _Optional[int] = ..., revision: _Optional[int] = ..., success: _Optional[bool] = ..., message: _Optional[str] = ..., resultCode: _Optional[str] = ..., additionalInfo: _Optional[str] = ...) -> None: ... 
class TeamEnterpriseUserRemove(_message.Message): __slots__ = ("teamUid", "enterpriseUserId") @@ -1881,7 +1913,7 @@ class TeamEnterpriseUserRemoveResponse(_message.Message): resultCode: str message: str additionalInfo: str - def __init__(self, teamEnterpriseUserRemove: _Optional[_Union[TeamEnterpriseUserRemove, _Mapping]] = ..., success: bool = ..., resultCode: _Optional[str] = ..., message: _Optional[str] = ..., additionalInfo: _Optional[str] = ...) -> None: ... + def __init__(self, teamEnterpriseUserRemove: _Optional[_Union[TeamEnterpriseUserRemove, _Mapping]] = ..., success: _Optional[bool] = ..., resultCode: _Optional[str] = ..., message: _Optional[str] = ..., additionalInfo: _Optional[str] = ...) -> None: ... class DomainAlias(_message.Message): __slots__ = ("domain", "alias", "status", "message") @@ -2001,7 +2033,7 @@ class EnterpriseUsersAdd(_message.Message): inviteeLocale: str move: bool roleId: int - def __init__(self, enterpriseUserId: _Optional[int] = ..., username: _Optional[str] = ..., nodeId: _Optional[int] = ..., encryptedData: _Optional[str] = ..., keyType: _Optional[_Union[EncryptedKeyType, str]] = ..., fullName: _Optional[str] = ..., jobTitle: _Optional[str] = ..., suppressEmailInvite: bool = ..., inviteeLocale: _Optional[str] = ..., move: bool = ..., roleId: _Optional[int] = ...) -> None: ... + def __init__(self, enterpriseUserId: _Optional[int] = ..., username: _Optional[str] = ..., nodeId: _Optional[int] = ..., encryptedData: _Optional[str] = ..., keyType: _Optional[_Union[EncryptedKeyType, str]] = ..., fullName: _Optional[str] = ..., jobTitle: _Optional[str] = ..., suppressEmailInvite: _Optional[bool] = ..., inviteeLocale: _Optional[str] = ..., move: _Optional[bool] = ..., roleId: _Optional[int] = ...) -> None: ... 
class EnterpriseUsersAddResponse(_message.Message): __slots__ = ("results", "success", "code", "message", "additionalInfo") @@ -2015,7 +2047,7 @@ class EnterpriseUsersAddResponse(_message.Message): code: str message: str additionalInfo: str - def __init__(self, results: _Optional[_Iterable[_Union[EnterpriseUsersAddResult, _Mapping]]] = ..., success: bool = ..., code: _Optional[str] = ..., message: _Optional[str] = ..., additionalInfo: _Optional[str] = ...) -> None: ... + def __init__(self, results: _Optional[_Iterable[_Union[EnterpriseUsersAddResult, _Mapping]]] = ..., success: _Optional[bool] = ..., code: _Optional[str] = ..., message: _Optional[str] = ..., additionalInfo: _Optional[str] = ...) -> None: ... class EnterpriseUsersAddResult(_message.Message): __slots__ = ("enterpriseUserId", "success", "verificationCode", "code", "message", "additionalInfo") @@ -2031,7 +2063,7 @@ class EnterpriseUsersAddResult(_message.Message): code: str message: str additionalInfo: str - def __init__(self, enterpriseUserId: _Optional[int] = ..., success: bool = ..., verificationCode: _Optional[str] = ..., code: _Optional[str] = ..., message: _Optional[str] = ..., additionalInfo: _Optional[str] = ...) -> None: ... + def __init__(self, enterpriseUserId: _Optional[int] = ..., success: _Optional[bool] = ..., verificationCode: _Optional[str] = ..., code: _Optional[str] = ..., message: _Optional[str] = ..., additionalInfo: _Optional[str] = ...) -> None: ... 
class UpdateMSPPermitsRequest(_message.Message): __slots__ = ("mspEnterpriseId", "maxAllowedLicenses", "allowedMcProducts", "allowedAddOns", "maxFilePlanType", "allowUnlimitedLicenses") @@ -2047,7 +2079,7 @@ class UpdateMSPPermitsRequest(_message.Message): allowedAddOns: _containers.RepeatedScalarFieldContainer[str] maxFilePlanType: str allowUnlimitedLicenses: bool - def __init__(self, mspEnterpriseId: _Optional[int] = ..., maxAllowedLicenses: _Optional[int] = ..., allowedMcProducts: _Optional[_Iterable[str]] = ..., allowedAddOns: _Optional[_Iterable[str]] = ..., maxFilePlanType: _Optional[str] = ..., allowUnlimitedLicenses: bool = ...) -> None: ... + def __init__(self, mspEnterpriseId: _Optional[int] = ..., maxAllowedLicenses: _Optional[int] = ..., allowedMcProducts: _Optional[_Iterable[str]] = ..., allowedAddOns: _Optional[_Iterable[str]] = ..., maxFilePlanType: _Optional[str] = ..., allowUnlimitedLicenses: _Optional[bool] = ...) -> None: ... class DeleteEnterpriseUsersRequest(_message.Message): __slots__ = ("enterpriseUserIds",) @@ -2077,7 +2109,7 @@ class ClearSecurityDataRequest(_message.Message): enterpriseUserId: _containers.RepeatedScalarFieldContainer[int] allUsers: bool type: ClearSecurityDataType - def __init__(self, enterpriseUserId: _Optional[_Iterable[int]] = ..., allUsers: bool = ..., type: _Optional[_Union[ClearSecurityDataType, str]] = ...) -> None: ... + def __init__(self, enterpriseUserId: _Optional[_Iterable[int]] = ..., allUsers: _Optional[bool] = ..., type: _Optional[_Union[ClearSecurityDataType, str]] = ...) -> None: ... class ListDomainsResponse(_message.Message): __slots__ = ("domain",) @@ -2098,3 +2130,39 @@ class ReserveDomainResponse(_message.Message): TOKEN_FIELD_NUMBER: _ClassVar[int] token: str def __init__(self, token: _Optional[str] = ...) -> None: ... 
+ +class RolesByTeam(_message.Message): + __slots__ = ("teamUid", "roleId") + TEAMUID_FIELD_NUMBER: _ClassVar[int] + ROLEID_FIELD_NUMBER: _ClassVar[int] + teamUid: bytes + roleId: _containers.RepeatedScalarFieldContainer[int] + def __init__(self, teamUid: _Optional[bytes] = ..., roleId: _Optional[_Iterable[int]] = ...) -> None: ... + +class LockUsersRequest(_message.Message): + __slots__ = ("lockEnterpriseUserIds", "disableEnterpriseUserIds", "unlockEnterpriseUserIds", "deleteIfPending") + LOCKENTERPRISEUSERIDS_FIELD_NUMBER: _ClassVar[int] + DISABLEENTERPRISEUSERIDS_FIELD_NUMBER: _ClassVar[int] + UNLOCKENTERPRISEUSERIDS_FIELD_NUMBER: _ClassVar[int] + DELETEIFPENDING_FIELD_NUMBER: _ClassVar[int] + lockEnterpriseUserIds: _containers.RepeatedScalarFieldContainer[int] + disableEnterpriseUserIds: _containers.RepeatedScalarFieldContainer[int] + unlockEnterpriseUserIds: _containers.RepeatedScalarFieldContainer[int] + deleteIfPending: bool + def __init__(self, lockEnterpriseUserIds: _Optional[_Iterable[int]] = ..., disableEnterpriseUserIds: _Optional[_Iterable[int]] = ..., unlockEnterpriseUserIds: _Optional[_Iterable[int]] = ..., deleteIfPending: _Optional[bool] = ...) -> None: ... + +class LockUsersResponse(_message.Message): + __slots__ = ("response",) + RESPONSE_FIELD_NUMBER: _ClassVar[int] + response: _containers.RepeatedCompositeFieldContainer[LockUserResponse] + def __init__(self, response: _Optional[_Iterable[_Union[LockUserResponse, _Mapping]]] = ...) -> None: ... + +class LockUserResponse(_message.Message): + __slots__ = ("enterpriseUserId", "status", "errorMessage") + ENTERPRISEUSERID_FIELD_NUMBER: _ClassVar[int] + STATUS_FIELD_NUMBER: _ClassVar[int] + ERRORMESSAGE_FIELD_NUMBER: _ClassVar[int] + enterpriseUserId: int + status: UserLockStatus + errorMessage: str + def __init__(self, enterpriseUserId: _Optional[int] = ..., status: _Optional[_Union[UserLockStatus, str]] = ..., errorMessage: _Optional[str] = ...) -> None: ... 
diff --git a/keepercommander/proto/folder_access_pb2.py b/keepercommander/proto/folder_access_pb2.py new file mode 100644 index 000000000..4fff617fb --- /dev/null +++ b/keepercommander/proto/folder_access_pb2.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: folder_access.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import folder_pb2 as folder__pb2 +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x66older_access.proto\x12\tfolder.v3\x1a\x0c\x66older.proto\x1a\x1cgoogle/api/annotations.proto\"\xa3\x01\n\x16GetFolderAccessRequest\x12\x11\n\tfolderUid\x18\x01 \x03(\x0c\x12<\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.folder.v3.ContinuationTokenH\x00\x88\x01\x01\x12\x15\n\x08pageSize\x18\x03 \x01(\x05H\x01\x88\x01\x01\x42\x14\n\x12_continuationTokenB\x0b\n\t_pageSize\"\xbd\x01\n\x17GetFolderAccessResponse\x12=\n\x13\x66olderAccessResults\x18\x01 \x03(\x0b\x32 .folder.v3.GetFolderAccessResult\x12<\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.folder.v3.ContinuationTokenH\x00\x88\x01\x01\x12\x0f\n\x07hasMore\x18\x03 \x01(\x08\x42\x14\n\x12_continuationToken\")\n\x11\x43ontinuationToken\x12\x14\n\x0clastModified\x18\x01 \x01(\x03\"\x93\x01\n\x15GetFolderAccessResult\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12+\n\taccessors\x18\x02 \x03(\x0b\x32\x18.Folder.FolderAccessData\x12\x30\n\x05\x65rror\x18\x03 \x01(\x0b\x32\x1c.folder.v3.FolderAccessErrorH\x00\x88\x01\x01\x42\x08\n\x06_error\"P\n\x11\x46olderAccessError\x12*\n\x06status\x18\x01 
\x01(\x0e\x32\x1a.Folder.FolderModifyStatus\x12\x0f\n\x07message\x18\x02 \x01(\t2\x98\x01\n\rFolderService\x12\x86\x01\n\x0fGetFolderAccess\x12!.folder.v3.GetFolderAccessRequest\x1a\".folder.v3.GetFolderAccessResponse\",\x82\xd3\xe4\x93\x02&\"!/api/rest/vault/folders/v3/access:\x01*B*\n&com.keepersecurity.proto.api.folder.v3P\x01\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'folder_access_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n&com.keepersecurity.proto.api.folder.v3P\001' + _globals['_FOLDERSERVICE'].methods_by_name['GetFolderAccess']._loaded_options = None + _globals['_FOLDERSERVICE'].methods_by_name['GetFolderAccess']._serialized_options = b'\202\323\344\223\002&\"!/api/rest/vault/folders/v3/access:\001*' + _globals['_GETFOLDERACCESSREQUEST']._serialized_start=79 + _globals['_GETFOLDERACCESSREQUEST']._serialized_end=242 + _globals['_GETFOLDERACCESSRESPONSE']._serialized_start=245 + _globals['_GETFOLDERACCESSRESPONSE']._serialized_end=434 + _globals['_CONTINUATIONTOKEN']._serialized_start=436 + _globals['_CONTINUATIONTOKEN']._serialized_end=477 + _globals['_GETFOLDERACCESSRESULT']._serialized_start=480 + _globals['_GETFOLDERACCESSRESULT']._serialized_end=627 + _globals['_FOLDERACCESSERROR']._serialized_start=629 + _globals['_FOLDERACCESSERROR']._serialized_end=709 + _globals['_FOLDERSERVICE']._serialized_start=712 + _globals['_FOLDERSERVICE']._serialized_end=864 +# @@protoc_insertion_point(module_scope) diff --git a/keepercommander/proto/folder_access_pb2.pyi b/keepercommander/proto/folder_access_pb2.pyi new file mode 100644 index 000000000..f555cb6e8 --- /dev/null +++ b/keepercommander/proto/folder_access_pb2.pyi @@ -0,0 +1,53 @@ +import folder_pb2 as _folder_pb2 +from google.api import annotations_pb2 as _annotations_pb2 +from 
google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from collections.abc import Iterable as _Iterable, Mapping as _Mapping +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class GetFolderAccessRequest(_message.Message): + __slots__ = ("folderUid", "continuationToken", "pageSize") + FOLDERUID_FIELD_NUMBER: _ClassVar[int] + CONTINUATIONTOKEN_FIELD_NUMBER: _ClassVar[int] + PAGESIZE_FIELD_NUMBER: _ClassVar[int] + folderUid: _containers.RepeatedScalarFieldContainer[bytes] + continuationToken: ContinuationToken + pageSize: int + def __init__(self, folderUid: _Optional[_Iterable[bytes]] = ..., continuationToken: _Optional[_Union[ContinuationToken, _Mapping]] = ..., pageSize: _Optional[int] = ...) -> None: ... + +class GetFolderAccessResponse(_message.Message): + __slots__ = ("folderAccessResults", "continuationToken", "hasMore") + FOLDERACCESSRESULTS_FIELD_NUMBER: _ClassVar[int] + CONTINUATIONTOKEN_FIELD_NUMBER: _ClassVar[int] + HASMORE_FIELD_NUMBER: _ClassVar[int] + folderAccessResults: _containers.RepeatedCompositeFieldContainer[GetFolderAccessResult] + continuationToken: ContinuationToken + hasMore: bool + def __init__(self, folderAccessResults: _Optional[_Iterable[_Union[GetFolderAccessResult, _Mapping]]] = ..., continuationToken: _Optional[_Union[ContinuationToken, _Mapping]] = ..., hasMore: _Optional[bool] = ...) -> None: ... + +class ContinuationToken(_message.Message): + __slots__ = ("lastModified",) + LASTMODIFIED_FIELD_NUMBER: _ClassVar[int] + lastModified: int + def __init__(self, lastModified: _Optional[int] = ...) -> None: ... 
+ +class GetFolderAccessResult(_message.Message): + __slots__ = ("folderUid", "accessors", "error") + FOLDERUID_FIELD_NUMBER: _ClassVar[int] + ACCESSORS_FIELD_NUMBER: _ClassVar[int] + ERROR_FIELD_NUMBER: _ClassVar[int] + folderUid: bytes + accessors: _containers.RepeatedCompositeFieldContainer[_folder_pb2.FolderAccessData] + error: FolderAccessError + def __init__(self, folderUid: _Optional[bytes] = ..., accessors: _Optional[_Iterable[_Union[_folder_pb2.FolderAccessData, _Mapping]]] = ..., error: _Optional[_Union[FolderAccessError, _Mapping]] = ...) -> None: ... + +class FolderAccessError(_message.Message): + __slots__ = ("status", "message") + STATUS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + status: _folder_pb2.FolderModifyStatus + message: str + def __init__(self, status: _Optional[_Union[_folder_pb2.FolderModifyStatus, str]] = ..., message: _Optional[str] = ...) -> None: ... diff --git a/keepercommander/proto/folder_pb2.py b/keepercommander/proto/folder_pb2.py index c4991dd2f..52c30223b 100644 --- a/keepercommander/proto/folder_pb2.py +++ b/keepercommander/proto/folder_pb2.py @@ -11,87 +11,152 @@ _sym_db = _symbol_database.Default() -from . import record_pb2 as record__pb2 +from .import record_pb2 as record__pb2 +from . 
import tla_pb2 as tla__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0c\x66older.proto\x12\x06\x46older\x1a\x0crecord.proto\"\\\n\x10\x45ncryptedDataKey\x12\x14\n\x0c\x65ncryptedKey\x18\x01 \x01(\x0c\x12\x32\n\x10\x65ncryptedKeyType\x18\x02 \x01(\x0e\x32\x18.Folder.EncryptedKeyType\"\x82\x01\n\x16SharedFolderRecordData\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x0e\n\x06userId\x18\x03 \x01(\x05\x12\x32\n\x10\x65ncryptedDataKey\x18\x04 \x03(\x0b\x32\x18.Folder.EncryptedDataKey\"\\\n\x1aSharedFolderRecordDataList\x12>\n\x16sharedFolderRecordData\x18\x01 \x03(\x0b\x32\x1e.Folder.SharedFolderRecordData\"_\n\x15SharedFolderRecordFix\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12 \n\x18\x65ncryptedRecordFolderKey\x18\x03 \x01(\x0c\"Y\n\x19SharedFolderRecordFixList\x12<\n\x15sharedFolderRecordFix\x18\x01 \x03(\x0b\x32\x1d.Folder.SharedFolderRecordFix\"\xa2\x02\n\rRecordRequest\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12&\n\nrecordType\x18\x02 \x01(\x0e\x32\x12.Folder.RecordType\x12\x12\n\nrecordData\x18\x03 \x01(\x0c\x12\x1a\n\x12\x65ncryptedRecordKey\x18\x04 \x01(\x0c\x12&\n\nfolderType\x18\x05 \x01(\x0e\x32\x12.Folder.FolderType\x12\x12\n\nhowLongAgo\x18\x06 \x01(\x03\x12\x11\n\tfolderUid\x18\x07 \x01(\x0c\x12 \n\x18\x65ncryptedRecordFolderKey\x18\x08 \x01(\x0c\x12\r\n\x05\x65xtra\x18\t \x01(\x0c\x12\x15\n\rnonSharedData\x18\n \x01(\x0c\x12\x0f\n\x07\x66ileIds\x18\x0b \x03(\x03\"E\n\x0eRecordResponse\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x0e\n\x06status\x18\x03 \x01(\t\"\x80\x01\n\x12SharedFolderFields\x12\x1b\n\x13\x65ncryptedFolderName\x18\x01 \x01(\x0c\x12\x13\n\x0bmanageUsers\x18\x02 \x01(\x08\x12\x15\n\rmanageRecords\x18\x03 \x01(\x08\x12\x0f\n\x07\x63\x61nEdit\x18\x04 \x01(\x08\x12\x10\n\x08\x63\x61nShare\x18\x05 \x01(\x08\"3\n\x18SharedFolderFolderFields\x12\x17\n\x0fsharedFolderUid\x18\x01 
\x01(\x0c\"\x8f\x02\n\rFolderRequest\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12&\n\nfolderType\x18\x02 \x01(\x0e\x32\x12.Folder.FolderType\x12\x17\n\x0fparentFolderUid\x18\x03 \x01(\x0c\x12\x12\n\nfolderData\x18\x04 \x01(\x0c\x12\x1a\n\x12\x65ncryptedFolderKey\x18\x05 \x01(\x0c\x12\x36\n\x12sharedFolderFields\x18\x06 \x01(\x0b\x32\x1a.Folder.SharedFolderFields\x12\x42\n\x18sharedFolderFolderFields\x18\x07 \x01(\x0b\x32 .Folder.SharedFolderFolderFields\"E\n\x0e\x46olderResponse\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x0e\n\x06status\x18\x03 \x01(\t\"w\n\x19ImportFolderRecordRequest\x12,\n\rfolderRequest\x18\x01 \x03(\x0b\x32\x15.Folder.FolderRequest\x12,\n\rrecordRequest\x18\x02 \x03(\x0b\x32\x15.Folder.RecordRequest\"|\n\x1aImportFolderRecordResponse\x12.\n\x0e\x66olderResponse\x18\x01 \x03(\x0b\x32\x16.Folder.FolderResponse\x12.\n\x0erecordResponse\x18\x02 \x03(\x0b\x32\x16.Folder.RecordResponse\"\xc9\x02\n\x18SharedFolderUpdateRecord\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x17\n\x0fsharedFolderUid\x18\x02 \x01(\x0c\x12\x0f\n\x07teamUid\x18\x03 \x01(\x0c\x12(\n\x07\x63\x61nEdit\x18\x04 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12)\n\x08\x63\x61nShare\x18\x05 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x1a\n\x12\x65ncryptedRecordKey\x18\x06 \x01(\x0c\x12\x10\n\x08revision\x18\x07 \x01(\x05\x12\x12\n\nexpiration\x18\x08 \x01(\x12\x12=\n\x15timerNotificationType\x18\t \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x1a\n\x12rotateOnExpiration\x18\n \x01(\x08\"\xcc\x02\n\x16SharedFolderUpdateUser\x12\x10\n\x08username\x18\x01 \x01(\t\x12,\n\x0bmanageUsers\x18\x02 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12.\n\rmanageRecords\x18\x03 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x1b\n\x0fsharedFolderKey\x18\x04 \x01(\x0c\x42\x02\x18\x01\x12\x12\n\nexpiration\x18\x05 \x01(\x12\x12=\n\x15timerNotificationType\x18\x06 \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x36\n\x14typedSharedFolderKey\x18\x07 
\x01(\x0b\x32\x18.Folder.EncryptedDataKey\x12\x1a\n\x12rotateOnExpiration\x18\x08 \x01(\x08\"\x99\x02\n\x16SharedFolderUpdateTeam\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x13\n\x0bmanageUsers\x18\x02 \x01(\x08\x12\x15\n\rmanageRecords\x18\x03 \x01(\x08\x12\x1b\n\x0fsharedFolderKey\x18\x04 \x01(\x0c\x42\x02\x18\x01\x12\x12\n\nexpiration\x18\x05 \x01(\x12\x12=\n\x15timerNotificationType\x18\x06 \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x36\n\x14typedSharedFolderKey\x18\x07 \x01(\x0b\x32\x18.Folder.EncryptedDataKey\x12\x1a\n\x12rotateOnExpiration\x18\x08 \x01(\x08\"\x8e\x07\n\x1bSharedFolderUpdateV3Request\x12,\n$sharedFolderUpdateOperation_dont_use\x18\x01 \x01(\x05\x12\x17\n\x0fsharedFolderUid\x18\x02 \x01(\x0c\x12!\n\x19\x65ncryptedSharedFolderName\x18\x03 \x01(\x0c\x12\x10\n\x08revision\x18\x04 \x01(\x03\x12\x13\n\x0b\x66orceUpdate\x18\x05 \x01(\x08\x12\x13\n\x0b\x66romTeamUid\x18\x06 \x01(\x0c\x12\x33\n\x12\x64\x65\x66\x61ultManageUsers\x18\x07 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x35\n\x14\x64\x65\x66\x61ultManageRecords\x18\x08 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12/\n\x0e\x64\x65\x66\x61ultCanEdit\x18\t \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x30\n\x0f\x64\x65\x66\x61ultCanShare\x18\n \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12?\n\x15sharedFolderAddRecord\x18\x0b \x03(\x0b\x32 .Folder.SharedFolderUpdateRecord\x12;\n\x13sharedFolderAddUser\x18\x0c \x03(\x0b\x32\x1e.Folder.SharedFolderUpdateUser\x12;\n\x13sharedFolderAddTeam\x18\r \x03(\x0b\x32\x1e.Folder.SharedFolderUpdateTeam\x12\x42\n\x18sharedFolderUpdateRecord\x18\x0e \x03(\x0b\x32 .Folder.SharedFolderUpdateRecord\x12>\n\x16sharedFolderUpdateUser\x18\x0f \x03(\x0b\x32\x1e.Folder.SharedFolderUpdateUser\x12>\n\x16sharedFolderUpdateTeam\x18\x10 \x03(\x0b\x32\x1e.Folder.SharedFolderUpdateTeam\x12 \n\x18sharedFolderRemoveRecord\x18\x11 \x03(\x0c\x12\x1e\n\x16sharedFolderRemoveUser\x18\x12 \x03(\t\x12\x1e\n\x16sharedFolderRemoveTeam\x18\x13 
\x03(\x0c\x12\x19\n\x11sharedFolderOwner\x18\x14 \x01(\t\"c\n\x1dSharedFolderUpdateV3RequestV2\x12\x42\n\x15sharedFoldersUpdateV3\x18\x01 \x03(\x0b\x32#.Folder.SharedFolderUpdateV3Request\"C\n\x1eSharedFolderUpdateRecordStatus\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x0e\n\x06status\x18\x02 \x01(\t\"@\n\x1cSharedFolderUpdateUserStatus\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x0e\n\x06status\x18\x02 \x01(\t\"?\n\x1cSharedFolderUpdateTeamStatus\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x0e\n\x06status\x18\x02 \x01(\t\"\x88\x06\n\x1cSharedFolderUpdateV3Response\x12\x10\n\x08revision\x18\x01 \x01(\x03\x12K\n\x1bsharedFolderAddRecordStatus\x18\x02 \x03(\x0b\x32&.Folder.SharedFolderUpdateRecordStatus\x12G\n\x19sharedFolderAddUserStatus\x18\x03 \x03(\x0b\x32$.Folder.SharedFolderUpdateUserStatus\x12G\n\x19sharedFolderAddTeamStatus\x18\x04 \x03(\x0b\x32$.Folder.SharedFolderUpdateTeamStatus\x12N\n\x1esharedFolderUpdateRecordStatus\x18\x05 \x03(\x0b\x32&.Folder.SharedFolderUpdateRecordStatus\x12J\n\x1csharedFolderUpdateUserStatus\x18\x06 \x03(\x0b\x32$.Folder.SharedFolderUpdateUserStatus\x12J\n\x1csharedFolderUpdateTeamStatus\x18\x07 \x03(\x0b\x32$.Folder.SharedFolderUpdateTeamStatus\x12N\n\x1esharedFolderRemoveRecordStatus\x18\x08 \x03(\x0b\x32&.Folder.SharedFolderUpdateRecordStatus\x12J\n\x1csharedFolderRemoveUserStatus\x18\t \x03(\x0b\x32$.Folder.SharedFolderUpdateUserStatus\x12J\n\x1csharedFolderRemoveTeamStatus\x18\n \x03(\x0b\x32$.Folder.SharedFolderUpdateTeamStatus\x12\x17\n\x0fsharedFolderUid\x18\x0c \x01(\x0c\x12\x0e\n\x06status\x18\r \x01(\t\"m\n\x1eSharedFolderUpdateV3ResponseV2\x12K\n\x1dsharedFoldersUpdateV3Response\x18\x01 \x03(\x0b\x32$.Folder.SharedFolderUpdateV3Response\"\xfa\x01\n)GetDeletedSharedFoldersAndRecordsResponse\x12\x32\n\rsharedFolders\x18\x01 \x03(\x0b\x32\x1b.Folder.DeletedSharedFolder\x12>\n\x13sharedFolderRecords\x18\x02 \x03(\x0b\x32!.Folder.DeletedSharedFolderRecord\x12\x34\n\x11\x64\x65letedRecordData\x18\x03 
\x03(\x0b\x32\x19.Folder.DeletedRecordData\x12#\n\tusernames\x18\x04 \x03(\x0b\x32\x10.Folder.Username\"\xd1\x01\n\x13\x44\x65letedSharedFolder\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x11\n\tfolderUid\x18\x02 \x01(\x0c\x12\x11\n\tparentUid\x18\x03 \x01(\x0c\x12\x17\n\x0fsharedFolderKey\x18\x04 \x01(\x0c\x12-\n\rfolderKeyType\x18\x05 \x01(\x0e\x32\x16.Records.RecordKeyType\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\x0c\x12\x13\n\x0b\x64\x61teDeleted\x18\x07 \x01(\x03\x12\x10\n\x08revision\x18\x08 \x01(\x03\"\x81\x01\n\x19\x44\x65letedSharedFolderRecord\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x17\n\x0fsharedRecordKey\x18\x03 \x01(\x0c\x12\x13\n\x0b\x64\x61teDeleted\x18\x04 \x01(\x03\x12\x10\n\x08revision\x18\x05 \x01(\x03\"\x85\x01\n\x11\x44\x65letedRecordData\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08ownerUid\x18\x02 \x01(\x0c\x12\x10\n\x08revision\x18\x03 \x01(\x03\x12\x1a\n\x12\x63lientModifiedTime\x18\x04 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\x0c\x12\x0f\n\x07version\x18\x06 \x01(\x05\"0\n\x08Username\x12\x12\n\naccountUid\x18\x01 \x01(\x0c\x12\x10\n\x08username\x18\x02 \x01(\t\"\x8a\x01\n,RestoreDeletedSharedFoldersAndRecordsRequest\x12,\n\x07\x66olders\x18\x01 \x03(\x0b\x32\x1b.Folder.RestoreSharedObject\x12,\n\x07records\x18\x02 \x03(\x0b\x32\x1b.Folder.RestoreSharedObject\"<\n\x13RestoreSharedObject\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x12\n\nrecordUids\x18\x02 
\x03(\x0c*\x1a\n\nRecordType\x12\x0c\n\x08password\x10\x00*^\n\nFolderType\x12\x12\n\x0e\x64\x65\x66\x61ult_folder\x10\x00\x12\x0f\n\x0buser_folder\x10\x01\x12\x11\n\rshared_folder\x10\x02\x12\x18\n\x14shared_folder_folder\x10\x03*\x96\x01\n\x10\x45ncryptedKeyType\x12\n\n\x06no_key\x10\x00\x12\x19\n\x15\x65ncrypted_by_data_key\x10\x01\x12\x1b\n\x17\x65ncrypted_by_public_key\x10\x02\x12\x1d\n\x19\x65ncrypted_by_data_key_gcm\x10\x03\x12\x1f\n\x1b\x65ncrypted_by_public_key_ecc\x10\x04*M\n\x0fSetBooleanValue\x12\x15\n\x11\x42OOLEAN_NO_CHANGE\x10\x00\x12\x10\n\x0c\x42OOLEAN_TRUE\x10\x01\x12\x11\n\rBOOLEAN_FALSE\x10\x02\x42\"\n\x18\x63om.keepersecurity.protoB\x06\x46olderb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0c\x66older.proto\x12\x06\x46older\x1a\x0crecord.proto\x1a\ttla.proto\"\\\n\x10\x45ncryptedDataKey\x12\x14\n\x0c\x65ncryptedKey\x18\x01 \x01(\x0c\x12\x32\n\x10\x65ncryptedKeyType\x18\x02 \x01(\x0e\x32\x18.Folder.EncryptedKeyType\"\x82\x01\n\x16SharedFolderRecordData\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x0e\n\x06userId\x18\x03 \x01(\x05\x12\x32\n\x10\x65ncryptedDataKey\x18\x04 \x03(\x0b\x32\x18.Folder.EncryptedDataKey\"\\\n\x1aSharedFolderRecordDataList\x12>\n\x16sharedFolderRecordData\x18\x01 \x03(\x0b\x32\x1e.Folder.SharedFolderRecordData\"_\n\x15SharedFolderRecordFix\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12 \n\x18\x65ncryptedRecordFolderKey\x18\x03 \x01(\x0c\"Y\n\x19SharedFolderRecordFixList\x12<\n\x15sharedFolderRecordFix\x18\x01 \x03(\x0b\x32\x1d.Folder.SharedFolderRecordFix\"\xa2\x02\n\rRecordRequest\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12&\n\nrecordType\x18\x02 \x01(\x0e\x32\x12.Folder.RecordType\x12\x12\n\nrecordData\x18\x03 \x01(\x0c\x12\x1a\n\x12\x65ncryptedRecordKey\x18\x04 \x01(\x0c\x12&\n\nfolderType\x18\x05 \x01(\x0e\x32\x12.Folder.FolderType\x12\x12\n\nhowLongAgo\x18\x06 \x01(\x03\x12\x11\n\tfolderUid\x18\x07 \x01(\x0c\x12 
\n\x18\x65ncryptedRecordFolderKey\x18\x08 \x01(\x0c\x12\r\n\x05\x65xtra\x18\t \x01(\x0c\x12\x15\n\rnonSharedData\x18\n \x01(\x0c\x12\x0f\n\x07\x66ileIds\x18\x0b \x03(\x03\"E\n\x0eRecordResponse\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x0e\n\x06status\x18\x03 \x01(\t\"\x80\x01\n\x12SharedFolderFields\x12\x1b\n\x13\x65ncryptedFolderName\x18\x01 \x01(\x0c\x12\x13\n\x0bmanageUsers\x18\x02 \x01(\x08\x12\x15\n\rmanageRecords\x18\x03 \x01(\x08\x12\x0f\n\x07\x63\x61nEdit\x18\x04 \x01(\x08\x12\x10\n\x08\x63\x61nShare\x18\x05 \x01(\x08\"3\n\x18SharedFolderFolderFields\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\"\x8f\x02\n\rFolderRequest\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12&\n\nfolderType\x18\x02 \x01(\x0e\x32\x12.Folder.FolderType\x12\x17\n\x0fparentFolderUid\x18\x03 \x01(\x0c\x12\x12\n\nfolderData\x18\x04 \x01(\x0c\x12\x1a\n\x12\x65ncryptedFolderKey\x18\x05 \x01(\x0c\x12\x36\n\x12sharedFolderFields\x18\x06 \x01(\x0b\x32\x1a.Folder.SharedFolderFields\x12\x42\n\x18sharedFolderFolderFields\x18\x07 \x01(\x0b\x32 .Folder.SharedFolderFolderFields\"E\n\x0e\x46olderResponse\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x0e\n\x06status\x18\x03 \x01(\t\"w\n\x19ImportFolderRecordRequest\x12,\n\rfolderRequest\x18\x01 \x03(\x0b\x32\x15.Folder.FolderRequest\x12,\n\rrecordRequest\x18\x02 \x03(\x0b\x32\x15.Folder.RecordRequest\"|\n\x1aImportFolderRecordResponse\x12.\n\x0e\x66olderResponse\x18\x01 \x03(\x0b\x32\x16.Folder.FolderResponse\x12.\n\x0erecordResponse\x18\x02 \x03(\x0b\x32\x16.Folder.RecordResponse\"\xc9\x02\n\x18SharedFolderUpdateRecord\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x17\n\x0fsharedFolderUid\x18\x02 \x01(\x0c\x12\x0f\n\x07teamUid\x18\x03 \x01(\x0c\x12(\n\x07\x63\x61nEdit\x18\x04 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12)\n\x08\x63\x61nShare\x18\x05 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x1a\n\x12\x65ncryptedRecordKey\x18\x06 \x01(\x0c\x12\x10\n\x08revision\x18\x07 
\x01(\x05\x12\x12\n\nexpiration\x18\x08 \x01(\x12\x12=\n\x15timerNotificationType\x18\t \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x1a\n\x12rotateOnExpiration\x18\n \x01(\x08\"\xcc\x02\n\x16SharedFolderUpdateUser\x12\x10\n\x08username\x18\x01 \x01(\t\x12,\n\x0bmanageUsers\x18\x02 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12.\n\rmanageRecords\x18\x03 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x1b\n\x0fsharedFolderKey\x18\x04 \x01(\x0c\x42\x02\x18\x01\x12\x12\n\nexpiration\x18\x05 \x01(\x12\x12=\n\x15timerNotificationType\x18\x06 \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x36\n\x14typedSharedFolderKey\x18\x07 \x01(\x0b\x32\x18.Folder.EncryptedDataKey\x12\x1a\n\x12rotateOnExpiration\x18\x08 \x01(\x08\"\x99\x02\n\x16SharedFolderUpdateTeam\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x13\n\x0bmanageUsers\x18\x02 \x01(\x08\x12\x15\n\rmanageRecords\x18\x03 \x01(\x08\x12\x1b\n\x0fsharedFolderKey\x18\x04 \x01(\x0c\x42\x02\x18\x01\x12\x12\n\nexpiration\x18\x05 \x01(\x12\x12=\n\x15timerNotificationType\x18\x06 \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x36\n\x14typedSharedFolderKey\x18\x07 \x01(\x0b\x32\x18.Folder.EncryptedDataKey\x12\x1a\n\x12rotateOnExpiration\x18\x08 \x01(\x08\"\x8e\x07\n\x1bSharedFolderUpdateV3Request\x12,\n$sharedFolderUpdateOperation_dont_use\x18\x01 \x01(\x05\x12\x17\n\x0fsharedFolderUid\x18\x02 \x01(\x0c\x12!\n\x19\x65ncryptedSharedFolderName\x18\x03 \x01(\x0c\x12\x10\n\x08revision\x18\x04 \x01(\x03\x12\x13\n\x0b\x66orceUpdate\x18\x05 \x01(\x08\x12\x13\n\x0b\x66romTeamUid\x18\x06 \x01(\x0c\x12\x33\n\x12\x64\x65\x66\x61ultManageUsers\x18\x07 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x35\n\x14\x64\x65\x66\x61ultManageRecords\x18\x08 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12/\n\x0e\x64\x65\x66\x61ultCanEdit\x18\t \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x30\n\x0f\x64\x65\x66\x61ultCanShare\x18\n \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12?\n\x15sharedFolderAddRecord\x18\x0b \x03(\x0b\x32 
.Folder.SharedFolderUpdateRecord\x12;\n\x13sharedFolderAddUser\x18\x0c \x03(\x0b\x32\x1e.Folder.SharedFolderUpdateUser\x12;\n\x13sharedFolderAddTeam\x18\r \x03(\x0b\x32\x1e.Folder.SharedFolderUpdateTeam\x12\x42\n\x18sharedFolderUpdateRecord\x18\x0e \x03(\x0b\x32 .Folder.SharedFolderUpdateRecord\x12>\n\x16sharedFolderUpdateUser\x18\x0f \x03(\x0b\x32\x1e.Folder.SharedFolderUpdateUser\x12>\n\x16sharedFolderUpdateTeam\x18\x10 \x03(\x0b\x32\x1e.Folder.SharedFolderUpdateTeam\x12 \n\x18sharedFolderRemoveRecord\x18\x11 \x03(\x0c\x12\x1e\n\x16sharedFolderRemoveUser\x18\x12 \x03(\t\x12\x1e\n\x16sharedFolderRemoveTeam\x18\x13 \x03(\x0c\x12\x19\n\x11sharedFolderOwner\x18\x14 \x01(\t\"c\n\x1dSharedFolderUpdateV3RequestV2\x12\x42\n\x15sharedFoldersUpdateV3\x18\x01 \x03(\x0b\x32#.Folder.SharedFolderUpdateV3Request\"C\n\x1eSharedFolderUpdateRecordStatus\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x0e\n\x06status\x18\x02 \x01(\t\"@\n\x1cSharedFolderUpdateUserStatus\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x0e\n\x06status\x18\x02 \x01(\t\"?\n\x1cSharedFolderUpdateTeamStatus\x12\x0f\n\x07teamUid\x18\x01 \x01(\x0c\x12\x0e\n\x06status\x18\x02 \x01(\t\"\x88\x06\n\x1cSharedFolderUpdateV3Response\x12\x10\n\x08revision\x18\x01 \x01(\x03\x12K\n\x1bsharedFolderAddRecordStatus\x18\x02 \x03(\x0b\x32&.Folder.SharedFolderUpdateRecordStatus\x12G\n\x19sharedFolderAddUserStatus\x18\x03 \x03(\x0b\x32$.Folder.SharedFolderUpdateUserStatus\x12G\n\x19sharedFolderAddTeamStatus\x18\x04 \x03(\x0b\x32$.Folder.SharedFolderUpdateTeamStatus\x12N\n\x1esharedFolderUpdateRecordStatus\x18\x05 \x03(\x0b\x32&.Folder.SharedFolderUpdateRecordStatus\x12J\n\x1csharedFolderUpdateUserStatus\x18\x06 \x03(\x0b\x32$.Folder.SharedFolderUpdateUserStatus\x12J\n\x1csharedFolderUpdateTeamStatus\x18\x07 \x03(\x0b\x32$.Folder.SharedFolderUpdateTeamStatus\x12N\n\x1esharedFolderRemoveRecordStatus\x18\x08 \x03(\x0b\x32&.Folder.SharedFolderUpdateRecordStatus\x12J\n\x1csharedFolderRemoveUserStatus\x18\t 
\x03(\x0b\x32$.Folder.SharedFolderUpdateUserStatus\x12J\n\x1csharedFolderRemoveTeamStatus\x18\n \x03(\x0b\x32$.Folder.SharedFolderUpdateTeamStatus\x12\x17\n\x0fsharedFolderUid\x18\x0c \x01(\x0c\x12\x0e\n\x06status\x18\r \x01(\t\"m\n\x1eSharedFolderUpdateV3ResponseV2\x12K\n\x1dsharedFoldersUpdateV3Response\x18\x01 \x03(\x0b\x32$.Folder.SharedFolderUpdateV3Response\"\xfa\x01\n)GetDeletedSharedFoldersAndRecordsResponse\x12\x32\n\rsharedFolders\x18\x01 \x03(\x0b\x32\x1b.Folder.DeletedSharedFolder\x12>\n\x13sharedFolderRecords\x18\x02 \x03(\x0b\x32!.Folder.DeletedSharedFolderRecord\x12\x34\n\x11\x64\x65letedRecordData\x18\x03 \x03(\x0b\x32\x19.Folder.DeletedRecordData\x12#\n\tusernames\x18\x04 \x03(\x0b\x32\x10.Folder.Username\"\xd1\x01\n\x13\x44\x65letedSharedFolder\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x11\n\tfolderUid\x18\x02 \x01(\x0c\x12\x11\n\tparentUid\x18\x03 \x01(\x0c\x12\x17\n\x0fsharedFolderKey\x18\x04 \x01(\x0c\x12-\n\rfolderKeyType\x18\x05 \x01(\x0e\x32\x16.Records.RecordKeyType\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\x0c\x12\x13\n\x0b\x64\x61teDeleted\x18\x07 \x01(\x03\x12\x10\n\x08revision\x18\x08 \x01(\x03\"\x81\x01\n\x19\x44\x65letedSharedFolderRecord\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x17\n\x0fsharedRecordKey\x18\x03 \x01(\x0c\x12\x13\n\x0b\x64\x61teDeleted\x18\x04 \x01(\x03\x12\x10\n\x08revision\x18\x05 \x01(\x03\"\x85\x01\n\x11\x44\x65letedRecordData\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08ownerUid\x18\x02 \x01(\x0c\x12\x10\n\x08revision\x18\x03 \x01(\x03\x12\x1a\n\x12\x63lientModifiedTime\x18\x04 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\x0c\x12\x0f\n\x07version\x18\x06 \x01(\x05\"0\n\x08Username\x12\x12\n\naccountUid\x18\x01 \x01(\x0c\x12\x10\n\x08username\x18\x02 \x01(\t\"\x8a\x01\n,RestoreDeletedSharedFoldersAndRecordsRequest\x12,\n\x07\x66olders\x18\x01 \x03(\x0b\x32\x1b.Folder.RestoreSharedObject\x12,\n\x07records\x18\x02 
\x03(\x0b\x32\x1b.Folder.RestoreSharedObject\"<\n\x13RestoreSharedObject\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x12\n\nrecordUids\x18\x02 \x03(\x0c\"\x83\x02\n\nFolderData\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x11\n\tparentUid\x18\x02 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\x12%\n\x04type\x18\x04 \x01(\x0e\x32\x17.Folder.FolderUsageType\x12\x37\n\x16inheritUserPermissions\x18\x05 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x11\n\tfolderKey\x18\x06 \x01(\x0c\x12#\n\townerInfo\x18\x07 \x01(\x0b\x32\x10.Folder.UserInfo\x12\x13\n\x0b\x64\x61teCreated\x18\x08 \x01(\x03\x12\x14\n\x0clastModified\x18\t \x01(\x03\"z\n\tFolderKey\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x11\n\tparentUid\x18\x02 \x01(\x0c\x12\x11\n\tfolderKey\x18\x03 \x01(\x0c\x12\x34\n\x0b\x65ncryptedBy\x18\x04 \x01(\x0e\x32\x1f.Folder.FolderKeyEncryptionType\":\n\x10\x46olderAddRequest\x12&\n\nfolderData\x18\x01 \x03(\x0b\x32\x12.Folder.FolderData\"d\n\x12\x46olderModifyResult\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12*\n\x06status\x18\x02 \x01(\x0e\x32\x1a.Folder.FolderModifyStatus\x12\x0f\n\x07message\x18\x03 \x01(\t\"I\n\x11\x46olderAddResponse\x12\x34\n\x10\x66olderAddResults\x18\x01 \x03(\x0b\x32\x1a.Folder.FolderModifyResult\"=\n\x13\x46olderUpdateRequest\x12&\n\nfolderData\x18\x01 \x03(\x0b\x32\x12.Folder.FolderData\"O\n\x14\x46olderUpdateResponse\x12\x37\n\x13\x66olderUpdateResults\x18\x01 \x03(\x0b\x32\x1a.Folder.FolderModifyResult\"\xc3\x02\n\x11\x46olderPermissions\x12\x0e\n\x06\x63\x61nAdd\x18\x01 \x01(\x08\x12\x11\n\tcanRemove\x18\x02 \x01(\x08\x12\x11\n\tcanDelete\x18\x03 \x01(\x08\x12\x15\n\rcanListAccess\x18\x04 \x01(\x08\x12\x17\n\x0f\x63\x61nUpdateAccess\x18\x05 \x01(\x08\x12\x1a\n\x12\x63\x61nChangeOwnership\x18\x06 \x01(\x08\x12\x16\n\x0e\x63\x61nEditRecords\x18\x07 \x01(\x08\x12\x16\n\x0e\x63\x61nViewRecords\x18\x08 \x01(\x08\x12\x18\n\x10\x63\x61nApproveAccess\x18\t \x01(\x08\x12\x18\n\x10\x63\x61nRequestAccess\x18\n 
\x01(\x08\x12\x18\n\x10\x63\x61nUpdateSetting\x18\x0b \x01(\x08\x12\x16\n\x0e\x63\x61nListRecords\x18\x0c \x01(\x08\x12\x16\n\x0e\x63\x61nListFolders\x18\r \x01(\x08\"\x83\x05\n\x0c\x43\x61pabilities\x12\'\n\x06\x63\x61nAdd\x18\x01 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12*\n\tcanRemove\x18\x02 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12*\n\tcanDelete\x18\x03 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12.\n\rcanListAccess\x18\x04 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x30\n\x0f\x63\x61nUpdateAccess\x18\x05 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x33\n\x12\x63\x61nChangeOwnership\x18\x06 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12/\n\x0e\x63\x61nEditRecords\x18\x07 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12/\n\x0e\x63\x61nViewRecords\x18\x08 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x31\n\x10\x63\x61nApproveAccess\x18\t \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x31\n\x10\x63\x61nRequestAccess\x18\n \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x31\n\x10\x63\x61nUpdateSetting\x18\x0b \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12/\n\x0e\x63\x61nListRecords\x18\x0c \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12/\n\x0e\x63\x61nListFolders\x18\r \x01(\x0e\x32\x17.Folder.SetBooleanValue\"\xb8\x01\n\x19\x46olderRecordUpdateRequest\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12*\n\naddRecords\x18\x02 \x03(\x0b\x32\x16.Folder.RecordMetadata\x12-\n\rupdateRecords\x18\x03 \x03(\x0b\x32\x16.Folder.RecordMetadata\x12-\n\rremoveRecords\x18\x04 \x03(\x0b\x32\x16.Folder.RecordMetadata\"\xab\x01\n\x0eRecordMetadata\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x1a\n\x12\x65ncryptedRecordKey\x18\x02 \x01(\x0c\x12\x38\n\x16\x65ncryptedRecordKeyType\x18\x03 \x01(\x0e\x32\x18.Folder.EncryptedKeyType\x12\x30\n\rtlaProperties\x18\x05 \x01(\x0b\x32\x19.common.tla.TLAProperties\"\x93\x01\n\x0c\x46olderRecord\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12.\n\x0erecordMetadata\x18\x02 \x01(\x0b\x32\x16.Folder.RecordMetadata\x12@\n\x17\x66olderKeyEncryptionType\x18\x03 
\x01(\x0e\x32\x1f.Folder.FolderKeyEncryptionType\"s\n\x1a\x46olderRecordUpdateResponse\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x42\n\x18\x66olderRecordUpdateResult\x18\x04 \x03(\x0b\x32 .Folder.FolderRecordUpdateResult\"j\n\x18\x46olderRecordUpdateResult\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12*\n\x06status\x18\x02 \x01(\x0e\x32\x1a.Folder.FolderModifyStatus\x12\x0f\n\x07message\x18\x03 \x01(\t\"\x87\x03\n\x10\x46olderAccessData\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x15\n\raccessTypeUid\x18\x02 \x01(\x0c\x12&\n\naccessType\x18\x03 \x01(\x0e\x32\x12.Folder.AccessType\x12.\n\x0e\x61\x63\x63\x65ssRoleType\x18\x04 \x01(\x0e\x32\x16.Folder.AccessRoleType\x12+\n\tfolderKey\x18\x05 \x01(\x0b\x32\x18.Folder.EncryptedDataKey\x12\x11\n\tinherited\x18\x06 \x01(\x08\x12\x0e\n\x06hidden\x18\x07 \x01(\x08\x12.\n\x0bpermissions\x18\x08 \x01(\x0b\x32\x19.Folder.FolderPermissions\x12\x30\n\rtlaProperties\x18\t \x01(\x0b\x32\x19.common.tla.TLAProperties\x12\x13\n\x0b\x64\x61teCreated\x18\n \x01(\x03\x12\x14\n\x0clastModified\x18\x0b \x01(\x03\x12\x14\n\x0c\x64\x65niedAccess\x18\x0c \x01(\x08\"\\\n\rRevokedAccess\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x10\n\x08\x61\x63torUid\x18\x02 \x01(\x0c\x12&\n\naccessType\x18\x03 \x01(\x0e\x32\x12.Folder.AccessType\"#\n\rFolderRemoved\x12\x12\n\nfolder_uid\x18\x01 \x01(\x0c\"\x93\x04\n\x10RecordAccessData\x12\x15\n\raccessTypeUid\x18\x01 \x01(\x0c\x12&\n\naccessType\x18\x02 \x01(\x0e\x32\x12.Folder.AccessType\x12\x11\n\trecordUid\x18\x03 \x01(\x0c\x12.\n\x0e\x61\x63\x63\x65ssRoleType\x18\x04 \x01(\x0e\x32\x16.Folder.AccessRoleType\x12\r\n\x05owner\x18\x05 \x01(\x08\x12\x11\n\tinherited\x18\x06 \x01(\x08\x12\x0e\n\x06hidden\x18\x07 \x01(\x08\x12\x14\n\x0c\x64\x65niedAccess\x18\x08 \x01(\x08\x12\x16\n\x0e\x63\x61n_view_title\x18\t \x01(\x08\x12\x10\n\x08\x63\x61n_edit\x18\n \x01(\x08\x12\x10\n\x08\x63\x61n_view\x18\x0b \x01(\x08\x12\x17\n\x0f\x63\x61n_list_access\x18\x0c \x01(\x08\x12\x19\n\x11\x63\x61n_update_access\x18\r 
\x01(\x08\x12\x12\n\ncan_delete\x18\x0e \x01(\x08\x12\x1c\n\x14\x63\x61n_change_ownership\x18\x0f \x01(\x08\x12\x1a\n\x12\x63\x61n_request_access\x18\x10 \x01(\x08\x12\x1a\n\x12\x63\x61n_approve_access\x18\x11 \x01(\x08\x12\x13\n\x0b\x64\x61teCreated\x18\x12 \x01(\x03\x12\x14\n\x0clastModified\x18\x13 \x01(\x03\x12\x30\n\rtlaProperties\x18\x14 \x01(\x0b\x32\x19.common.tla.TLAProperties\"\xb8\x01\n\nAccessData\x12\x15\n\raccessTypeUid\x18\x01 \x01(\x0c\x12.\n\x0e\x61\x63\x63\x65ssRoleType\x18\x02 \x01(\x0e\x32\x16.Folder.AccessRoleType\x12\x14\n\x0c\x64\x65niedAccess\x18\x03 \x01(\x08\x12\x11\n\tinherited\x18\x04 \x01(\x08\x12\x0e\n\x06hidden\x18\x05 \x01(\x08\x12*\n\x0c\x63\x61pabilities\x18\x06 \x01(\x0b\x32\x14.Folder.Capabilities\"\xb7\x01\n\x13\x46olderAccessRequest\x12\x32\n\x10\x66olderAccessAdds\x18\x01 \x03(\x0b\x32\x18.Folder.FolderAccessData\x12\x35\n\x13\x66olderAccessUpdates\x18\x02 \x03(\x0b\x32\x18.Folder.FolderAccessData\x12\x35\n\x13\x66olderAccessRemoves\x18\x03 \x03(\x0b\x32\x18.Folder.FolderAccessData\"\x9f\x01\n\x12\x46olderAccessResult\x12\x11\n\tfolderUid\x18\x01 \x01(\x0c\x12\x11\n\taccessUid\x18\x02 \x01(\x0c\x12&\n\naccessType\x18\x03 \x01(\x0e\x32\x12.Folder.AccessType\x12*\n\x06status\x18\x04 \x01(\x0e\x32\x1a.Folder.FolderModifyStatus\x12\x0f\n\x07message\x18\x05 \x01(\t\"O\n\x14\x46olderAccessResponse\x12\x37\n\x13\x66olderAccessResults\x18\x01 \x03(\x0b\x32\x1a.Folder.FolderAccessResult\"0\n\x08UserInfo\x12\x12\n\naccountUid\x18\x01 \x01(\x0c\x12\x10\n\x08username\x18\x02 \x01(\t\"M\n\nRecordData\x12\x1e\n\x04user\x18\x01 \x01(\x0b\x32\x10.Folder.UserInfo\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x11\n\trecordUid\x18\x03 \x01(\x0c\"{\n\tRecordKey\x12\x10\n\x08user_uid\x18\x01 \x01(\x0c\x12\x12\n\nrecord_uid\x18\x02 \x01(\x0c\x12\x12\n\nrecord_key\x18\x03 \x01(\x0c\x12\x34\n\x12\x65ncrypted_key_type\x18\x04 
\x01(\x0e\x32\x18.Folder.EncryptedKeyType*\x1a\n\nRecordType\x12\x0c\n\x08password\x10\x00*^\n\nFolderType\x12\x12\n\x0e\x64\x65\x66\x61ult_folder\x10\x00\x12\x0f\n\x0buser_folder\x10\x01\x12\x11\n\rshared_folder\x10\x02\x12\x18\n\x14shared_folder_folder\x10\x03*\x96\x01\n\x10\x45ncryptedKeyType\x12\n\n\x06no_key\x10\x00\x12\x19\n\x15\x65ncrypted_by_data_key\x10\x01\x12\x1b\n\x17\x65ncrypted_by_public_key\x10\x02\x12\x1d\n\x19\x65ncrypted_by_data_key_gcm\x10\x03\x12\x1f\n\x1b\x65ncrypted_by_public_key_ecc\x10\x04*M\n\x0fSetBooleanValue\x12\x15\n\x11\x42OOLEAN_NO_CHANGE\x10\x00\x12\x10\n\x0c\x42OOLEAN_TRUE\x10\x01\x12\x11\n\rBOOLEAN_FALSE\x10\x02*R\n\x0f\x46olderUsageType\x12\x0e\n\nUT_UNKNOWN\x10\x00\x12\r\n\tUT_NORMAL\x10\x01\x12\x0f\n\x0bUT_WORKFLOW\x10\x02\x12\x0f\n\x0bUT_TRASHCAN\x10\x03*l\n\x17\x46olderKeyEncryptionType\x12\x19\n\x15\x45NCRYPTED_BY_USER_KEY\x10\x00\x12\x1b\n\x17\x45NCRYPTED_BY_PARENT_KEY\x10\x01\x12\x19\n\x15\x45NCRYPTED_BY_TEAM_KEY\x10\x02*T\n\x12\x46olderModifyStatus\x12\x0b\n\x07SUCCESS\x10\x00\x12\x0f\n\x0b\x42\x41\x44_REQUEST\x10\x01\x12\x11\n\rACCESS_DENIED\x10\x02\x12\r\n\tNOT_FOUND\x10\x03*\xa4\x02\n\x14\x46olderPermissionBits\x12\n\n\x06noBits\x10\x00\x12\n\n\x06\x63\x61nAdd\x10\x01\x12\r\n\tcanRemove\x10\x02\x12\r\n\tcanDelete\x10\x04\x12\x11\n\rcanListAccess\x10\x08\x12\x13\n\x0f\x63\x61nUpdateAccess\x10\x10\x12\x16\n\x12\x63\x61nChangeOwnership\x10 \x12\x12\n\x0e\x63\x61nEditRecords\x10@\x12\x13\n\x0e\x63\x61nViewRecords\x10\x80\x01\x12\x15\n\x10\x63\x61nApproveAccess\x10\x80\x02\x12\x15\n\x10\x63\x61nRequestAccess\x10\x80\x04\x12\x15\n\x10\x63\x61nUpdateSetting\x10\x80\x08\x12\x13\n\x0e\x63\x61nListRecords\x10\x80\x10\x12\x13\n\x0e\x63\x61nListFolders\x10\x80 
*\x9b\x01\n\x0e\x41\x63\x63\x65ssRoleType\x12\r\n\tNAVIGATOR\x10\x00\x12\r\n\tREQUESTOR\x10\x01\x12\n\n\x06VIEWER\x10\x02\x12\x12\n\x0eSHARED_MANAGER\x10\x03\x12\x13\n\x0f\x43ONTENT_MANAGER\x10\x04\x12\x19\n\x15\x43ONTENT_SHARE_MANAGER\x10\x05\x12\x0b\n\x07MANAGER\x10\x06\x12\x0e\n\nUNRESOLVED\x10\x07*z\n\nAccessType\x12\x0e\n\nAT_UNKNOWN\x10\x00\x12\x0c\n\x08\x41T_OWNER\x10\x01\x12\x0b\n\x07\x41T_USER\x10\x02\x12\x0b\n\x07\x41T_TEAM\x10\x03\x12\x11\n\rAT_ENTERPRISE\x10\x04\x12\r\n\tAT_FOLDER\x10\x05\x12\x12\n\x0e\x41T_APPLICATION\x10\x06*:\n\nObjectType\x12\x0e\n\nOT_UNKNOWN\x10\x00\x12\r\n\tOT_RECORD\x10\x01\x12\r\n\tOT_FOLDER\x10\x02\x42\"\n\x18\x63om.keepersecurity.protoB\x06\x46olderb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'folder_pb2', _globals) -if _descriptor._USE_C_DESCRIPTORS == False: - _globals['DESCRIPTOR']._options = None +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'\n\030com.keepersecurity.protoB\006Folder' - _globals['_SHAREDFOLDERUPDATEUSER'].fields_by_name['sharedFolderKey']._options = None + _globals['_SHAREDFOLDERUPDATEUSER'].fields_by_name['sharedFolderKey']._loaded_options = None _globals['_SHAREDFOLDERUPDATEUSER'].fields_by_name['sharedFolderKey']._serialized_options = b'\030\001' - _globals['_SHAREDFOLDERUPDATETEAM'].fields_by_name['sharedFolderKey']._options = None + _globals['_SHAREDFOLDERUPDATETEAM'].fields_by_name['sharedFolderKey']._loaded_options = None _globals['_SHAREDFOLDERUPDATETEAM'].fields_by_name['sharedFolderKey']._serialized_options = b'\030\001' - _globals['_RECORDTYPE']._serialized_start=5728 - _globals['_RECORDTYPE']._serialized_end=5754 - _globals['_FOLDERTYPE']._serialized_start=5756 - _globals['_FOLDERTYPE']._serialized_end=5850 - _globals['_ENCRYPTEDKEYTYPE']._serialized_start=5853 - 
_globals['_ENCRYPTEDKEYTYPE']._serialized_end=6003 - _globals['_SETBOOLEANVALUE']._serialized_start=6005 - _globals['_SETBOOLEANVALUE']._serialized_end=6082 - _globals['_ENCRYPTEDDATAKEY']._serialized_start=38 - _globals['_ENCRYPTEDDATAKEY']._serialized_end=130 - _globals['_SHAREDFOLDERRECORDDATA']._serialized_start=133 - _globals['_SHAREDFOLDERRECORDDATA']._serialized_end=263 - _globals['_SHAREDFOLDERRECORDDATALIST']._serialized_start=265 - _globals['_SHAREDFOLDERRECORDDATALIST']._serialized_end=357 - _globals['_SHAREDFOLDERRECORDFIX']._serialized_start=359 - _globals['_SHAREDFOLDERRECORDFIX']._serialized_end=454 - _globals['_SHAREDFOLDERRECORDFIXLIST']._serialized_start=456 - _globals['_SHAREDFOLDERRECORDFIXLIST']._serialized_end=545 - _globals['_RECORDREQUEST']._serialized_start=548 - _globals['_RECORDREQUEST']._serialized_end=838 - _globals['_RECORDRESPONSE']._serialized_start=840 - _globals['_RECORDRESPONSE']._serialized_end=909 - _globals['_SHAREDFOLDERFIELDS']._serialized_start=912 - _globals['_SHAREDFOLDERFIELDS']._serialized_end=1040 - _globals['_SHAREDFOLDERFOLDERFIELDS']._serialized_start=1042 - _globals['_SHAREDFOLDERFOLDERFIELDS']._serialized_end=1093 - _globals['_FOLDERREQUEST']._serialized_start=1096 - _globals['_FOLDERREQUEST']._serialized_end=1367 - _globals['_FOLDERRESPONSE']._serialized_start=1369 - _globals['_FOLDERRESPONSE']._serialized_end=1438 - _globals['_IMPORTFOLDERRECORDREQUEST']._serialized_start=1440 - _globals['_IMPORTFOLDERRECORDREQUEST']._serialized_end=1559 - _globals['_IMPORTFOLDERRECORDRESPONSE']._serialized_start=1561 - _globals['_IMPORTFOLDERRECORDRESPONSE']._serialized_end=1685 - _globals['_SHAREDFOLDERUPDATERECORD']._serialized_start=1688 - _globals['_SHAREDFOLDERUPDATERECORD']._serialized_end=2017 - _globals['_SHAREDFOLDERUPDATEUSER']._serialized_start=2020 - _globals['_SHAREDFOLDERUPDATEUSER']._serialized_end=2352 - _globals['_SHAREDFOLDERUPDATETEAM']._serialized_start=2355 - 
_globals['_SHAREDFOLDERUPDATETEAM']._serialized_end=2636 - _globals['_SHAREDFOLDERUPDATEV3REQUEST']._serialized_start=2639 - _globals['_SHAREDFOLDERUPDATEV3REQUEST']._serialized_end=3549 - _globals['_SHAREDFOLDERUPDATEV3REQUESTV2']._serialized_start=3551 - _globals['_SHAREDFOLDERUPDATEV3REQUESTV2']._serialized_end=3650 - _globals['_SHAREDFOLDERUPDATERECORDSTATUS']._serialized_start=3652 - _globals['_SHAREDFOLDERUPDATERECORDSTATUS']._serialized_end=3719 - _globals['_SHAREDFOLDERUPDATEUSERSTATUS']._serialized_start=3721 - _globals['_SHAREDFOLDERUPDATEUSERSTATUS']._serialized_end=3785 - _globals['_SHAREDFOLDERUPDATETEAMSTATUS']._serialized_start=3787 - _globals['_SHAREDFOLDERUPDATETEAMSTATUS']._serialized_end=3850 - _globals['_SHAREDFOLDERUPDATEV3RESPONSE']._serialized_start=3853 - _globals['_SHAREDFOLDERUPDATEV3RESPONSE']._serialized_end=4629 - _globals['_SHAREDFOLDERUPDATEV3RESPONSEV2']._serialized_start=4631 - _globals['_SHAREDFOLDERUPDATEV3RESPONSEV2']._serialized_end=4740 - _globals['_GETDELETEDSHAREDFOLDERSANDRECORDSRESPONSE']._serialized_start=4743 - _globals['_GETDELETEDSHAREDFOLDERSANDRECORDSRESPONSE']._serialized_end=4993 - _globals['_DELETEDSHAREDFOLDER']._serialized_start=4996 - _globals['_DELETEDSHAREDFOLDER']._serialized_end=5205 - _globals['_DELETEDSHAREDFOLDERRECORD']._serialized_start=5208 - _globals['_DELETEDSHAREDFOLDERRECORD']._serialized_end=5337 - _globals['_DELETEDRECORDDATA']._serialized_start=5340 - _globals['_DELETEDRECORDDATA']._serialized_end=5473 - _globals['_USERNAME']._serialized_start=5475 - _globals['_USERNAME']._serialized_end=5523 - _globals['_RESTOREDELETEDSHAREDFOLDERSANDRECORDSREQUEST']._serialized_start=5526 - _globals['_RESTOREDELETEDSHAREDFOLDERSANDRECORDSREQUEST']._serialized_end=5664 - _globals['_RESTORESHAREDOBJECT']._serialized_start=5666 - _globals['_RESTORESHAREDOBJECT']._serialized_end=5726 + _globals['_RECORDTYPE']._serialized_start=10143 + _globals['_RECORDTYPE']._serialized_end=10169 + 
_globals['_FOLDERTYPE']._serialized_start=10171 + _globals['_FOLDERTYPE']._serialized_end=10265 + _globals['_ENCRYPTEDKEYTYPE']._serialized_start=10268 + _globals['_ENCRYPTEDKEYTYPE']._serialized_end=10418 + _globals['_SETBOOLEANVALUE']._serialized_start=10420 + _globals['_SETBOOLEANVALUE']._serialized_end=10497 + _globals['_FOLDERUSAGETYPE']._serialized_start=10499 + _globals['_FOLDERUSAGETYPE']._serialized_end=10581 + _globals['_FOLDERKEYENCRYPTIONTYPE']._serialized_start=10583 + _globals['_FOLDERKEYENCRYPTIONTYPE']._serialized_end=10691 + _globals['_FOLDERMODIFYSTATUS']._serialized_start=10693 + _globals['_FOLDERMODIFYSTATUS']._serialized_end=10777 + _globals['_FOLDERPERMISSIONBITS']._serialized_start=10780 + _globals['_FOLDERPERMISSIONBITS']._serialized_end=11072 + _globals['_ACCESSROLETYPE']._serialized_start=11075 + _globals['_ACCESSROLETYPE']._serialized_end=11230 + _globals['_ACCESSTYPE']._serialized_start=11232 + _globals['_ACCESSTYPE']._serialized_end=11354 + _globals['_OBJECTTYPE']._serialized_start=11356 + _globals['_OBJECTTYPE']._serialized_end=11414 + _globals['_ENCRYPTEDDATAKEY']._serialized_start=49 + _globals['_ENCRYPTEDDATAKEY']._serialized_end=141 + _globals['_SHAREDFOLDERRECORDDATA']._serialized_start=144 + _globals['_SHAREDFOLDERRECORDDATA']._serialized_end=274 + _globals['_SHAREDFOLDERRECORDDATALIST']._serialized_start=276 + _globals['_SHAREDFOLDERRECORDDATALIST']._serialized_end=368 + _globals['_SHAREDFOLDERRECORDFIX']._serialized_start=370 + _globals['_SHAREDFOLDERRECORDFIX']._serialized_end=465 + _globals['_SHAREDFOLDERRECORDFIXLIST']._serialized_start=467 + _globals['_SHAREDFOLDERRECORDFIXLIST']._serialized_end=556 + _globals['_RECORDREQUEST']._serialized_start=559 + _globals['_RECORDREQUEST']._serialized_end=849 + _globals['_RECORDRESPONSE']._serialized_start=851 + _globals['_RECORDRESPONSE']._serialized_end=920 + _globals['_SHAREDFOLDERFIELDS']._serialized_start=923 + _globals['_SHAREDFOLDERFIELDS']._serialized_end=1051 + 
_globals['_SHAREDFOLDERFOLDERFIELDS']._serialized_start=1053 + _globals['_SHAREDFOLDERFOLDERFIELDS']._serialized_end=1104 + _globals['_FOLDERREQUEST']._serialized_start=1107 + _globals['_FOLDERREQUEST']._serialized_end=1378 + _globals['_FOLDERRESPONSE']._serialized_start=1380 + _globals['_FOLDERRESPONSE']._serialized_end=1449 + _globals['_IMPORTFOLDERRECORDREQUEST']._serialized_start=1451 + _globals['_IMPORTFOLDERRECORDREQUEST']._serialized_end=1570 + _globals['_IMPORTFOLDERRECORDRESPONSE']._serialized_start=1572 + _globals['_IMPORTFOLDERRECORDRESPONSE']._serialized_end=1696 + _globals['_SHAREDFOLDERUPDATERECORD']._serialized_start=1699 + _globals['_SHAREDFOLDERUPDATERECORD']._serialized_end=2028 + _globals['_SHAREDFOLDERUPDATEUSER']._serialized_start=2031 + _globals['_SHAREDFOLDERUPDATEUSER']._serialized_end=2363 + _globals['_SHAREDFOLDERUPDATETEAM']._serialized_start=2366 + _globals['_SHAREDFOLDERUPDATETEAM']._serialized_end=2647 + _globals['_SHAREDFOLDERUPDATEV3REQUEST']._serialized_start=2650 + _globals['_SHAREDFOLDERUPDATEV3REQUEST']._serialized_end=3560 + _globals['_SHAREDFOLDERUPDATEV3REQUESTV2']._serialized_start=3562 + _globals['_SHAREDFOLDERUPDATEV3REQUESTV2']._serialized_end=3661 + _globals['_SHAREDFOLDERUPDATERECORDSTATUS']._serialized_start=3663 + _globals['_SHAREDFOLDERUPDATERECORDSTATUS']._serialized_end=3730 + _globals['_SHAREDFOLDERUPDATEUSERSTATUS']._serialized_start=3732 + _globals['_SHAREDFOLDERUPDATEUSERSTATUS']._serialized_end=3796 + _globals['_SHAREDFOLDERUPDATETEAMSTATUS']._serialized_start=3798 + _globals['_SHAREDFOLDERUPDATETEAMSTATUS']._serialized_end=3861 + _globals['_SHAREDFOLDERUPDATEV3RESPONSE']._serialized_start=3864 + _globals['_SHAREDFOLDERUPDATEV3RESPONSE']._serialized_end=4640 + _globals['_SHAREDFOLDERUPDATEV3RESPONSEV2']._serialized_start=4642 + _globals['_SHAREDFOLDERUPDATEV3RESPONSEV2']._serialized_end=4751 + _globals['_GETDELETEDSHAREDFOLDERSANDRECORDSRESPONSE']._serialized_start=4754 + 
_globals['_GETDELETEDSHAREDFOLDERSANDRECORDSRESPONSE']._serialized_end=5004 + _globals['_DELETEDSHAREDFOLDER']._serialized_start=5007 + _globals['_DELETEDSHAREDFOLDER']._serialized_end=5216 + _globals['_DELETEDSHAREDFOLDERRECORD']._serialized_start=5219 + _globals['_DELETEDSHAREDFOLDERRECORD']._serialized_end=5348 + _globals['_DELETEDRECORDDATA']._serialized_start=5351 + _globals['_DELETEDRECORDDATA']._serialized_end=5484 + _globals['_USERNAME']._serialized_start=5486 + _globals['_USERNAME']._serialized_end=5534 + _globals['_RESTOREDELETEDSHAREDFOLDERSANDRECORDSREQUEST']._serialized_start=5537 + _globals['_RESTOREDELETEDSHAREDFOLDERSANDRECORDSREQUEST']._serialized_end=5675 + _globals['_RESTORESHAREDOBJECT']._serialized_start=5677 + _globals['_RESTORESHAREDOBJECT']._serialized_end=5737 + _globals['_FOLDERDATA']._serialized_start=5740 + _globals['_FOLDERDATA']._serialized_end=5999 + _globals['_FOLDERKEY']._serialized_start=6001 + _globals['_FOLDERKEY']._serialized_end=6123 + _globals['_FOLDERADDREQUEST']._serialized_start=6125 + _globals['_FOLDERADDREQUEST']._serialized_end=6183 + _globals['_FOLDERMODIFYRESULT']._serialized_start=6185 + _globals['_FOLDERMODIFYRESULT']._serialized_end=6285 + _globals['_FOLDERADDRESPONSE']._serialized_start=6287 + _globals['_FOLDERADDRESPONSE']._serialized_end=6360 + _globals['_FOLDERUPDATEREQUEST']._serialized_start=6362 + _globals['_FOLDERUPDATEREQUEST']._serialized_end=6423 + _globals['_FOLDERUPDATERESPONSE']._serialized_start=6425 + _globals['_FOLDERUPDATERESPONSE']._serialized_end=6504 + _globals['_FOLDERPERMISSIONS']._serialized_start=6507 + _globals['_FOLDERPERMISSIONS']._serialized_end=6830 + _globals['_CAPABILITIES']._serialized_start=6833 + _globals['_CAPABILITIES']._serialized_end=7476 + _globals['_FOLDERRECORDUPDATEREQUEST']._serialized_start=7479 + _globals['_FOLDERRECORDUPDATEREQUEST']._serialized_end=7663 + _globals['_RECORDMETADATA']._serialized_start=7666 + _globals['_RECORDMETADATA']._serialized_end=7837 + 
_globals['_FOLDERRECORD']._serialized_start=7840 + _globals['_FOLDERRECORD']._serialized_end=7987 + _globals['_FOLDERRECORDUPDATERESPONSE']._serialized_start=7989 + _globals['_FOLDERRECORDUPDATERESPONSE']._serialized_end=8104 + _globals['_FOLDERRECORDUPDATERESULT']._serialized_start=8106 + _globals['_FOLDERRECORDUPDATERESULT']._serialized_end=8212 + _globals['_FOLDERACCESSDATA']._serialized_start=8215 + _globals['_FOLDERACCESSDATA']._serialized_end=8606 + _globals['_REVOKEDACCESS']._serialized_start=8608 + _globals['_REVOKEDACCESS']._serialized_end=8700 + _globals['_FOLDERREMOVED']._serialized_start=8702 + _globals['_FOLDERREMOVED']._serialized_end=8737 + _globals['_RECORDACCESSDATA']._serialized_start=8740 + _globals['_RECORDACCESSDATA']._serialized_end=9271 + _globals['_ACCESSDATA']._serialized_start=9274 + _globals['_ACCESSDATA']._serialized_end=9458 + _globals['_FOLDERACCESSREQUEST']._serialized_start=9461 + _globals['_FOLDERACCESSREQUEST']._serialized_end=9644 + _globals['_FOLDERACCESSRESULT']._serialized_start=9647 + _globals['_FOLDERACCESSRESULT']._serialized_end=9806 + _globals['_FOLDERACCESSRESPONSE']._serialized_start=9808 + _globals['_FOLDERACCESSRESPONSE']._serialized_end=9887 + _globals['_USERINFO']._serialized_start=9889 + _globals['_USERINFO']._serialized_end=9937 + _globals['_RECORDDATA']._serialized_start=9939 + _globals['_RECORDDATA']._serialized_end=10016 + _globals['_RECORDKEY']._serialized_start=10018 + _globals['_RECORDKEY']._serialized_end=10141 # @@protoc_insertion_point(module_scope) diff --git a/keepercommander/proto/folder_pb2.pyi b/keepercommander/proto/folder_pb2.pyi index d3c17b439..f9a2fb97a 100644 --- a/keepercommander/proto/folder_pb2.pyi +++ b/keepercommander/proto/folder_pb2.pyi @@ -1,25 +1,27 @@ import record_pb2 as _record_pb2 +import tla_pb2 as _tla_pb2 from google.protobuf.internal import containers as _containers from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper from google.protobuf import 
descriptor as _descriptor from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union +from collections.abc import Iterable as _Iterable, Mapping as _Mapping +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union DESCRIPTOR: _descriptor.FileDescriptor class RecordType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () password: _ClassVar[RecordType] class FolderType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () default_folder: _ClassVar[FolderType] user_folder: _ClassVar[FolderType] shared_folder: _ClassVar[FolderType] shared_folder_folder: _ClassVar[FolderType] class EncryptedKeyType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () no_key: _ClassVar[EncryptedKeyType] encrypted_by_data_key: _ClassVar[EncryptedKeyType] encrypted_by_public_key: _ClassVar[EncryptedKeyType] @@ -27,10 +29,74 @@ class EncryptedKeyType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): encrypted_by_public_key_ecc: _ClassVar[EncryptedKeyType] class SetBooleanValue(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () BOOLEAN_NO_CHANGE: _ClassVar[SetBooleanValue] BOOLEAN_TRUE: _ClassVar[SetBooleanValue] BOOLEAN_FALSE: _ClassVar[SetBooleanValue] + +class FolderUsageType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + UT_UNKNOWN: _ClassVar[FolderUsageType] + UT_NORMAL: _ClassVar[FolderUsageType] + UT_WORKFLOW: _ClassVar[FolderUsageType] + UT_TRASHCAN: _ClassVar[FolderUsageType] + +class FolderKeyEncryptionType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + ENCRYPTED_BY_USER_KEY: _ClassVar[FolderKeyEncryptionType] + ENCRYPTED_BY_PARENT_KEY: _ClassVar[FolderKeyEncryptionType] + ENCRYPTED_BY_TEAM_KEY: _ClassVar[FolderKeyEncryptionType] + +class FolderModifyStatus(int, 
metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + SUCCESS: _ClassVar[FolderModifyStatus] + BAD_REQUEST: _ClassVar[FolderModifyStatus] + ACCESS_DENIED: _ClassVar[FolderModifyStatus] + NOT_FOUND: _ClassVar[FolderModifyStatus] + +class FolderPermissionBits(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + noBits: _ClassVar[FolderPermissionBits] + canAdd: _ClassVar[FolderPermissionBits] + canRemove: _ClassVar[FolderPermissionBits] + canDelete: _ClassVar[FolderPermissionBits] + canListAccess: _ClassVar[FolderPermissionBits] + canUpdateAccess: _ClassVar[FolderPermissionBits] + canChangeOwnership: _ClassVar[FolderPermissionBits] + canEditRecords: _ClassVar[FolderPermissionBits] + canViewRecords: _ClassVar[FolderPermissionBits] + canApproveAccess: _ClassVar[FolderPermissionBits] + canRequestAccess: _ClassVar[FolderPermissionBits] + canUpdateSetting: _ClassVar[FolderPermissionBits] + canListRecords: _ClassVar[FolderPermissionBits] + canListFolders: _ClassVar[FolderPermissionBits] + +class AccessRoleType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + NAVIGATOR: _ClassVar[AccessRoleType] + REQUESTOR: _ClassVar[AccessRoleType] + VIEWER: _ClassVar[AccessRoleType] + SHARED_MANAGER: _ClassVar[AccessRoleType] + CONTENT_MANAGER: _ClassVar[AccessRoleType] + CONTENT_SHARE_MANAGER: _ClassVar[AccessRoleType] + MANAGER: _ClassVar[AccessRoleType] + UNRESOLVED: _ClassVar[AccessRoleType] + +class AccessType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + AT_UNKNOWN: _ClassVar[AccessType] + AT_OWNER: _ClassVar[AccessType] + AT_USER: _ClassVar[AccessType] + AT_TEAM: _ClassVar[AccessType] + AT_ENTERPRISE: _ClassVar[AccessType] + AT_FOLDER: _ClassVar[AccessType] + AT_APPLICATION: _ClassVar[AccessType] + +class ObjectType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + OT_UNKNOWN: _ClassVar[ObjectType] + OT_RECORD: _ClassVar[ObjectType] + OT_FOLDER: _ClassVar[ObjectType] password: 
RecordType default_folder: FolderType user_folder: FolderType @@ -44,9 +110,52 @@ encrypted_by_public_key_ecc: EncryptedKeyType BOOLEAN_NO_CHANGE: SetBooleanValue BOOLEAN_TRUE: SetBooleanValue BOOLEAN_FALSE: SetBooleanValue +UT_UNKNOWN: FolderUsageType +UT_NORMAL: FolderUsageType +UT_WORKFLOW: FolderUsageType +UT_TRASHCAN: FolderUsageType +ENCRYPTED_BY_USER_KEY: FolderKeyEncryptionType +ENCRYPTED_BY_PARENT_KEY: FolderKeyEncryptionType +ENCRYPTED_BY_TEAM_KEY: FolderKeyEncryptionType +SUCCESS: FolderModifyStatus +BAD_REQUEST: FolderModifyStatus +ACCESS_DENIED: FolderModifyStatus +NOT_FOUND: FolderModifyStatus +noBits: FolderPermissionBits +canAdd: FolderPermissionBits +canRemove: FolderPermissionBits +canDelete: FolderPermissionBits +canListAccess: FolderPermissionBits +canUpdateAccess: FolderPermissionBits +canChangeOwnership: FolderPermissionBits +canEditRecords: FolderPermissionBits +canViewRecords: FolderPermissionBits +canApproveAccess: FolderPermissionBits +canRequestAccess: FolderPermissionBits +canUpdateSetting: FolderPermissionBits +canListRecords: FolderPermissionBits +canListFolders: FolderPermissionBits +NAVIGATOR: AccessRoleType +REQUESTOR: AccessRoleType +VIEWER: AccessRoleType +SHARED_MANAGER: AccessRoleType +CONTENT_MANAGER: AccessRoleType +CONTENT_SHARE_MANAGER: AccessRoleType +MANAGER: AccessRoleType +UNRESOLVED: AccessRoleType +AT_UNKNOWN: AccessType +AT_OWNER: AccessType +AT_USER: AccessType +AT_TEAM: AccessType +AT_ENTERPRISE: AccessType +AT_FOLDER: AccessType +AT_APPLICATION: AccessType +OT_UNKNOWN: ObjectType +OT_RECORD: ObjectType +OT_FOLDER: ObjectType class EncryptedDataKey(_message.Message): - __slots__ = ["encryptedKey", "encryptedKeyType"] + __slots__ = ("encryptedKey", "encryptedKeyType") ENCRYPTEDKEY_FIELD_NUMBER: _ClassVar[int] ENCRYPTEDKEYTYPE_FIELD_NUMBER: _ClassVar[int] encryptedKey: bytes @@ -54,7 +163,7 @@ class EncryptedDataKey(_message.Message): def __init__(self, encryptedKey: _Optional[bytes] = ..., encryptedKeyType: 
_Optional[_Union[EncryptedKeyType, str]] = ...) -> None: ... class SharedFolderRecordData(_message.Message): - __slots__ = ["folderUid", "recordUid", "userId", "encryptedDataKey"] + __slots__ = ("folderUid", "recordUid", "userId", "encryptedDataKey") FOLDERUID_FIELD_NUMBER: _ClassVar[int] RECORDUID_FIELD_NUMBER: _ClassVar[int] USERID_FIELD_NUMBER: _ClassVar[int] @@ -66,13 +175,13 @@ class SharedFolderRecordData(_message.Message): def __init__(self, folderUid: _Optional[bytes] = ..., recordUid: _Optional[bytes] = ..., userId: _Optional[int] = ..., encryptedDataKey: _Optional[_Iterable[_Union[EncryptedDataKey, _Mapping]]] = ...) -> None: ... class SharedFolderRecordDataList(_message.Message): - __slots__ = ["sharedFolderRecordData"] + __slots__ = ("sharedFolderRecordData",) SHAREDFOLDERRECORDDATA_FIELD_NUMBER: _ClassVar[int] sharedFolderRecordData: _containers.RepeatedCompositeFieldContainer[SharedFolderRecordData] def __init__(self, sharedFolderRecordData: _Optional[_Iterable[_Union[SharedFolderRecordData, _Mapping]]] = ...) -> None: ... class SharedFolderRecordFix(_message.Message): - __slots__ = ["folderUid", "recordUid", "encryptedRecordFolderKey"] + __slots__ = ("folderUid", "recordUid", "encryptedRecordFolderKey") FOLDERUID_FIELD_NUMBER: _ClassVar[int] RECORDUID_FIELD_NUMBER: _ClassVar[int] ENCRYPTEDRECORDFOLDERKEY_FIELD_NUMBER: _ClassVar[int] @@ -82,13 +191,13 @@ class SharedFolderRecordFix(_message.Message): def __init__(self, folderUid: _Optional[bytes] = ..., recordUid: _Optional[bytes] = ..., encryptedRecordFolderKey: _Optional[bytes] = ...) -> None: ... class SharedFolderRecordFixList(_message.Message): - __slots__ = ["sharedFolderRecordFix"] + __slots__ = ("sharedFolderRecordFix",) SHAREDFOLDERRECORDFIX_FIELD_NUMBER: _ClassVar[int] sharedFolderRecordFix: _containers.RepeatedCompositeFieldContainer[SharedFolderRecordFix] def __init__(self, sharedFolderRecordFix: _Optional[_Iterable[_Union[SharedFolderRecordFix, _Mapping]]] = ...) -> None: ... 
class RecordRequest(_message.Message): - __slots__ = ["recordUid", "recordType", "recordData", "encryptedRecordKey", "folderType", "howLongAgo", "folderUid", "encryptedRecordFolderKey", "extra", "nonSharedData", "fileIds"] + __slots__ = ("recordUid", "recordType", "recordData", "encryptedRecordKey", "folderType", "howLongAgo", "folderUid", "encryptedRecordFolderKey", "extra", "nonSharedData", "fileIds") RECORDUID_FIELD_NUMBER: _ClassVar[int] RECORDTYPE_FIELD_NUMBER: _ClassVar[int] RECORDDATA_FIELD_NUMBER: _ClassVar[int] @@ -114,7 +223,7 @@ class RecordRequest(_message.Message): def __init__(self, recordUid: _Optional[bytes] = ..., recordType: _Optional[_Union[RecordType, str]] = ..., recordData: _Optional[bytes] = ..., encryptedRecordKey: _Optional[bytes] = ..., folderType: _Optional[_Union[FolderType, str]] = ..., howLongAgo: _Optional[int] = ..., folderUid: _Optional[bytes] = ..., encryptedRecordFolderKey: _Optional[bytes] = ..., extra: _Optional[bytes] = ..., nonSharedData: _Optional[bytes] = ..., fileIds: _Optional[_Iterable[int]] = ...) -> None: ... class RecordResponse(_message.Message): - __slots__ = ["recordUid", "revision", "status"] + __slots__ = ("recordUid", "revision", "status") RECORDUID_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] STATUS_FIELD_NUMBER: _ClassVar[int] @@ -124,7 +233,7 @@ class RecordResponse(_message.Message): def __init__(self, recordUid: _Optional[bytes] = ..., revision: _Optional[int] = ..., status: _Optional[str] = ...) -> None: ... 
class SharedFolderFields(_message.Message): - __slots__ = ["encryptedFolderName", "manageUsers", "manageRecords", "canEdit", "canShare"] + __slots__ = ("encryptedFolderName", "manageUsers", "manageRecords", "canEdit", "canShare") ENCRYPTEDFOLDERNAME_FIELD_NUMBER: _ClassVar[int] MANAGEUSERS_FIELD_NUMBER: _ClassVar[int] MANAGERECORDS_FIELD_NUMBER: _ClassVar[int] @@ -135,16 +244,16 @@ class SharedFolderFields(_message.Message): manageRecords: bool canEdit: bool canShare: bool - def __init__(self, encryptedFolderName: _Optional[bytes] = ..., manageUsers: bool = ..., manageRecords: bool = ..., canEdit: bool = ..., canShare: bool = ...) -> None: ... + def __init__(self, encryptedFolderName: _Optional[bytes] = ..., manageUsers: _Optional[bool] = ..., manageRecords: _Optional[bool] = ..., canEdit: _Optional[bool] = ..., canShare: _Optional[bool] = ...) -> None: ... class SharedFolderFolderFields(_message.Message): - __slots__ = ["sharedFolderUid"] + __slots__ = ("sharedFolderUid",) SHAREDFOLDERUID_FIELD_NUMBER: _ClassVar[int] sharedFolderUid: bytes def __init__(self, sharedFolderUid: _Optional[bytes] = ...) -> None: ... 
class FolderRequest(_message.Message): - __slots__ = ["folderUid", "folderType", "parentFolderUid", "folderData", "encryptedFolderKey", "sharedFolderFields", "sharedFolderFolderFields"] + __slots__ = ("folderUid", "folderType", "parentFolderUid", "folderData", "encryptedFolderKey", "sharedFolderFields", "sharedFolderFolderFields") FOLDERUID_FIELD_NUMBER: _ClassVar[int] FOLDERTYPE_FIELD_NUMBER: _ClassVar[int] PARENTFOLDERUID_FIELD_NUMBER: _ClassVar[int] @@ -162,7 +271,7 @@ class FolderRequest(_message.Message): def __init__(self, folderUid: _Optional[bytes] = ..., folderType: _Optional[_Union[FolderType, str]] = ..., parentFolderUid: _Optional[bytes] = ..., folderData: _Optional[bytes] = ..., encryptedFolderKey: _Optional[bytes] = ..., sharedFolderFields: _Optional[_Union[SharedFolderFields, _Mapping]] = ..., sharedFolderFolderFields: _Optional[_Union[SharedFolderFolderFields, _Mapping]] = ...) -> None: ... class FolderResponse(_message.Message): - __slots__ = ["folderUid", "revision", "status"] + __slots__ = ("folderUid", "revision", "status") FOLDERUID_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] STATUS_FIELD_NUMBER: _ClassVar[int] @@ -172,7 +281,7 @@ class FolderResponse(_message.Message): def __init__(self, folderUid: _Optional[bytes] = ..., revision: _Optional[int] = ..., status: _Optional[str] = ...) -> None: ... class ImportFolderRecordRequest(_message.Message): - __slots__ = ["folderRequest", "recordRequest"] + __slots__ = ("folderRequest", "recordRequest") FOLDERREQUEST_FIELD_NUMBER: _ClassVar[int] RECORDREQUEST_FIELD_NUMBER: _ClassVar[int] folderRequest: _containers.RepeatedCompositeFieldContainer[FolderRequest] @@ -180,7 +289,7 @@ class ImportFolderRecordRequest(_message.Message): def __init__(self, folderRequest: _Optional[_Iterable[_Union[FolderRequest, _Mapping]]] = ..., recordRequest: _Optional[_Iterable[_Union[RecordRequest, _Mapping]]] = ...) -> None: ... 
class ImportFolderRecordResponse(_message.Message): - __slots__ = ["folderResponse", "recordResponse"] + __slots__ = ("folderResponse", "recordResponse") FOLDERRESPONSE_FIELD_NUMBER: _ClassVar[int] RECORDRESPONSE_FIELD_NUMBER: _ClassVar[int] folderResponse: _containers.RepeatedCompositeFieldContainer[FolderResponse] @@ -188,7 +297,7 @@ class ImportFolderRecordResponse(_message.Message): def __init__(self, folderResponse: _Optional[_Iterable[_Union[FolderResponse, _Mapping]]] = ..., recordResponse: _Optional[_Iterable[_Union[RecordResponse, _Mapping]]] = ...) -> None: ... class SharedFolderUpdateRecord(_message.Message): - __slots__ = ["recordUid", "sharedFolderUid", "teamUid", "canEdit", "canShare", "encryptedRecordKey", "revision", "expiration", "timerNotificationType", "rotateOnExpiration"] + __slots__ = ("recordUid", "sharedFolderUid", "teamUid", "canEdit", "canShare", "encryptedRecordKey", "revision", "expiration", "timerNotificationType", "rotateOnExpiration") RECORDUID_FIELD_NUMBER: _ClassVar[int] SHAREDFOLDERUID_FIELD_NUMBER: _ClassVar[int] TEAMUID_FIELD_NUMBER: _ClassVar[int] @@ -209,10 +318,10 @@ class SharedFolderUpdateRecord(_message.Message): expiration: int timerNotificationType: _record_pb2.TimerNotificationType rotateOnExpiration: bool - def __init__(self, recordUid: _Optional[bytes] = ..., sharedFolderUid: _Optional[bytes] = ..., teamUid: _Optional[bytes] = ..., canEdit: _Optional[_Union[SetBooleanValue, str]] = ..., canShare: _Optional[_Union[SetBooleanValue, str]] = ..., encryptedRecordKey: _Optional[bytes] = ..., revision: _Optional[int] = ..., expiration: _Optional[int] = ..., timerNotificationType: _Optional[_Union[_record_pb2.TimerNotificationType, str]] = ..., rotateOnExpiration: bool = ...) -> None: ... 
+ def __init__(self, recordUid: _Optional[bytes] = ..., sharedFolderUid: _Optional[bytes] = ..., teamUid: _Optional[bytes] = ..., canEdit: _Optional[_Union[SetBooleanValue, str]] = ..., canShare: _Optional[_Union[SetBooleanValue, str]] = ..., encryptedRecordKey: _Optional[bytes] = ..., revision: _Optional[int] = ..., expiration: _Optional[int] = ..., timerNotificationType: _Optional[_Union[_record_pb2.TimerNotificationType, str]] = ..., rotateOnExpiration: _Optional[bool] = ...) -> None: ... class SharedFolderUpdateUser(_message.Message): - __slots__ = ["username", "manageUsers", "manageRecords", "sharedFolderKey", "expiration", "timerNotificationType", "typedSharedFolderKey", "rotateOnExpiration"] + __slots__ = ("username", "manageUsers", "manageRecords", "sharedFolderKey", "expiration", "timerNotificationType", "typedSharedFolderKey", "rotateOnExpiration") USERNAME_FIELD_NUMBER: _ClassVar[int] MANAGEUSERS_FIELD_NUMBER: _ClassVar[int] MANAGERECORDS_FIELD_NUMBER: _ClassVar[int] @@ -229,10 +338,10 @@ class SharedFolderUpdateUser(_message.Message): timerNotificationType: _record_pb2.TimerNotificationType typedSharedFolderKey: EncryptedDataKey rotateOnExpiration: bool - def __init__(self, username: _Optional[str] = ..., manageUsers: _Optional[_Union[SetBooleanValue, str]] = ..., manageRecords: _Optional[_Union[SetBooleanValue, str]] = ..., sharedFolderKey: _Optional[bytes] = ..., expiration: _Optional[int] = ..., timerNotificationType: _Optional[_Union[_record_pb2.TimerNotificationType, str]] = ..., typedSharedFolderKey: _Optional[_Union[EncryptedDataKey, _Mapping]] = ..., rotateOnExpiration: bool = ...) -> None: ... 
+ def __init__(self, username: _Optional[str] = ..., manageUsers: _Optional[_Union[SetBooleanValue, str]] = ..., manageRecords: _Optional[_Union[SetBooleanValue, str]] = ..., sharedFolderKey: _Optional[bytes] = ..., expiration: _Optional[int] = ..., timerNotificationType: _Optional[_Union[_record_pb2.TimerNotificationType, str]] = ..., typedSharedFolderKey: _Optional[_Union[EncryptedDataKey, _Mapping]] = ..., rotateOnExpiration: _Optional[bool] = ...) -> None: ... class SharedFolderUpdateTeam(_message.Message): - __slots__ = ["teamUid", "manageUsers", "manageRecords", "sharedFolderKey", "expiration", "timerNotificationType", "typedSharedFolderKey", "rotateOnExpiration"] + __slots__ = ("teamUid", "manageUsers", "manageRecords", "sharedFolderKey", "expiration", "timerNotificationType", "typedSharedFolderKey", "rotateOnExpiration") TEAMUID_FIELD_NUMBER: _ClassVar[int] MANAGEUSERS_FIELD_NUMBER: _ClassVar[int] MANAGERECORDS_FIELD_NUMBER: _ClassVar[int] @@ -249,10 +358,10 @@ class SharedFolderUpdateTeam(_message.Message): timerNotificationType: _record_pb2.TimerNotificationType typedSharedFolderKey: EncryptedDataKey rotateOnExpiration: bool - def __init__(self, teamUid: _Optional[bytes] = ..., manageUsers: bool = ..., manageRecords: bool = ..., sharedFolderKey: _Optional[bytes] = ..., expiration: _Optional[int] = ..., timerNotificationType: _Optional[_Union[_record_pb2.TimerNotificationType, str]] = ..., typedSharedFolderKey: _Optional[_Union[EncryptedDataKey, _Mapping]] = ..., rotateOnExpiration: bool = ...) -> None: ... + def __init__(self, teamUid: _Optional[bytes] = ..., manageUsers: _Optional[bool] = ..., manageRecords: _Optional[bool] = ..., sharedFolderKey: _Optional[bytes] = ..., expiration: _Optional[int] = ..., timerNotificationType: _Optional[_Union[_record_pb2.TimerNotificationType, str]] = ..., typedSharedFolderKey: _Optional[_Union[EncryptedDataKey, _Mapping]] = ..., rotateOnExpiration: _Optional[bool] = ...) -> None: ... 
class SharedFolderUpdateV3Request(_message.Message): - __slots__ = ["sharedFolderUpdateOperation_dont_use", "sharedFolderUid", "encryptedSharedFolderName", "revision", "forceUpdate", "fromTeamUid", "defaultManageUsers", "defaultManageRecords", "defaultCanEdit", "defaultCanShare", "sharedFolderAddRecord", "sharedFolderAddUser", "sharedFolderAddTeam", "sharedFolderUpdateRecord", "sharedFolderUpdateUser", "sharedFolderUpdateTeam", "sharedFolderRemoveRecord", "sharedFolderRemoveUser", "sharedFolderRemoveTeam", "sharedFolderOwner"] + __slots__ = ("sharedFolderUpdateOperation_dont_use", "sharedFolderUid", "encryptedSharedFolderName", "revision", "forceUpdate", "fromTeamUid", "defaultManageUsers", "defaultManageRecords", "defaultCanEdit", "defaultCanShare", "sharedFolderAddRecord", "sharedFolderAddUser", "sharedFolderAddTeam", "sharedFolderUpdateRecord", "sharedFolderUpdateUser", "sharedFolderUpdateTeam", "sharedFolderRemoveRecord", "sharedFolderRemoveUser", "sharedFolderRemoveTeam", "sharedFolderOwner") SHAREDFOLDERUPDATEOPERATION_DONT_USE_FIELD_NUMBER: _ClassVar[int] SHAREDFOLDERUID_FIELD_NUMBER: _ClassVar[int] ENCRYPTEDSHAREDFOLDERNAME_FIELD_NUMBER: _ClassVar[int] @@ -293,16 +402,16 @@ class SharedFolderUpdateV3Request(_message.Message): sharedFolderRemoveUser: _containers.RepeatedScalarFieldContainer[str] sharedFolderRemoveTeam: _containers.RepeatedScalarFieldContainer[bytes] sharedFolderOwner: str - def __init__(self, sharedFolderUpdateOperation_dont_use: _Optional[int] = ..., sharedFolderUid: _Optional[bytes] = ..., encryptedSharedFolderName: _Optional[bytes] = ..., revision: _Optional[int] = ..., forceUpdate: bool = ..., fromTeamUid: _Optional[bytes] = ..., defaultManageUsers: _Optional[_Union[SetBooleanValue, str]] = ..., defaultManageRecords: _Optional[_Union[SetBooleanValue, str]] = ..., defaultCanEdit: _Optional[_Union[SetBooleanValue, str]] = ..., defaultCanShare: _Optional[_Union[SetBooleanValue, str]] = ..., sharedFolderAddRecord: 
_Optional[_Iterable[_Union[SharedFolderUpdateRecord, _Mapping]]] = ..., sharedFolderAddUser: _Optional[_Iterable[_Union[SharedFolderUpdateUser, _Mapping]]] = ..., sharedFolderAddTeam: _Optional[_Iterable[_Union[SharedFolderUpdateTeam, _Mapping]]] = ..., sharedFolderUpdateRecord: _Optional[_Iterable[_Union[SharedFolderUpdateRecord, _Mapping]]] = ..., sharedFolderUpdateUser: _Optional[_Iterable[_Union[SharedFolderUpdateUser, _Mapping]]] = ..., sharedFolderUpdateTeam: _Optional[_Iterable[_Union[SharedFolderUpdateTeam, _Mapping]]] = ..., sharedFolderRemoveRecord: _Optional[_Iterable[bytes]] = ..., sharedFolderRemoveUser: _Optional[_Iterable[str]] = ..., sharedFolderRemoveTeam: _Optional[_Iterable[bytes]] = ..., sharedFolderOwner: _Optional[str] = ...) -> None: ... + def __init__(self, sharedFolderUpdateOperation_dont_use: _Optional[int] = ..., sharedFolderUid: _Optional[bytes] = ..., encryptedSharedFolderName: _Optional[bytes] = ..., revision: _Optional[int] = ..., forceUpdate: _Optional[bool] = ..., fromTeamUid: _Optional[bytes] = ..., defaultManageUsers: _Optional[_Union[SetBooleanValue, str]] = ..., defaultManageRecords: _Optional[_Union[SetBooleanValue, str]] = ..., defaultCanEdit: _Optional[_Union[SetBooleanValue, str]] = ..., defaultCanShare: _Optional[_Union[SetBooleanValue, str]] = ..., sharedFolderAddRecord: _Optional[_Iterable[_Union[SharedFolderUpdateRecord, _Mapping]]] = ..., sharedFolderAddUser: _Optional[_Iterable[_Union[SharedFolderUpdateUser, _Mapping]]] = ..., sharedFolderAddTeam: _Optional[_Iterable[_Union[SharedFolderUpdateTeam, _Mapping]]] = ..., sharedFolderUpdateRecord: _Optional[_Iterable[_Union[SharedFolderUpdateRecord, _Mapping]]] = ..., sharedFolderUpdateUser: _Optional[_Iterable[_Union[SharedFolderUpdateUser, _Mapping]]] = ..., sharedFolderUpdateTeam: _Optional[_Iterable[_Union[SharedFolderUpdateTeam, _Mapping]]] = ..., sharedFolderRemoveRecord: _Optional[_Iterable[bytes]] = ..., sharedFolderRemoveUser: _Optional[_Iterable[str]] = ..., 
sharedFolderRemoveTeam: _Optional[_Iterable[bytes]] = ..., sharedFolderOwner: _Optional[str] = ...) -> None: ... class SharedFolderUpdateV3RequestV2(_message.Message): - __slots__ = ["sharedFoldersUpdateV3"] + __slots__ = ("sharedFoldersUpdateV3",) SHAREDFOLDERSUPDATEV3_FIELD_NUMBER: _ClassVar[int] sharedFoldersUpdateV3: _containers.RepeatedCompositeFieldContainer[SharedFolderUpdateV3Request] def __init__(self, sharedFoldersUpdateV3: _Optional[_Iterable[_Union[SharedFolderUpdateV3Request, _Mapping]]] = ...) -> None: ... class SharedFolderUpdateRecordStatus(_message.Message): - __slots__ = ["recordUid", "status"] + __slots__ = ("recordUid", "status") RECORDUID_FIELD_NUMBER: _ClassVar[int] STATUS_FIELD_NUMBER: _ClassVar[int] recordUid: bytes @@ -310,7 +419,7 @@ class SharedFolderUpdateRecordStatus(_message.Message): def __init__(self, recordUid: _Optional[bytes] = ..., status: _Optional[str] = ...) -> None: ... class SharedFolderUpdateUserStatus(_message.Message): - __slots__ = ["username", "status"] + __slots__ = ("username", "status") USERNAME_FIELD_NUMBER: _ClassVar[int] STATUS_FIELD_NUMBER: _ClassVar[int] username: str @@ -318,7 +427,7 @@ class SharedFolderUpdateUserStatus(_message.Message): def __init__(self, username: _Optional[str] = ..., status: _Optional[str] = ...) -> None: ... class SharedFolderUpdateTeamStatus(_message.Message): - __slots__ = ["teamUid", "status"] + __slots__ = ("teamUid", "status") TEAMUID_FIELD_NUMBER: _ClassVar[int] STATUS_FIELD_NUMBER: _ClassVar[int] teamUid: bytes @@ -326,7 +435,7 @@ class SharedFolderUpdateTeamStatus(_message.Message): def __init__(self, teamUid: _Optional[bytes] = ..., status: _Optional[str] = ...) -> None: ... 
class SharedFolderUpdateV3Response(_message.Message): - __slots__ = ["revision", "sharedFolderAddRecordStatus", "sharedFolderAddUserStatus", "sharedFolderAddTeamStatus", "sharedFolderUpdateRecordStatus", "sharedFolderUpdateUserStatus", "sharedFolderUpdateTeamStatus", "sharedFolderRemoveRecordStatus", "sharedFolderRemoveUserStatus", "sharedFolderRemoveTeamStatus", "sharedFolderUid", "status"] + __slots__ = ("revision", "sharedFolderAddRecordStatus", "sharedFolderAddUserStatus", "sharedFolderAddTeamStatus", "sharedFolderUpdateRecordStatus", "sharedFolderUpdateUserStatus", "sharedFolderUpdateTeamStatus", "sharedFolderRemoveRecordStatus", "sharedFolderRemoveUserStatus", "sharedFolderRemoveTeamStatus", "sharedFolderUid", "status") REVISION_FIELD_NUMBER: _ClassVar[int] SHAREDFOLDERADDRECORDSTATUS_FIELD_NUMBER: _ClassVar[int] SHAREDFOLDERADDUSERSTATUS_FIELD_NUMBER: _ClassVar[int] @@ -354,13 +463,13 @@ class SharedFolderUpdateV3Response(_message.Message): def __init__(self, revision: _Optional[int] = ..., sharedFolderAddRecordStatus: _Optional[_Iterable[_Union[SharedFolderUpdateRecordStatus, _Mapping]]] = ..., sharedFolderAddUserStatus: _Optional[_Iterable[_Union[SharedFolderUpdateUserStatus, _Mapping]]] = ..., sharedFolderAddTeamStatus: _Optional[_Iterable[_Union[SharedFolderUpdateTeamStatus, _Mapping]]] = ..., sharedFolderUpdateRecordStatus: _Optional[_Iterable[_Union[SharedFolderUpdateRecordStatus, _Mapping]]] = ..., sharedFolderUpdateUserStatus: _Optional[_Iterable[_Union[SharedFolderUpdateUserStatus, _Mapping]]] = ..., sharedFolderUpdateTeamStatus: _Optional[_Iterable[_Union[SharedFolderUpdateTeamStatus, _Mapping]]] = ..., sharedFolderRemoveRecordStatus: _Optional[_Iterable[_Union[SharedFolderUpdateRecordStatus, _Mapping]]] = ..., sharedFolderRemoveUserStatus: _Optional[_Iterable[_Union[SharedFolderUpdateUserStatus, _Mapping]]] = ..., sharedFolderRemoveTeamStatus: _Optional[_Iterable[_Union[SharedFolderUpdateTeamStatus, _Mapping]]] = ..., sharedFolderUid: 
_Optional[bytes] = ..., status: _Optional[str] = ...) -> None: ... class SharedFolderUpdateV3ResponseV2(_message.Message): - __slots__ = ["sharedFoldersUpdateV3Response"] + __slots__ = ("sharedFoldersUpdateV3Response",) SHAREDFOLDERSUPDATEV3RESPONSE_FIELD_NUMBER: _ClassVar[int] sharedFoldersUpdateV3Response: _containers.RepeatedCompositeFieldContainer[SharedFolderUpdateV3Response] def __init__(self, sharedFoldersUpdateV3Response: _Optional[_Iterable[_Union[SharedFolderUpdateV3Response, _Mapping]]] = ...) -> None: ... class GetDeletedSharedFoldersAndRecordsResponse(_message.Message): - __slots__ = ["sharedFolders", "sharedFolderRecords", "deletedRecordData", "usernames"] + __slots__ = ("sharedFolders", "sharedFolderRecords", "deletedRecordData", "usernames") SHAREDFOLDERS_FIELD_NUMBER: _ClassVar[int] SHAREDFOLDERRECORDS_FIELD_NUMBER: _ClassVar[int] DELETEDRECORDDATA_FIELD_NUMBER: _ClassVar[int] @@ -372,7 +481,7 @@ class GetDeletedSharedFoldersAndRecordsResponse(_message.Message): def __init__(self, sharedFolders: _Optional[_Iterable[_Union[DeletedSharedFolder, _Mapping]]] = ..., sharedFolderRecords: _Optional[_Iterable[_Union[DeletedSharedFolderRecord, _Mapping]]] = ..., deletedRecordData: _Optional[_Iterable[_Union[DeletedRecordData, _Mapping]]] = ..., usernames: _Optional[_Iterable[_Union[Username, _Mapping]]] = ...) -> None: ... 
class DeletedSharedFolder(_message.Message): - __slots__ = ["sharedFolderUid", "folderUid", "parentUid", "sharedFolderKey", "folderKeyType", "data", "dateDeleted", "revision"] + __slots__ = ("sharedFolderUid", "folderUid", "parentUid", "sharedFolderKey", "folderKeyType", "data", "dateDeleted", "revision") SHAREDFOLDERUID_FIELD_NUMBER: _ClassVar[int] FOLDERUID_FIELD_NUMBER: _ClassVar[int] PARENTUID_FIELD_NUMBER: _ClassVar[int] @@ -392,7 +501,7 @@ class DeletedSharedFolder(_message.Message): def __init__(self, sharedFolderUid: _Optional[bytes] = ..., folderUid: _Optional[bytes] = ..., parentUid: _Optional[bytes] = ..., sharedFolderKey: _Optional[bytes] = ..., folderKeyType: _Optional[_Union[_record_pb2.RecordKeyType, str]] = ..., data: _Optional[bytes] = ..., dateDeleted: _Optional[int] = ..., revision: _Optional[int] = ...) -> None: ... class DeletedSharedFolderRecord(_message.Message): - __slots__ = ["folderUid", "recordUid", "sharedRecordKey", "dateDeleted", "revision"] + __slots__ = ("folderUid", "recordUid", "sharedRecordKey", "dateDeleted", "revision") FOLDERUID_FIELD_NUMBER: _ClassVar[int] RECORDUID_FIELD_NUMBER: _ClassVar[int] SHAREDRECORDKEY_FIELD_NUMBER: _ClassVar[int] @@ -406,7 +515,7 @@ class DeletedSharedFolderRecord(_message.Message): def __init__(self, folderUid: _Optional[bytes] = ..., recordUid: _Optional[bytes] = ..., sharedRecordKey: _Optional[bytes] = ..., dateDeleted: _Optional[int] = ..., revision: _Optional[int] = ...) -> None: ... 
class DeletedRecordData(_message.Message): - __slots__ = ["recordUid", "ownerUid", "revision", "clientModifiedTime", "data", "version"] + __slots__ = ("recordUid", "ownerUid", "revision", "clientModifiedTime", "data", "version") RECORDUID_FIELD_NUMBER: _ClassVar[int] OWNERUID_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] @@ -422,7 +531,7 @@ class DeletedRecordData(_message.Message): def __init__(self, recordUid: _Optional[bytes] = ..., ownerUid: _Optional[bytes] = ..., revision: _Optional[int] = ..., clientModifiedTime: _Optional[int] = ..., data: _Optional[bytes] = ..., version: _Optional[int] = ...) -> None: ... class Username(_message.Message): - __slots__ = ["accountUid", "username"] + __slots__ = ("accountUid", "username") ACCOUNTUID_FIELD_NUMBER: _ClassVar[int] USERNAME_FIELD_NUMBER: _ClassVar[int] accountUid: bytes @@ -430,7 +539,7 @@ class Username(_message.Message): def __init__(self, accountUid: _Optional[bytes] = ..., username: _Optional[str] = ...) -> None: ... class RestoreDeletedSharedFoldersAndRecordsRequest(_message.Message): - __slots__ = ["folders", "records"] + __slots__ = ("folders", "records") FOLDERS_FIELD_NUMBER: _ClassVar[int] RECORDS_FIELD_NUMBER: _ClassVar[int] folders: _containers.RepeatedCompositeFieldContainer[RestoreSharedObject] @@ -438,9 +547,353 @@ class RestoreDeletedSharedFoldersAndRecordsRequest(_message.Message): def __init__(self, folders: _Optional[_Iterable[_Union[RestoreSharedObject, _Mapping]]] = ..., records: _Optional[_Iterable[_Union[RestoreSharedObject, _Mapping]]] = ...) -> None: ... class RestoreSharedObject(_message.Message): - __slots__ = ["folderUid", "recordUids"] + __slots__ = ("folderUid", "recordUids") FOLDERUID_FIELD_NUMBER: _ClassVar[int] RECORDUIDS_FIELD_NUMBER: _ClassVar[int] folderUid: bytes recordUids: _containers.RepeatedScalarFieldContainer[bytes] def __init__(self, folderUid: _Optional[bytes] = ..., recordUids: _Optional[_Iterable[bytes]] = ...) -> None: ... 
+ +class FolderData(_message.Message): + __slots__ = ("folderUid", "parentUid", "data", "type", "inheritUserPermissions", "folderKey", "ownerInfo", "dateCreated", "lastModified") + FOLDERUID_FIELD_NUMBER: _ClassVar[int] + PARENTUID_FIELD_NUMBER: _ClassVar[int] + DATA_FIELD_NUMBER: _ClassVar[int] + TYPE_FIELD_NUMBER: _ClassVar[int] + INHERITUSERPERMISSIONS_FIELD_NUMBER: _ClassVar[int] + FOLDERKEY_FIELD_NUMBER: _ClassVar[int] + OWNERINFO_FIELD_NUMBER: _ClassVar[int] + DATECREATED_FIELD_NUMBER: _ClassVar[int] + LASTMODIFIED_FIELD_NUMBER: _ClassVar[int] + folderUid: bytes + parentUid: bytes + data: bytes + type: FolderUsageType + inheritUserPermissions: SetBooleanValue + folderKey: bytes + ownerInfo: UserInfo + dateCreated: int + lastModified: int + def __init__(self, folderUid: _Optional[bytes] = ..., parentUid: _Optional[bytes] = ..., data: _Optional[bytes] = ..., type: _Optional[_Union[FolderUsageType, str]] = ..., inheritUserPermissions: _Optional[_Union[SetBooleanValue, str]] = ..., folderKey: _Optional[bytes] = ..., ownerInfo: _Optional[_Union[UserInfo, _Mapping]] = ..., dateCreated: _Optional[int] = ..., lastModified: _Optional[int] = ...) -> None: ... + +class FolderKey(_message.Message): + __slots__ = ("folderUid", "parentUid", "folderKey", "encryptedBy") + FOLDERUID_FIELD_NUMBER: _ClassVar[int] + PARENTUID_FIELD_NUMBER: _ClassVar[int] + FOLDERKEY_FIELD_NUMBER: _ClassVar[int] + ENCRYPTEDBY_FIELD_NUMBER: _ClassVar[int] + folderUid: bytes + parentUid: bytes + folderKey: bytes + encryptedBy: FolderKeyEncryptionType + def __init__(self, folderUid: _Optional[bytes] = ..., parentUid: _Optional[bytes] = ..., folderKey: _Optional[bytes] = ..., encryptedBy: _Optional[_Union[FolderKeyEncryptionType, str]] = ...) -> None: ... 
+ +class FolderAddRequest(_message.Message): + __slots__ = ("folderData",) + FOLDERDATA_FIELD_NUMBER: _ClassVar[int] + folderData: _containers.RepeatedCompositeFieldContainer[FolderData] + def __init__(self, folderData: _Optional[_Iterable[_Union[FolderData, _Mapping]]] = ...) -> None: ... + +class FolderModifyResult(_message.Message): + __slots__ = ("folderUid", "status", "message") + FOLDERUID_FIELD_NUMBER: _ClassVar[int] + STATUS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + folderUid: bytes + status: FolderModifyStatus + message: str + def __init__(self, folderUid: _Optional[bytes] = ..., status: _Optional[_Union[FolderModifyStatus, str]] = ..., message: _Optional[str] = ...) -> None: ... + +class FolderAddResponse(_message.Message): + __slots__ = ("folderAddResults",) + FOLDERADDRESULTS_FIELD_NUMBER: _ClassVar[int] + folderAddResults: _containers.RepeatedCompositeFieldContainer[FolderModifyResult] + def __init__(self, folderAddResults: _Optional[_Iterable[_Union[FolderModifyResult, _Mapping]]] = ...) -> None: ... + +class FolderUpdateRequest(_message.Message): + __slots__ = ("folderData",) + FOLDERDATA_FIELD_NUMBER: _ClassVar[int] + folderData: _containers.RepeatedCompositeFieldContainer[FolderData] + def __init__(self, folderData: _Optional[_Iterable[_Union[FolderData, _Mapping]]] = ...) -> None: ... + +class FolderUpdateResponse(_message.Message): + __slots__ = ("folderUpdateResults",) + FOLDERUPDATERESULTS_FIELD_NUMBER: _ClassVar[int] + folderUpdateResults: _containers.RepeatedCompositeFieldContainer[FolderModifyResult] + def __init__(self, folderUpdateResults: _Optional[_Iterable[_Union[FolderModifyResult, _Mapping]]] = ...) -> None: ... 
+ +class FolderPermissions(_message.Message): + __slots__ = ("canAdd", "canRemove", "canDelete", "canListAccess", "canUpdateAccess", "canChangeOwnership", "canEditRecords", "canViewRecords", "canApproveAccess", "canRequestAccess", "canUpdateSetting", "canListRecords", "canListFolders") + CANADD_FIELD_NUMBER: _ClassVar[int] + CANREMOVE_FIELD_NUMBER: _ClassVar[int] + CANDELETE_FIELD_NUMBER: _ClassVar[int] + CANLISTACCESS_FIELD_NUMBER: _ClassVar[int] + CANUPDATEACCESS_FIELD_NUMBER: _ClassVar[int] + CANCHANGEOWNERSHIP_FIELD_NUMBER: _ClassVar[int] + CANEDITRECORDS_FIELD_NUMBER: _ClassVar[int] + CANVIEWRECORDS_FIELD_NUMBER: _ClassVar[int] + CANAPPROVEACCESS_FIELD_NUMBER: _ClassVar[int] + CANREQUESTACCESS_FIELD_NUMBER: _ClassVar[int] + CANUPDATESETTING_FIELD_NUMBER: _ClassVar[int] + CANLISTRECORDS_FIELD_NUMBER: _ClassVar[int] + CANLISTFOLDERS_FIELD_NUMBER: _ClassVar[int] + canAdd: bool + canRemove: bool + canDelete: bool + canListAccess: bool + canUpdateAccess: bool + canChangeOwnership: bool + canEditRecords: bool + canViewRecords: bool + canApproveAccess: bool + canRequestAccess: bool + canUpdateSetting: bool + canListRecords: bool + canListFolders: bool + def __init__(self, canAdd: _Optional[bool] = ..., canRemove: _Optional[bool] = ..., canDelete: _Optional[bool] = ..., canListAccess: _Optional[bool] = ..., canUpdateAccess: _Optional[bool] = ..., canChangeOwnership: _Optional[bool] = ..., canEditRecords: _Optional[bool] = ..., canViewRecords: _Optional[bool] = ..., canApproveAccess: _Optional[bool] = ..., canRequestAccess: _Optional[bool] = ..., canUpdateSetting: _Optional[bool] = ..., canListRecords: _Optional[bool] = ..., canListFolders: _Optional[bool] = ...) -> None: ... 
+ +class Capabilities(_message.Message): + __slots__ = ("canAdd", "canRemove", "canDelete", "canListAccess", "canUpdateAccess", "canChangeOwnership", "canEditRecords", "canViewRecords", "canApproveAccess", "canRequestAccess", "canUpdateSetting", "canListRecords", "canListFolders") + CANADD_FIELD_NUMBER: _ClassVar[int] + CANREMOVE_FIELD_NUMBER: _ClassVar[int] + CANDELETE_FIELD_NUMBER: _ClassVar[int] + CANLISTACCESS_FIELD_NUMBER: _ClassVar[int] + CANUPDATEACCESS_FIELD_NUMBER: _ClassVar[int] + CANCHANGEOWNERSHIP_FIELD_NUMBER: _ClassVar[int] + CANEDITRECORDS_FIELD_NUMBER: _ClassVar[int] + CANVIEWRECORDS_FIELD_NUMBER: _ClassVar[int] + CANAPPROVEACCESS_FIELD_NUMBER: _ClassVar[int] + CANREQUESTACCESS_FIELD_NUMBER: _ClassVar[int] + CANUPDATESETTING_FIELD_NUMBER: _ClassVar[int] + CANLISTRECORDS_FIELD_NUMBER: _ClassVar[int] + CANLISTFOLDERS_FIELD_NUMBER: _ClassVar[int] + canAdd: SetBooleanValue + canRemove: SetBooleanValue + canDelete: SetBooleanValue + canListAccess: SetBooleanValue + canUpdateAccess: SetBooleanValue + canChangeOwnership: SetBooleanValue + canEditRecords: SetBooleanValue + canViewRecords: SetBooleanValue + canApproveAccess: SetBooleanValue + canRequestAccess: SetBooleanValue + canUpdateSetting: SetBooleanValue + canListRecords: SetBooleanValue + canListFolders: SetBooleanValue + def __init__(self, canAdd: _Optional[_Union[SetBooleanValue, str]] = ..., canRemove: _Optional[_Union[SetBooleanValue, str]] = ..., canDelete: _Optional[_Union[SetBooleanValue, str]] = ..., canListAccess: _Optional[_Union[SetBooleanValue, str]] = ..., canUpdateAccess: _Optional[_Union[SetBooleanValue, str]] = ..., canChangeOwnership: _Optional[_Union[SetBooleanValue, str]] = ..., canEditRecords: _Optional[_Union[SetBooleanValue, str]] = ..., canViewRecords: _Optional[_Union[SetBooleanValue, str]] = ..., canApproveAccess: _Optional[_Union[SetBooleanValue, str]] = ..., canRequestAccess: _Optional[_Union[SetBooleanValue, str]] = ..., canUpdateSetting: _Optional[_Union[SetBooleanValue, 
str]] = ..., canListRecords: _Optional[_Union[SetBooleanValue, str]] = ..., canListFolders: _Optional[_Union[SetBooleanValue, str]] = ...) -> None: ... + +class FolderRecordUpdateRequest(_message.Message): + __slots__ = ("folderUid", "addRecords", "updateRecords", "removeRecords") + FOLDERUID_FIELD_NUMBER: _ClassVar[int] + ADDRECORDS_FIELD_NUMBER: _ClassVar[int] + UPDATERECORDS_FIELD_NUMBER: _ClassVar[int] + REMOVERECORDS_FIELD_NUMBER: _ClassVar[int] + folderUid: bytes + addRecords: _containers.RepeatedCompositeFieldContainer[RecordMetadata] + updateRecords: _containers.RepeatedCompositeFieldContainer[RecordMetadata] + removeRecords: _containers.RepeatedCompositeFieldContainer[RecordMetadata] + def __init__(self, folderUid: _Optional[bytes] = ..., addRecords: _Optional[_Iterable[_Union[RecordMetadata, _Mapping]]] = ..., updateRecords: _Optional[_Iterable[_Union[RecordMetadata, _Mapping]]] = ..., removeRecords: _Optional[_Iterable[_Union[RecordMetadata, _Mapping]]] = ...) -> None: ... + +class RecordMetadata(_message.Message): + __slots__ = ("recordUid", "encryptedRecordKey", "encryptedRecordKeyType", "tlaProperties") + RECORDUID_FIELD_NUMBER: _ClassVar[int] + ENCRYPTEDRECORDKEY_FIELD_NUMBER: _ClassVar[int] + ENCRYPTEDRECORDKEYTYPE_FIELD_NUMBER: _ClassVar[int] + TLAPROPERTIES_FIELD_NUMBER: _ClassVar[int] + recordUid: bytes + encryptedRecordKey: bytes + encryptedRecordKeyType: EncryptedKeyType + tlaProperties: _tla_pb2.TLAProperties + def __init__(self, recordUid: _Optional[bytes] = ..., encryptedRecordKey: _Optional[bytes] = ..., encryptedRecordKeyType: _Optional[_Union[EncryptedKeyType, str]] = ..., tlaProperties: _Optional[_Union[_tla_pb2.TLAProperties, _Mapping]] = ...) -> None: ... 
+ +class FolderRecord(_message.Message): + __slots__ = ("folderUid", "recordMetadata", "folderKeyEncryptionType") + FOLDERUID_FIELD_NUMBER: _ClassVar[int] + RECORDMETADATA_FIELD_NUMBER: _ClassVar[int] + FOLDERKEYENCRYPTIONTYPE_FIELD_NUMBER: _ClassVar[int] + folderUid: bytes + recordMetadata: RecordMetadata + folderKeyEncryptionType: FolderKeyEncryptionType + def __init__(self, folderUid: _Optional[bytes] = ..., recordMetadata: _Optional[_Union[RecordMetadata, _Mapping]] = ..., folderKeyEncryptionType: _Optional[_Union[FolderKeyEncryptionType, str]] = ...) -> None: ... + +class FolderRecordUpdateResponse(_message.Message): + __slots__ = ("folderUid", "folderRecordUpdateResult") + FOLDERUID_FIELD_NUMBER: _ClassVar[int] + FOLDERRECORDUPDATERESULT_FIELD_NUMBER: _ClassVar[int] + folderUid: bytes + folderRecordUpdateResult: _containers.RepeatedCompositeFieldContainer[FolderRecordUpdateResult] + def __init__(self, folderUid: _Optional[bytes] = ..., folderRecordUpdateResult: _Optional[_Iterable[_Union[FolderRecordUpdateResult, _Mapping]]] = ...) -> None: ... + +class FolderRecordUpdateResult(_message.Message): + __slots__ = ("recordUid", "status", "message") + RECORDUID_FIELD_NUMBER: _ClassVar[int] + STATUS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + recordUid: bytes + status: FolderModifyStatus + message: str + def __init__(self, recordUid: _Optional[bytes] = ..., status: _Optional[_Union[FolderModifyStatus, str]] = ..., message: _Optional[str] = ...) -> None: ... 
+ +class FolderAccessData(_message.Message): + __slots__ = ("folderUid", "accessTypeUid", "accessType", "accessRoleType", "folderKey", "inherited", "hidden", "permissions", "tlaProperties", "dateCreated", "lastModified", "deniedAccess") + FOLDERUID_FIELD_NUMBER: _ClassVar[int] + ACCESSTYPEUID_FIELD_NUMBER: _ClassVar[int] + ACCESSTYPE_FIELD_NUMBER: _ClassVar[int] + ACCESSROLETYPE_FIELD_NUMBER: _ClassVar[int] + FOLDERKEY_FIELD_NUMBER: _ClassVar[int] + INHERITED_FIELD_NUMBER: _ClassVar[int] + HIDDEN_FIELD_NUMBER: _ClassVar[int] + PERMISSIONS_FIELD_NUMBER: _ClassVar[int] + TLAPROPERTIES_FIELD_NUMBER: _ClassVar[int] + DATECREATED_FIELD_NUMBER: _ClassVar[int] + LASTMODIFIED_FIELD_NUMBER: _ClassVar[int] + DENIEDACCESS_FIELD_NUMBER: _ClassVar[int] + folderUid: bytes + accessTypeUid: bytes + accessType: AccessType + accessRoleType: AccessRoleType + folderKey: EncryptedDataKey + inherited: bool + hidden: bool + permissions: FolderPermissions + tlaProperties: _tla_pb2.TLAProperties + dateCreated: int + lastModified: int + deniedAccess: bool + def __init__(self, folderUid: _Optional[bytes] = ..., accessTypeUid: _Optional[bytes] = ..., accessType: _Optional[_Union[AccessType, str]] = ..., accessRoleType: _Optional[_Union[AccessRoleType, str]] = ..., folderKey: _Optional[_Union[EncryptedDataKey, _Mapping]] = ..., inherited: _Optional[bool] = ..., hidden: _Optional[bool] = ..., permissions: _Optional[_Union[FolderPermissions, _Mapping]] = ..., tlaProperties: _Optional[_Union[_tla_pb2.TLAProperties, _Mapping]] = ..., dateCreated: _Optional[int] = ..., lastModified: _Optional[int] = ..., deniedAccess: _Optional[bool] = ...) -> None: ... 
+ +class RevokedAccess(_message.Message): + __slots__ = ("folderUid", "actorUid", "accessType") + FOLDERUID_FIELD_NUMBER: _ClassVar[int] + ACTORUID_FIELD_NUMBER: _ClassVar[int] + ACCESSTYPE_FIELD_NUMBER: _ClassVar[int] + folderUid: bytes + actorUid: bytes + accessType: AccessType + def __init__(self, folderUid: _Optional[bytes] = ..., actorUid: _Optional[bytes] = ..., accessType: _Optional[_Union[AccessType, str]] = ...) -> None: ... + +class FolderRemoved(_message.Message): + __slots__ = ("folder_uid",) + FOLDER_UID_FIELD_NUMBER: _ClassVar[int] + folder_uid: bytes + def __init__(self, folder_uid: _Optional[bytes] = ...) -> None: ... + +class RecordAccessData(_message.Message): + __slots__ = ("accessTypeUid", "accessType", "recordUid", "accessRoleType", "owner", "inherited", "hidden", "deniedAccess", "can_view_title", "can_edit", "can_view", "can_list_access", "can_update_access", "can_delete", "can_change_ownership", "can_request_access", "can_approve_access", "dateCreated", "lastModified", "tlaProperties") + ACCESSTYPEUID_FIELD_NUMBER: _ClassVar[int] + ACCESSTYPE_FIELD_NUMBER: _ClassVar[int] + RECORDUID_FIELD_NUMBER: _ClassVar[int] + ACCESSROLETYPE_FIELD_NUMBER: _ClassVar[int] + OWNER_FIELD_NUMBER: _ClassVar[int] + INHERITED_FIELD_NUMBER: _ClassVar[int] + HIDDEN_FIELD_NUMBER: _ClassVar[int] + DENIEDACCESS_FIELD_NUMBER: _ClassVar[int] + CAN_VIEW_TITLE_FIELD_NUMBER: _ClassVar[int] + CAN_EDIT_FIELD_NUMBER: _ClassVar[int] + CAN_VIEW_FIELD_NUMBER: _ClassVar[int] + CAN_LIST_ACCESS_FIELD_NUMBER: _ClassVar[int] + CAN_UPDATE_ACCESS_FIELD_NUMBER: _ClassVar[int] + CAN_DELETE_FIELD_NUMBER: _ClassVar[int] + CAN_CHANGE_OWNERSHIP_FIELD_NUMBER: _ClassVar[int] + CAN_REQUEST_ACCESS_FIELD_NUMBER: _ClassVar[int] + CAN_APPROVE_ACCESS_FIELD_NUMBER: _ClassVar[int] + DATECREATED_FIELD_NUMBER: _ClassVar[int] + LASTMODIFIED_FIELD_NUMBER: _ClassVar[int] + TLAPROPERTIES_FIELD_NUMBER: _ClassVar[int] + accessTypeUid: bytes + accessType: AccessType + recordUid: bytes + accessRoleType: 
AccessRoleType + owner: bool + inherited: bool + hidden: bool + deniedAccess: bool + can_view_title: bool + can_edit: bool + can_view: bool + can_list_access: bool + can_update_access: bool + can_delete: bool + can_change_ownership: bool + can_request_access: bool + can_approve_access: bool + dateCreated: int + lastModified: int + tlaProperties: _tla_pb2.TLAProperties + def __init__(self, accessTypeUid: _Optional[bytes] = ..., accessType: _Optional[_Union[AccessType, str]] = ..., recordUid: _Optional[bytes] = ..., accessRoleType: _Optional[_Union[AccessRoleType, str]] = ..., owner: _Optional[bool] = ..., inherited: _Optional[bool] = ..., hidden: _Optional[bool] = ..., deniedAccess: _Optional[bool] = ..., can_view_title: _Optional[bool] = ..., can_edit: _Optional[bool] = ..., can_view: _Optional[bool] = ..., can_list_access: _Optional[bool] = ..., can_update_access: _Optional[bool] = ..., can_delete: _Optional[bool] = ..., can_change_ownership: _Optional[bool] = ..., can_request_access: _Optional[bool] = ..., can_approve_access: _Optional[bool] = ..., dateCreated: _Optional[int] = ..., lastModified: _Optional[int] = ..., tlaProperties: _Optional[_Union[_tla_pb2.TLAProperties, _Mapping]] = ...) -> None: ... 
+ +class AccessData(_message.Message): + __slots__ = ("accessTypeUid", "accessRoleType", "deniedAccess", "inherited", "hidden", "capabilities") + ACCESSTYPEUID_FIELD_NUMBER: _ClassVar[int] + ACCESSROLETYPE_FIELD_NUMBER: _ClassVar[int] + DENIEDACCESS_FIELD_NUMBER: _ClassVar[int] + INHERITED_FIELD_NUMBER: _ClassVar[int] + HIDDEN_FIELD_NUMBER: _ClassVar[int] + CAPABILITIES_FIELD_NUMBER: _ClassVar[int] + accessTypeUid: bytes + accessRoleType: AccessRoleType + deniedAccess: bool + inherited: bool + hidden: bool + capabilities: Capabilities + def __init__(self, accessTypeUid: _Optional[bytes] = ..., accessRoleType: _Optional[_Union[AccessRoleType, str]] = ..., deniedAccess: _Optional[bool] = ..., inherited: _Optional[bool] = ..., hidden: _Optional[bool] = ..., capabilities: _Optional[_Union[Capabilities, _Mapping]] = ...) -> None: ... + +class FolderAccessRequest(_message.Message): + __slots__ = ("folderAccessAdds", "folderAccessUpdates", "folderAccessRemoves") + FOLDERACCESSADDS_FIELD_NUMBER: _ClassVar[int] + FOLDERACCESSUPDATES_FIELD_NUMBER: _ClassVar[int] + FOLDERACCESSREMOVES_FIELD_NUMBER: _ClassVar[int] + folderAccessAdds: _containers.RepeatedCompositeFieldContainer[FolderAccessData] + folderAccessUpdates: _containers.RepeatedCompositeFieldContainer[FolderAccessData] + folderAccessRemoves: _containers.RepeatedCompositeFieldContainer[FolderAccessData] + def __init__(self, folderAccessAdds: _Optional[_Iterable[_Union[FolderAccessData, _Mapping]]] = ..., folderAccessUpdates: _Optional[_Iterable[_Union[FolderAccessData, _Mapping]]] = ..., folderAccessRemoves: _Optional[_Iterable[_Union[FolderAccessData, _Mapping]]] = ...) -> None: ... 
+ +class FolderAccessResult(_message.Message): + __slots__ = ("folderUid", "accessUid", "accessType", "status", "message") + FOLDERUID_FIELD_NUMBER: _ClassVar[int] + ACCESSUID_FIELD_NUMBER: _ClassVar[int] + ACCESSTYPE_FIELD_NUMBER: _ClassVar[int] + STATUS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + folderUid: bytes + accessUid: bytes + accessType: AccessType + status: FolderModifyStatus + message: str + def __init__(self, folderUid: _Optional[bytes] = ..., accessUid: _Optional[bytes] = ..., accessType: _Optional[_Union[AccessType, str]] = ..., status: _Optional[_Union[FolderModifyStatus, str]] = ..., message: _Optional[str] = ...) -> None: ... + +class FolderAccessResponse(_message.Message): + __slots__ = ("folderAccessResults",) + FOLDERACCESSRESULTS_FIELD_NUMBER: _ClassVar[int] + folderAccessResults: _containers.RepeatedCompositeFieldContainer[FolderAccessResult] + def __init__(self, folderAccessResults: _Optional[_Iterable[_Union[FolderAccessResult, _Mapping]]] = ...) -> None: ... + +class UserInfo(_message.Message): + __slots__ = ("accountUid", "username") + ACCOUNTUID_FIELD_NUMBER: _ClassVar[int] + USERNAME_FIELD_NUMBER: _ClassVar[int] + accountUid: bytes + username: str + def __init__(self, accountUid: _Optional[bytes] = ..., username: _Optional[str] = ...) -> None: ... + +class RecordData(_message.Message): + __slots__ = ("user", "data", "recordUid") + USER_FIELD_NUMBER: _ClassVar[int] + DATA_FIELD_NUMBER: _ClassVar[int] + RECORDUID_FIELD_NUMBER: _ClassVar[int] + user: UserInfo + data: bytes + recordUid: bytes + def __init__(self, user: _Optional[_Union[UserInfo, _Mapping]] = ..., data: _Optional[bytes] = ..., recordUid: _Optional[bytes] = ...) -> None: ... 
+ +class RecordKey(_message.Message): + __slots__ = ("user_uid", "record_uid", "record_key", "encrypted_key_type") + USER_UID_FIELD_NUMBER: _ClassVar[int] + RECORD_UID_FIELD_NUMBER: _ClassVar[int] + RECORD_KEY_FIELD_NUMBER: _ClassVar[int] + ENCRYPTED_KEY_TYPE_FIELD_NUMBER: _ClassVar[int] + user_uid: bytes + record_uid: bytes + record_key: bytes + encrypted_key_type: EncryptedKeyType + def __init__(self, user_uid: _Optional[bytes] = ..., record_uid: _Optional[bytes] = ..., record_key: _Optional[bytes] = ..., encrypted_key_type: _Optional[_Union[EncryptedKeyType, str]] = ...) -> None: ... diff --git a/keepercommander/proto/pagination_pb2.py b/keepercommander/proto/pagination_pb2.py new file mode 100644 index 000000000..e74c67e39 --- /dev/null +++ b/keepercommander/proto/pagination_pb2.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: pagination.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10pagination.proto\x12\x11keeper.api.common\"A\n\x04Page\x12\x12\n\npageNumber\x18\x01 \x01(\x05\x12\x10\n\x08pageSize\x18\x02 \x01(\x05\x12\x13\n\x0b\x63ursorToken\x18\x03 \x01(\t\"j\n\x08PageInfo\x12\x12\n\npageNumber\x18\x01 \x01(\x05\x12\x10\n\x08pageSize\x18\x02 \x01(\x05\x12\x12\n\ntotalCount\x18\x03 \x01(\x05\x12\x0f\n\x07hasMore\x18\x04 \x01(\x08\x12\x13\n\x0b\x63ursorToken\x18\x05 \x01(\tB\'\n#com.keepersecurity.proto.api.commonP\x01\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'pagination_pb2', _globals) +if not 
_descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n#com.keepersecurity.proto.api.commonP\001' + _globals['_PAGE']._serialized_start=39 + _globals['_PAGE']._serialized_end=104 + _globals['_PAGEINFO']._serialized_start=106 + _globals['_PAGEINFO']._serialized_end=212 +# @@protoc_insertion_point(module_scope) diff --git a/keepercommander/proto/pagination_pb2.pyi b/keepercommander/proto/pagination_pb2.pyi new file mode 100644 index 000000000..08c2e73a5 --- /dev/null +++ b/keepercommander/proto/pagination_pb2.pyi @@ -0,0 +1,29 @@ +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Optional as _Optional + +DESCRIPTOR: _descriptor.FileDescriptor + +class Page(_message.Message): + __slots__ = ("pageNumber", "pageSize", "cursorToken") + PAGENUMBER_FIELD_NUMBER: _ClassVar[int] + PAGESIZE_FIELD_NUMBER: _ClassVar[int] + CURSORTOKEN_FIELD_NUMBER: _ClassVar[int] + pageNumber: int + pageSize: int + cursorToken: str + def __init__(self, pageNumber: _Optional[int] = ..., pageSize: _Optional[int] = ..., cursorToken: _Optional[str] = ...) -> None: ... + +class PageInfo(_message.Message): + __slots__ = ("pageNumber", "pageSize", "totalCount", "hasMore", "cursorToken") + PAGENUMBER_FIELD_NUMBER: _ClassVar[int] + PAGESIZE_FIELD_NUMBER: _ClassVar[int] + TOTALCOUNT_FIELD_NUMBER: _ClassVar[int] + HASMORE_FIELD_NUMBER: _ClassVar[int] + CURSORTOKEN_FIELD_NUMBER: _ClassVar[int] + pageNumber: int + pageSize: int + totalCount: int + hasMore: bool + cursorToken: str + def __init__(self, pageNumber: _Optional[int] = ..., pageSize: _Optional[int] = ..., totalCount: _Optional[int] = ..., hasMore: _Optional[bool] = ..., cursorToken: _Optional[str] = ...) -> None: ... 
diff --git a/keepercommander/proto/record_details_pb2.py b/keepercommander/proto/record_details_pb2.py new file mode 100644 index 000000000..6f12d0cbb --- /dev/null +++ b/keepercommander/proto/record_details_pb2.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: record_details.proto +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from . import folder_pb2 as folder__pb2 +from . import record_pb2 as record__pb2 +from . import pagination_pb2 as pagination__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14record_details.proto\x12\x11record.v3.details\x1a\x1cgoogle/api/annotations.proto\x1a\x0c\x66older.proto\x1a\x0crecord.proto\x1a\x10pagination.proto\";\n\x11RecordDataRequest\x12\x12\n\nclientTime\x18\x01 \x01(\x03\x12\x12\n\nrecordUids\x18\x03 \x03(\x0c\"Q\n\x12RecordDataResponse\x12!\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32\x13.Records.RecordData\x12\x18\n\x10\x66orbiddenRecords\x18\x02 \x03(\x0c\"P\n\x13RecordAccessRequest\x12\x12\n\nrecordUids\x18\x03 \x03(\x0c\x12%\n\x04page\x18\x02 \x01(\x0b\x32\x17.keeper.api.common.Page\"\x98\x01\n\x14RecordAccessResponse\x12\x37\n\x0erecordAccesses\x18\x01 \x03(\x0b\x32\x1f.record.v3.details.RecordAccess\x12\x18\n\x10\x66orbiddenRecords\x18\x02 \x03(\x0c\x12-\n\x08pageInfo\x18\x03 \x01(\x0b\x32\x1b.keeper.api.common.PageInfo\"m\n\x1cRecordAccessorDetailsRequest\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x13\n\x0b\x61\x63\x63\x65ssorUid\x18\x02 \x01(\x0c\x12%\n\x04page\x18\x03 
\x01(\x0b\x32\x17.keeper.api.common.Page\"\xb6\x01\n\x1dRecordAccessorDetailsResponse\x12\x32\n\x10recordAccessData\x18\x01 \x01(\x0b\x32\x18.Folder.RecordAccessData\x12\x32\n\x10\x66olderAccessData\x18\x02 \x03(\x0b\x32\x18.Folder.FolderAccessData\x12-\n\x08pageInfo\x18\x03 \x01(\x0b\x32\x1b.keeper.api.common.PageInfo\"m\n\x0cRecordAccess\x12&\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x18.Folder.RecordAccessData\x12\x35\n\x0c\x61\x63\x63\x65ssorInfo\x18\x02 \x01(\x0b\x32\x1f.record.v3.details.AccessorInfo\"\x1c\n\x0c\x41\x63\x63\x65ssorInfo\x12\x0c\n\x04name\x18\x01 \x01(\t2\x86\x04\n\x14RecordDetailsService\x12\x90\x01\n\rGetRecordData\x12$.record.v3.details.RecordDataRequest\x1a%.record.v3.details.RecordDataResponse\"2\x82\xd3\xe4\x93\x02,\"\'/api/rest/vault/records/v3/details/data:\x01*\x12\x9b\x01\n\x12GetRecordAccessors\x12&.record.v3.details.RecordAccessRequest\x1a\'.record.v3.details.RecordAccessResponse\"4\x82\xd3\xe4\x93\x02.\")/api/rest/vault/records/v3/details/access:\x01*\x12\xbc\x01\n\x18GetRecordAccessorDetails\x12/.record.v3.details.RecordAccessorDetailsRequest\x1a\x30.record.v3.details.RecordAccessorDetailsResponse\"=\x82\xd3\xe4\x93\x02\x37\"2/api/rest/vault/records/v3/details/access/accessor:\x01*B2\n.com.keepersecurity.proto.api.record.v3.detailsP\x01\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'record_details_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n.com.keepersecurity.proto.api.record.v3.detailsP\001' + _globals['_RECORDDETAILSSERVICE'].methods_by_name['GetRecordData']._loaded_options = None + _globals['_RECORDDETAILSSERVICE'].methods_by_name['GetRecordData']._serialized_options = b'\202\323\344\223\002,\"\'/api/rest/vault/records/v3/details/data:\001*' + 
_globals['_RECORDDETAILSSERVICE'].methods_by_name['GetRecordAccessors']._loaded_options = None + _globals['_RECORDDETAILSSERVICE'].methods_by_name['GetRecordAccessors']._serialized_options = b'\202\323\344\223\002.\")/api/rest/vault/records/v3/details/access:\001*' + _globals['_RECORDDETAILSSERVICE'].methods_by_name['GetRecordAccessorDetails']._loaded_options = None + _globals['_RECORDDETAILSSERVICE'].methods_by_name['GetRecordAccessorDetails']._serialized_options = b'\202\323\344\223\0027\"2/api/rest/vault/records/v3/details/access/accessor:\001*' + _globals['_RECORDDATAREQUEST']._serialized_start=119 + _globals['_RECORDDATAREQUEST']._serialized_end=178 + _globals['_RECORDDATARESPONSE']._serialized_start=180 + _globals['_RECORDDATARESPONSE']._serialized_end=261 + _globals['_RECORDACCESSREQUEST']._serialized_start=263 + _globals['_RECORDACCESSREQUEST']._serialized_end=343 + _globals['_RECORDACCESSRESPONSE']._serialized_start=346 + _globals['_RECORDACCESSRESPONSE']._serialized_end=498 + _globals['_RECORDACCESSORDETAILSREQUEST']._serialized_start=500 + _globals['_RECORDACCESSORDETAILSREQUEST']._serialized_end=609 + _globals['_RECORDACCESSORDETAILSRESPONSE']._serialized_start=612 + _globals['_RECORDACCESSORDETAILSRESPONSE']._serialized_end=794 + _globals['_RECORDACCESS']._serialized_start=796 + _globals['_RECORDACCESS']._serialized_end=905 + _globals['_ACCESSORINFO']._serialized_start=907 + _globals['_ACCESSORINFO']._serialized_end=935 + _globals['_RECORDDETAILSSERVICE']._serialized_start=938 + _globals['_RECORDDETAILSSERVICE']._serialized_end=1456 +# @@protoc_insertion_point(module_scope) diff --git a/keepercommander/proto/record_details_pb2.pyi b/keepercommander/proto/record_details_pb2.pyi new file mode 100644 index 000000000..50f7eefce --- /dev/null +++ b/keepercommander/proto/record_details_pb2.pyi @@ -0,0 +1,79 @@ +from google.api import annotations_pb2 as _annotations_pb2 +import folder_pb2 as _folder_pb2 +import record_pb2 as _record_pb2 +import pagination_pb2 
as _pagination_pb2 +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from collections.abc import Iterable as _Iterable, Mapping as _Mapping +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class RecordDataRequest(_message.Message): + __slots__ = ("clientTime", "recordUids") + CLIENTTIME_FIELD_NUMBER: _ClassVar[int] + RECORDUIDS_FIELD_NUMBER: _ClassVar[int] + clientTime: int + recordUids: _containers.RepeatedScalarFieldContainer[bytes] + def __init__(self, clientTime: _Optional[int] = ..., recordUids: _Optional[_Iterable[bytes]] = ...) -> None: ... + +class RecordDataResponse(_message.Message): + __slots__ = ("data", "forbiddenRecords") + DATA_FIELD_NUMBER: _ClassVar[int] + FORBIDDENRECORDS_FIELD_NUMBER: _ClassVar[int] + data: _containers.RepeatedCompositeFieldContainer[_record_pb2.RecordData] + forbiddenRecords: _containers.RepeatedScalarFieldContainer[bytes] + def __init__(self, data: _Optional[_Iterable[_Union[_record_pb2.RecordData, _Mapping]]] = ..., forbiddenRecords: _Optional[_Iterable[bytes]] = ...) -> None: ... + +class RecordAccessRequest(_message.Message): + __slots__ = ("recordUids", "page") + RECORDUIDS_FIELD_NUMBER: _ClassVar[int] + PAGE_FIELD_NUMBER: _ClassVar[int] + recordUids: _containers.RepeatedScalarFieldContainer[bytes] + page: _pagination_pb2.Page + def __init__(self, recordUids: _Optional[_Iterable[bytes]] = ..., page: _Optional[_Union[_pagination_pb2.Page, _Mapping]] = ...) -> None: ... 
+ +class RecordAccessResponse(_message.Message): + __slots__ = ("recordAccesses", "forbiddenRecords", "pageInfo") + RECORDACCESSES_FIELD_NUMBER: _ClassVar[int] + FORBIDDENRECORDS_FIELD_NUMBER: _ClassVar[int] + PAGEINFO_FIELD_NUMBER: _ClassVar[int] + recordAccesses: _containers.RepeatedCompositeFieldContainer[RecordAccess] + forbiddenRecords: _containers.RepeatedScalarFieldContainer[bytes] + pageInfo: _pagination_pb2.PageInfo + def __init__(self, recordAccesses: _Optional[_Iterable[_Union[RecordAccess, _Mapping]]] = ..., forbiddenRecords: _Optional[_Iterable[bytes]] = ..., pageInfo: _Optional[_Union[_pagination_pb2.PageInfo, _Mapping]] = ...) -> None: ... + +class RecordAccessorDetailsRequest(_message.Message): + __slots__ = ("recordUid", "accessorUid", "page") + RECORDUID_FIELD_NUMBER: _ClassVar[int] + ACCESSORUID_FIELD_NUMBER: _ClassVar[int] + PAGE_FIELD_NUMBER: _ClassVar[int] + recordUid: bytes + accessorUid: bytes + page: _pagination_pb2.Page + def __init__(self, recordUid: _Optional[bytes] = ..., accessorUid: _Optional[bytes] = ..., page: _Optional[_Union[_pagination_pb2.Page, _Mapping]] = ...) -> None: ... + +class RecordAccessorDetailsResponse(_message.Message): + __slots__ = ("recordAccessData", "folderAccessData", "pageInfo") + RECORDACCESSDATA_FIELD_NUMBER: _ClassVar[int] + FOLDERACCESSDATA_FIELD_NUMBER: _ClassVar[int] + PAGEINFO_FIELD_NUMBER: _ClassVar[int] + recordAccessData: _folder_pb2.RecordAccessData + folderAccessData: _containers.RepeatedCompositeFieldContainer[_folder_pb2.FolderAccessData] + pageInfo: _pagination_pb2.PageInfo + def __init__(self, recordAccessData: _Optional[_Union[_folder_pb2.RecordAccessData, _Mapping]] = ..., folderAccessData: _Optional[_Iterable[_Union[_folder_pb2.FolderAccessData, _Mapping]]] = ..., pageInfo: _Optional[_Union[_pagination_pb2.PageInfo, _Mapping]] = ...) -> None: ... 
+ +class RecordAccess(_message.Message): + __slots__ = ("data", "accessorInfo") + DATA_FIELD_NUMBER: _ClassVar[int] + ACCESSORINFO_FIELD_NUMBER: _ClassVar[int] + data: _folder_pb2.RecordAccessData + accessorInfo: AccessorInfo + def __init__(self, data: _Optional[_Union[_folder_pb2.RecordAccessData, _Mapping]] = ..., accessorInfo: _Optional[_Union[AccessorInfo, _Mapping]] = ...) -> None: ... + +class AccessorInfo(_message.Message): + __slots__ = ("name",) + NAME_FIELD_NUMBER: _ClassVar[int] + name: str + def __init__(self, name: _Optional[str] = ...) -> None: ... diff --git a/keepercommander/proto/record_endpoints_pb2.py b/keepercommander/proto/record_endpoints_pb2.py new file mode 100644 index 000000000..51cae6af3 --- /dev/null +++ b/keepercommander/proto/record_endpoints_pb2.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: record_endpoints.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import record_pb2 as record__pb2 +from . 
import folder_pb2 as folder__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16record_endpoints.proto\x12\trecord.v3\x1a\x0crecord.proto\x1a\x0c\x66older.proto\"\x83\x01\n\x11RecordsAddRequest\x12%\n\x07records\x18\x01 \x03(\x0b\x32\x14.record.v3.RecordAdd\x12\x12\n\nclientTime\x18\x02 \x01(\x03\x12\x33\n\x13securityDataKeyType\x18\x03 \x01(\x0e\x32\x16.Records.RecordKeyType\"\xa8\x03\n\tRecordAdd\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x11\n\trecordKey\x18\x02 \x01(\x0c\x12/\n\rrecordKeyType\x18\x03 \x01(\x0e\x32\x18.Folder.EncryptedKeyType\x12=\n\x14recordKeyEncryptedBy\x18\x04 \x01(\x0e\x32\x1f.Folder.FolderKeyEncryptionType\x12\x1a\n\x12\x63lientModifiedTime\x18\x05 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\x0c\x12\x15\n\rnonSharedData\x18\x07 \x01(\x0c\x12\x11\n\tfolderUid\x18\x08 \x01(\x0c\x12(\n\x0brecordLinks\x18\t \x03(\x0b\x32\x13.Records.RecordLink\x12#\n\x05\x61udit\x18\n \x01(\x0b\x32\x14.Records.RecordAudit\x12+\n\x0csecurityData\x18\x0b \x01(\x0b\x32\x15.Records.SecurityData\x12\x35\n\x11securityScoreData\x18\x0c \x01(\x0b\x32\x1a.Records.SecurityScoreDataB*\n&com.keepersecurity.proto.api.record.v3P\x01\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'record_endpoints_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n&com.keepersecurity.proto.api.record.v3P\001' + _globals['_RECORDSADDREQUEST']._serialized_start=66 + _globals['_RECORDSADDREQUEST']._serialized_end=197 + _globals['_RECORDADD']._serialized_start=200 + _globals['_RECORDADD']._serialized_end=624 +# @@protoc_insertion_point(module_scope) diff --git a/keepercommander/proto/record_endpoints_pb2.pyi b/keepercommander/proto/record_endpoints_pb2.pyi new file mode 100644 index 000000000..f022be90d --- /dev/null +++ 
b/keepercommander/proto/record_endpoints_pb2.pyi @@ -0,0 +1,47 @@ +import record_pb2 as _record_pb2 +import folder_pb2 as _folder_pb2 +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from collections.abc import Iterable as _Iterable, Mapping as _Mapping +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class RecordsAddRequest(_message.Message): + __slots__ = ("records", "clientTime", "securityDataKeyType") + RECORDS_FIELD_NUMBER: _ClassVar[int] + CLIENTTIME_FIELD_NUMBER: _ClassVar[int] + SECURITYDATAKEYTYPE_FIELD_NUMBER: _ClassVar[int] + records: _containers.RepeatedCompositeFieldContainer[RecordAdd] + clientTime: int + securityDataKeyType: _record_pb2.RecordKeyType + def __init__(self, records: _Optional[_Iterable[_Union[RecordAdd, _Mapping]]] = ..., clientTime: _Optional[int] = ..., securityDataKeyType: _Optional[_Union[_record_pb2.RecordKeyType, str]] = ...) -> None: ... 
+ +class RecordAdd(_message.Message): + __slots__ = ("recordUid", "recordKey", "recordKeyType", "recordKeyEncryptedBy", "clientModifiedTime", "data", "nonSharedData", "folderUid", "recordLinks", "audit", "securityData", "securityScoreData") + RECORDUID_FIELD_NUMBER: _ClassVar[int] + RECORDKEY_FIELD_NUMBER: _ClassVar[int] + RECORDKEYTYPE_FIELD_NUMBER: _ClassVar[int] + RECORDKEYENCRYPTEDBY_FIELD_NUMBER: _ClassVar[int] + CLIENTMODIFIEDTIME_FIELD_NUMBER: _ClassVar[int] + DATA_FIELD_NUMBER: _ClassVar[int] + NONSHAREDDATA_FIELD_NUMBER: _ClassVar[int] + FOLDERUID_FIELD_NUMBER: _ClassVar[int] + RECORDLINKS_FIELD_NUMBER: _ClassVar[int] + AUDIT_FIELD_NUMBER: _ClassVar[int] + SECURITYDATA_FIELD_NUMBER: _ClassVar[int] + SECURITYSCOREDATA_FIELD_NUMBER: _ClassVar[int] + recordUid: bytes + recordKey: bytes + recordKeyType: _folder_pb2.EncryptedKeyType + recordKeyEncryptedBy: _folder_pb2.FolderKeyEncryptionType + clientModifiedTime: int + data: bytes + nonSharedData: bytes + folderUid: bytes + recordLinks: _containers.RepeatedCompositeFieldContainer[_record_pb2.RecordLink] + audit: _record_pb2.RecordAudit + securityData: _record_pb2.SecurityData + securityScoreData: _record_pb2.SecurityScoreData + def __init__(self, recordUid: _Optional[bytes] = ..., recordKey: _Optional[bytes] = ..., recordKeyType: _Optional[_Union[_folder_pb2.EncryptedKeyType, str]] = ..., recordKeyEncryptedBy: _Optional[_Union[_folder_pb2.FolderKeyEncryptionType, str]] = ..., clientModifiedTime: _Optional[int] = ..., data: _Optional[bytes] = ..., nonSharedData: _Optional[bytes] = ..., folderUid: _Optional[bytes] = ..., recordLinks: _Optional[_Iterable[_Union[_record_pb2.RecordLink, _Mapping]]] = ..., audit: _Optional[_Union[_record_pb2.RecordAudit, _Mapping]] = ..., securityData: _Optional[_Union[_record_pb2.SecurityData, _Mapping]] = ..., securityScoreData: _Optional[_Union[_record_pb2.SecurityScoreData, _Mapping]] = ...) -> None: ... 
diff --git a/keepercommander/proto/record_pb2.py b/keepercommander/proto/record_pb2.py index 941405bfc..329a522a1 100644 --- a/keepercommander/proto/record_pb2.py +++ b/keepercommander/proto/record_pb2.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE # source: record.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor @@ -13,38 +14,38 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0crecord.proto\x12\x07Records\"\\\n\nRecordType\x12\x14\n\x0crecordTypeId\x18\x01 \x01(\x05\x12\x0f\n\x07\x63ontent\x18\x02 \x01(\t\x12\'\n\x05scope\x18\x03 \x01(\x0e\x32\x18.Records.RecordTypeScope\"U\n\x12RecordTypesRequest\x12\x10\n\x08standard\x18\x01 \x01(\x08\x12\x0c\n\x04user\x18\x02 \x01(\x08\x12\x12\n\nenterprise\x18\x03 \x01(\x08\x12\x0b\n\x03pam\x18\x04 \x01(\x08\"\x9c\x01\n\x13RecordTypesResponse\x12(\n\x0brecordTypes\x18\x01 \x03(\x0b\x32\x13.Records.RecordType\x12\x17\n\x0fstandardCounter\x18\x02 \x01(\x05\x12\x13\n\x0buserCounter\x18\x03 \x01(\x05\x12\x19\n\x11\x65nterpriseCounter\x18\x04 \x01(\x05\x12\x12\n\npamCounter\x18\x05 \x01(\x05\"A\n\x18RecordTypeModifyResponse\x12\x14\n\x0crecordTypeId\x18\x01 \x01(\x05\x12\x0f\n\x07\x63ounter\x18\x02 \x01(\x05\"=\n\x11RecordsGetRequest\x12\x13\n\x0brecord_uids\x18\x01 \x03(\x0c\x12\x13\n\x0b\x63lient_time\x18\x02 \x01(\x03\"\xd1\x01\n\x06Record\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x12\n\nrecord_key\x18\x02 \x01(\x0c\x12/\n\x0frecord_key_type\x18\x03 \x01(\x0e\x32\x16.Records.RecordKeyType\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05\x65xtra\x18\x05 \x01(\x0c\x12\x0f\n\x07version\x18\x06 \x01(\x05\x12\x1c\n\x14\x63lient_modified_time\x18\x07 \x01(\x03\x12\x10\n\x08revision\x18\x08 \x01(\x03\x12\x10\n\x08\x66ile_ids\x18\t \x03(\x0c\"M\n\x0f\x46olderRecordKey\x12\x12\n\nfolder_uid\x18\x01 \x01(\x0c\x12\x12\n\nrecord_uid\x18\x02 \x01(\x0c\x12\x12\n\nrecord_key\x18\x03 
\x01(\x0c\"a\n\x06\x46older\x12\x12\n\nfolder_uid\x18\x01 \x01(\x0c\x12\x12\n\nfolder_key\x18\x02 \x01(\x0c\x12/\n\x0f\x66older_key_type\x18\x03 \x01(\x0e\x32\x16.Records.RecordKeyType\"\x95\x01\n\x04Team\x12\x10\n\x08team_uid\x18\x01 \x01(\x0c\x12\x10\n\x08team_key\x18\x02 \x01(\x0c\x12\x18\n\x10team_private_key\x18\x03 \x01(\x0c\x12-\n\rteam_key_type\x18\x04 \x01(\x0e\x32\x16.Records.RecordKeyType\x12 \n\x07\x66olders\x18\x05 \x03(\x0b\x32\x0f.Records.Folder\"\xac\x01\n\x12RecordsGetResponse\x12 \n\x07records\x18\x01 \x03(\x0b\x32\x0f.Records.Record\x12\x34\n\x12\x66older_record_keys\x18\x02 \x03(\x0b\x32\x18.Records.FolderRecordKey\x12 \n\x07\x66olders\x18\x03 \x03(\x0b\x32\x0f.Records.Folder\x12\x1c\n\x05teams\x18\x04 \x03(\x0b\x32\r.Records.Team\"4\n\nRecordLink\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x12\n\nrecord_key\x18\x02 \x01(\x0c\",\n\x0bRecordAudit\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\x1c\n\x0cSecurityData\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\"!\n\x11SecurityScoreData\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\"\x84\x03\n\tRecordAdd\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x12\n\nrecord_key\x18\x02 \x01(\x0c\x12\x1c\n\x14\x63lient_modified_time\x18\x03 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\x17\n\x0fnon_shared_data\x18\x05 \x01(\x0c\x12.\n\x0b\x66older_type\x18\x06 \x01(\x0e\x32\x19.Records.RecordFolderType\x12\x12\n\nfolder_uid\x18\x07 \x01(\x0c\x12\x12\n\nfolder_key\x18\x08 \x01(\x0c\x12)\n\x0crecord_links\x18\t \x03(\x0b\x32\x13.Records.RecordLink\x12#\n\x05\x61udit\x18\n \x01(\x0b\x32\x14.Records.RecordAudit\x12+\n\x0csecurityData\x18\x0b \x01(\x0b\x32\x15.Records.SecurityData\x12\x35\n\x11securityScoreData\x18\x0c \x01(\x0b\x32\x1a.Records.SecurityScoreData\"\x85\x01\n\x11RecordsAddRequest\x12#\n\x07records\x18\x01 \x03(\x0b\x32\x12.Records.RecordAdd\x12\x13\n\x0b\x63lient_time\x18\x02 \x01(\x03\x12\x36\n\x16security_data_key_type\x18\x03 
\x01(\x0e\x32\x16.Records.RecordKeyType\"\xce\x02\n\x0cRecordUpdate\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x1c\n\x14\x63lient_modified_time\x18\x02 \x01(\x03\x12\x10\n\x08revision\x18\x03 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\x17\n\x0fnon_shared_data\x18\x05 \x01(\x0c\x12-\n\x10record_links_add\x18\x06 \x03(\x0b\x32\x13.Records.RecordLink\x12\x1b\n\x13record_links_remove\x18\x07 \x03(\x0c\x12#\n\x05\x61udit\x18\x08 \x01(\x0b\x32\x14.Records.RecordAudit\x12+\n\x0csecurityData\x18\t \x01(\x0b\x32\x15.Records.SecurityData\x12\x35\n\x11securityScoreData\x18\n \x01(\x0b\x32\x1a.Records.SecurityScoreData\"\x8b\x01\n\x14RecordsUpdateRequest\x12&\n\x07records\x18\x01 \x03(\x0b\x32\x15.Records.RecordUpdate\x12\x13\n\x0b\x63lient_time\x18\x02 \x01(\x03\x12\x36\n\x16security_data_key_type\x18\x03 \x01(\x0e\x32\x16.Records.RecordKeyType\"\x8e\x01\n\x17RecordFileForConversion\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x14\n\x0c\x66ile_file_id\x18\x02 \x01(\t\x12\x15\n\rthumb_file_id\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\x12\n\nrecord_key\x18\x05 \x01(\x0c\x12\x10\n\x08link_key\x18\x06 \x01(\x0c\"J\n\x19RecordFolderForConversion\x12\x12\n\nfolder_uid\x18\x01 \x01(\x0c\x12\x19\n\x11record_folder_key\x18\x02 \x01(\x0c\"\x92\x02\n\x11RecordConvertToV3\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x1c\n\x14\x63lient_modified_time\x18\x02 \x01(\x03\x12\x10\n\x08revision\x18\x03 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\x17\n\x0fnon_shared_data\x18\x05 \x01(\x0c\x12#\n\x05\x61udit\x18\x06 \x01(\x0b\x32\x14.Records.RecordAudit\x12\x35\n\x0brecord_file\x18\x07 \x03(\x0b\x32 .Records.RecordFileForConversion\x12\x36\n\nfolder_key\x18\x08 \x03(\x0b\x32\".Records.RecordFolderForConversion\"]\n\x19RecordsConvertToV3Request\x12+\n\x07records\x18\x01 \x03(\x0b\x32\x1a.Records.RecordConvertToV3\x12\x13\n\x0b\x63lient_time\x18\x02 \x01(\x03\"\'\n\x14RecordsRemoveRequest\x12\x0f\n\x07records\x18\x01 
\x03(\x0c\">\n\x0cRecordRevert\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x1a\n\x12revert_to_revision\x18\x02 \x01(\x03\">\n\x14RecordsRevertRequest\x12&\n\x07records\x18\x01 \x03(\x0b\x32\x15.Records.RecordRevert\"c\n\x0fRecordLinkError\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12+\n\x06status\x18\x02 \x01(\x0e\x32\x1b.Records.RecordModifyResult\x12\x0f\n\x07message\x18\x03 \x01(\t\"\x95\x01\n\x12RecordModifyStatus\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12+\n\x06status\x18\x02 \x01(\x0e\x32\x1b.Records.RecordModifyResult\x12\x0f\n\x07message\x18\x03 \x01(\t\x12-\n\x0blink_errors\x18\x04 \x03(\x0b\x32\x18.Records.RecordLinkError\"W\n\x15RecordsModifyResponse\x12,\n\x07records\x18\x01 \x03(\x0b\x32\x1b.Records.RecordModifyStatus\x12\x10\n\x08revision\x18\x02 \x01(\x03\"Y\n\x12RecordAddAuditData\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\x12\x0f\n\x07version\x18\x04 \x01(\x05\"C\n\x13\x41\x64\x64\x41uditDataRequest\x12,\n\x07records\x18\x01 \x03(\x0b\x32\x1b.Records.RecordAddAuditData\"t\n\x04\x46ile\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x12\n\nrecord_key\x18\x02 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\x12\x10\n\x08\x66ileSize\x18\x04 \x01(\x03\x12\x11\n\tthumbSize\x18\x05 \x01(\x05\x12\x11\n\tis_script\x18\x06 \x01(\x08\"D\n\x0f\x46ilesAddRequest\x12\x1c\n\x05\x66iles\x18\x01 \x03(\x0b\x32\r.Records.File\x12\x13\n\x0b\x63lient_time\x18\x02 \x01(\x03\"\xa7\x01\n\rFileAddStatus\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12&\n\x06status\x18\x02 \x01(\x0e\x32\x16.Records.FileAddResult\x12\x0b\n\x03url\x18\x03 \x01(\t\x12\x12\n\nparameters\x18\x04 \x01(\t\x12\x1c\n\x14thumbnail_parameters\x18\x05 \x01(\t\x12\x1b\n\x13success_status_code\x18\x06 \x01(\x05\"K\n\x10\x46ilesAddResponse\x12%\n\x05\x66iles\x18\x01 \x03(\x0b\x32\x16.Records.FileAddStatus\x12\x10\n\x08revision\x18\x02 \x01(\x03\"f\n\x0f\x46ilesGetRequest\x12\x13\n\x0brecord_uids\x18\x01 
\x03(\x0c\x12\x16\n\x0e\x66or_thumbnails\x18\x02 \x01(\x08\x12&\n\x1e\x65mergency_access_account_owner\x18\x03 \x01(\t\"\xa2\x01\n\rFileGetStatus\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12&\n\x06status\x18\x02 \x01(\x0e\x32\x16.Records.FileGetResult\x12\x0b\n\x03url\x18\x03 \x01(\t\x12\x1b\n\x13success_status_code\x18\x04 \x01(\x05\x12+\n\x0b\x66ileKeyType\x18\x05 \x01(\x0e\x32\x16.Records.RecordKeyType\"9\n\x10\x46ilesGetResponse\x12%\n\x05\x66iles\x18\x01 \x03(\x0b\x32\x16.Records.FileGetStatus\"\x8d\x01\n\x15\x41pplicationAddRequest\x12\x0f\n\x07\x61pp_uid\x18\x01 \x01(\x0c\x12\x12\n\nrecord_key\x18\x02 \x01(\x0c\x12\x1c\n\x14\x63lient_modified_time\x18\x03 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12#\n\x05\x61udit\x18\x05 \x01(\x0b\x32\x14.Records.RecordAudit\"\x88\x01\n\"GetRecordDataWithAccessInfoRequest\x12\x12\n\nclientTime\x18\x01 \x01(\x03\x12\x11\n\trecordUid\x18\x02 \x03(\x0c\x12;\n\x14recordDetailsInclude\x18\x03 \x01(\x0e\x32\x1d.Records.RecordDetailsInclude\"\x86\x02\n\x0eUserPermission\x12\x10\n\x08username\x18\x01 \x01(\t\x12\r\n\x05owner\x18\x02 \x01(\x08\x12\x12\n\nshareAdmin\x18\x03 \x01(\x08\x12\x10\n\x08sharable\x18\x04 \x01(\x08\x12\x10\n\x08\x65\x64itable\x18\x05 \x01(\x08\x12\x18\n\x10\x61waitingApproval\x18\x06 \x01(\x08\x12\x12\n\nexpiration\x18\x07 \x01(\x03\x12\x12\n\naccountUid\x18\x08 \x01(\x0c\x12=\n\x15timerNotificationType\x18\t \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x1a\n\x12rotateOnExpiration\x18\n \x01(\x08\"\xd8\x01\n\x16SharedFolderPermission\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x12\n\nresharable\x18\x02 \x01(\x08\x12\x10\n\x08\x65\x64itable\x18\x03 \x01(\x08\x12\x10\n\x08revision\x18\x04 \x01(\x03\x12\x12\n\nexpiration\x18\x05 \x01(\x03\x12=\n\x15timerNotificationType\x18\x06 \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x1a\n\x12rotateOnExpiration\x18\x07 \x01(\x08\"\xe8\x02\n\nRecordData\x12\x10\n\x08revision\x18\x01 \x01(\x03\x12\x0f\n\x07version\x18\x02 
\x01(\x05\x12\x0e\n\x06shared\x18\x03 \x01(\x08\x12\x1b\n\x13\x65ncryptedRecordData\x18\x04 \x01(\t\x12\x1a\n\x12\x65ncryptedExtraData\x18\x05 \x01(\t\x12\x1a\n\x12\x63lientModifiedTime\x18\x06 \x01(\x03\x12\x15\n\rnonSharedData\x18\x07 \x01(\t\x12-\n\x10linkedRecordData\x18\x08 \x03(\x0b\x32\x13.Records.RecordData\x12\x0e\n\x06\x66ileId\x18\t \x03(\x0c\x12\x10\n\x08\x66ileSize\x18\n \x01(\x03\x12\x15\n\rthumbnailSize\x18\x0b \x01(\x03\x12-\n\rrecordKeyType\x18\x0c \x01(\x0e\x32\x16.Records.RecordKeyType\x12\x11\n\trecordKey\x18\r \x01(\x0c\x12\x11\n\trecordUid\x18\x0e \x01(\x0c\"\xc8\x01\n\x18RecordDataWithAccessInfo\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\'\n\nrecordData\x18\x02 \x01(\x0b\x32\x13.Records.RecordData\x12/\n\x0euserPermission\x18\x03 \x03(\x0b\x32\x17.Records.UserPermission\x12?\n\x16sharedFolderPermission\x18\x04 \x03(\x0b\x32\x1f.Records.SharedFolderPermission\"\x89\x01\n#GetRecordDataWithAccessInfoResponse\x12\x43\n\x18recordDataWithAccessInfo\x18\x01 \x03(\x0b\x32!.Records.RecordDataWithAccessInfo\x12\x1d\n\x15noPermissionRecordUid\x18\x02 \x03(\x0c\"j\n\x12IsObjectShareAdmin\x12\x0b\n\x03uid\x18\x01 \x01(\x0c\x12\x0f\n\x07isAdmin\x18\x02 \x01(\x08\x12\x36\n\nobjectType\x18\x03 \x01(\x0e\x32\".Records.CheckShareAdminObjectType\"H\n\rAmIShareAdmin\x12\x37\n\x12isObjectShareAdmin\x18\x01 \x03(\x0b\x32\x1b.Records.IsObjectShareAdmin\"\xbc\x01\n\x18RecordShareUpdateRequest\x12.\n\x0f\x61\x64\x64SharedRecord\x18\x01 \x03(\x0b\x32\x15.Records.SharedRecord\x12\x31\n\x12updateSharedRecord\x18\x02 \x03(\x0b\x32\x15.Records.SharedRecord\x12\x31\n\x12removeSharedRecord\x18\x03 \x03(\x0b\x32\x15.Records.SharedRecord\x12\n\n\x02pt\x18\x04 \x01(\t\"\xc4\x02\n\x0cSharedRecord\x12\x12\n\ntoUsername\x18\x01 \x01(\t\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x11\n\trecordKey\x18\x03 \x01(\x0c\x12\x17\n\x0fsharedFolderUid\x18\x04 \x01(\x0c\x12\x0f\n\x07teamUid\x18\x05 \x01(\x0c\x12\x10\n\x08\x65\x64itable\x18\x06 \x01(\x08\x12\x11\n\tshareable\x18\x07 
\x01(\x08\x12\x10\n\x08transfer\x18\x08 \x01(\x08\x12\x11\n\tuseEccKey\x18\t \x01(\x08\x12\x17\n\x0fremoveVaultData\x18\n \x01(\x08\x12\x12\n\nexpiration\x18\x0b \x01(\x03\x12=\n\x15timerNotificationType\x18\x0c \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x1a\n\x12rotateOnExpiration\x18\r \x01(\x08\"\xd5\x01\n\x19RecordShareUpdateResponse\x12:\n\x15\x61\x64\x64SharedRecordStatus\x18\x01 \x03(\x0b\x32\x1b.Records.SharedRecordStatus\x12=\n\x18updateSharedRecordStatus\x18\x02 \x03(\x0b\x32\x1b.Records.SharedRecordStatus\x12=\n\x18removeSharedRecordStatus\x18\x03 \x03(\x0b\x32\x1b.Records.SharedRecordStatus\"Z\n\x12SharedRecordStatus\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x10\n\x08username\x18\x04 \x01(\t\"G\n\x1bGetRecordPermissionsRequest\x12\x12\n\nrecordUids\x18\x01 \x03(\x0c\x12\x14\n\x0cisShareAdmin\x18\x02 \x01(\x08\"T\n\x1cGetRecordPermissionsResponse\x12\x34\n\x11recordPermissions\x18\x01 \x03(\x0b\x32\x19.Records.RecordPermission\"l\n\x10RecordPermission\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\r\n\x05owner\x18\x02 \x01(\x08\x12\x0f\n\x07\x63\x61nEdit\x18\x03 \x01(\x08\x12\x10\n\x08\x63\x61nShare\x18\x04 \x01(\x08\x12\x13\n\x0b\x63\x61nTransfer\x18\x05 \x01(\x08\"h\n\x16GetShareObjectsRequest\x12\x11\n\tstartWith\x18\x01 \x01(\t\x12\x10\n\x08\x63ontains\x18\x02 \x01(\t\x12\x10\n\x08\x66iltered\x18\x03 \x01(\x08\x12\x17\n\x0fsharedFolderUid\x18\x04 \x01(\x0c\"\xe7\x02\n\x17GetShareObjectsResponse\x12.\n\x12shareRelationships\x18\x01 \x03(\x0b\x32\x12.Records.ShareUser\x12,\n\x10shareFamilyUsers\x18\x02 \x03(\x0b\x32\x12.Records.ShareUser\x12\x30\n\x14shareEnterpriseUsers\x18\x03 \x03(\x0b\x32\x12.Records.ShareUser\x12&\n\nshareTeams\x18\x04 \x03(\x0b\x32\x12.Records.ShareTeam\x12(\n\x0cshareMCTeams\x18\x05 \x03(\x0b\x32\x12.Records.ShareTeam\x12\x32\n\x16shareMCEnterpriseUsers\x18\x06 \x03(\x0b\x32\x12.Records.ShareUser\x12\x36\n\x14shareEnterpriseNames\x18\x07 
\x03(\x0b\x32\x18.Records.ShareEnterprise\"\xa5\x01\n\tShareUser\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x10\n\x08\x66ullname\x18\x02 \x01(\t\x12\x14\n\x0c\x65nterpriseId\x18\x03 \x01(\x05\x12$\n\x06status\x18\x04 \x01(\x0e\x32\x14.Records.ShareStatus\x12\x14\n\x0cisShareAdmin\x18\x05 \x01(\x08\x12\"\n\x1aisAdminOfSharedFolderOwner\x18\x06 \x01(\x08\"D\n\tShareTeam\x12\x10\n\x08teamname\x18\x01 \x01(\t\x12\x14\n\x0c\x65nterpriseId\x18\x02 \x01(\x05\x12\x0f\n\x07teamUid\x18\x03 \x01(\x0c\"?\n\x0fShareEnterprise\x12\x16\n\x0e\x65nterprisename\x18\x01 \x01(\t\x12\x14\n\x0c\x65nterpriseId\x18\x02 \x01(\x05\"S\n\x1fRecordsOnwershipTransferRequest\x12\x30\n\x0ftransferRecords\x18\x01 \x03(\x0b\x32\x17.Records.TransferRecord\"[\n\x0eTransferRecord\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x11\n\trecordKey\x18\x03 \x01(\x0c\x12\x11\n\tuseEccKey\x18\x04 \x01(\x08\"_\n RecordsOnwershipTransferResponse\x12;\n\x14transferRecordStatus\x18\x01 \x03(\x0b\x32\x1d.Records.TransferRecordStatus\"\\\n\x14TransferRecordStatus\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\x0f\n\x07message\x18\x04 \x01(\t\"y\n\x15RecordsUnshareRequest\x12\x34\n\rsharedFolders\x18\x01 \x03(\x0b\x32\x1d.Records.RecordsUnshareFolder\x12*\n\x05users\x18\x02 \x03(\x0b\x32\x1b.Records.RecordsUnshareUser\"\x86\x01\n\x16RecordsUnshareResponse\x12:\n\rsharedFolders\x18\x01 \x03(\x0b\x32#.Records.RecordsUnshareFolderStatus\x12\x30\n\x05users\x18\x02 \x03(\x0b\x32!.Records.RecordsUnshareUserStatus\"B\n\x14RecordsUnshareFolder\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x17\n\x0fsharedFolderUid\x18\x02 \x01(\x0c\";\n\x12RecordsUnshareUser\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x12\n\naccountUid\x18\x02 \x01(\x0c\"H\n\x1aRecordsUnshareFolderStatus\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x17\n\x0fsharedFolderUid\x18\x02 \x01(\x0c\"A\n\x18RecordsUnshareUserStatus\x12\x11\n\trecordUid\x18\x01 
\x01(\x0c\x12\x12\n\naccountUid\x18\x02 \x01(\x0c\"[\n\x1aTimedAccessCallbackPayload\x12=\n\x15timeLimitedAccessType\x18\x01 \x01(\x0e\x32\x1e.Records.TimeLimitedAccessType\"\xfd\x01\n\x18TimeLimitedAccessRequest\x12\x12\n\naccountUid\x18\x01 \x03(\x0c\x12\x0f\n\x07teamUid\x18\x02 \x03(\x0c\x12\x11\n\trecordUid\x18\x03 \x03(\x0c\x12\x17\n\x0fsharedObjectUid\x18\x04 \x01(\x0c\x12=\n\x15timeLimitedAccessType\x18\x05 \x01(\x0e\x32\x1e.Records.TimeLimitedAccessType\x12\x12\n\nexpiration\x18\x06 \x01(\x03\x12=\n\x15timerNotificationType\x18\x07 \x01(\x0e\x32\x1e.Records.TimerNotificationType\"7\n\x17TimeLimitedAccessStatus\x12\x0b\n\x03uid\x18\x01 \x01(\x0c\x12\x0f\n\x07message\x18\x02 \x01(\t\"\xe3\x01\n\x19TimeLimitedAccessResponse\x12\x10\n\x08revision\x18\x01 \x01(\x03\x12:\n\x10userAccessStatus\x18\x02 \x03(\x0b\x32 .Records.TimeLimitedAccessStatus\x12:\n\x10teamAccessStatus\x18\x03 \x03(\x0b\x32 .Records.TimeLimitedAccessStatus\x12<\n\x12recordAccessStatus\x18\x04 \x03(\x0b\x32 .Records.TimeLimitedAccessStatus*h\n\x0fRecordTypeScope\x12\x0f\n\x0bRT_STANDARD\x10\x00\x12\x0b\n\x07RT_USER\x10\x01\x12\x11\n\rRT_ENTERPRISE\x10\x02\x12\n\n\x06RT_PAM\x10\x03\x12\x18\n\x14RT_PAM_CONFIGURATION\x10\x04*\xd1\x01\n\rRecordKeyType\x12\n\n\x06NO_KEY\x10\x00\x12\x19\n\x15\x45NCRYPTED_BY_DATA_KEY\x10\x01\x12\x1b\n\x17\x45NCRYPTED_BY_PUBLIC_KEY\x10\x02\x12\x1d\n\x19\x45NCRYPTED_BY_DATA_KEY_GCM\x10\x03\x12\x1f\n\x1b\x45NCRYPTED_BY_PUBLIC_KEY_ECC\x10\x04\x12\x1d\n\x19\x45NCRYPTED_BY_ROOT_KEY_CBC\x10\x05\x12\x1d\n\x19\x45NCRYPTED_BY_ROOT_KEY_GCM\x10\x06*P\n\x10RecordFolderType\x12\x0f\n\x0buser_folder\x10\x00\x12\x11\n\rshared_folder\x10\x01\x12\x18\n\x14shared_folder_folder\x10\x02*\xec\x02\n\x12RecordModifyResult\x12\x0e\n\nRS_SUCCESS\x10\x00\x12\x12\n\x0eRS_OUT_OF_SYNC\x10\x01\x12\x14\n\x10RS_ACCESS_DENIED\x10\x02\x12\x13\n\x0fRS_SHARE_DENIED\x10\x03\x12\x14\n\x10RS_RECORD_EXISTS\x10\x04\x12\x1e\n\x1aRS_OLD_RECORD_VERSION_TYPE\x10\x05\x12\x1e\n\x1aRS_NEW_RECORD_VERSION_TYPE\x10\x06
\x12\x16\n\x12RS_FILES_NOT_MATCH\x10\x07\x12\x1b\n\x17RS_RECORD_NOT_SHAREABLE\x10\x08\x12\x1f\n\x1bRS_ATTACHMENT_NOT_SHAREABLE\x10\t\x12\x19\n\x15RS_FILE_LIMIT_REACHED\x10\n\x12\x1a\n\x16RS_SIZE_EXCEEDED_LIMIT\x10\x0b\x12$\n RS_ONLY_OWNER_CAN_MODIFY_SCRIPTS\x10\x0c*-\n\rFileAddResult\x12\x0e\n\nFA_SUCCESS\x10\x00\x12\x0c\n\x08\x46\x41_ERROR\x10\x01*C\n\rFileGetResult\x12\x0e\n\nFG_SUCCESS\x10\x00\x12\x0c\n\x08\x46G_ERROR\x10\x01\x12\x14\n\x10\x46G_ACCESS_DENIED\x10\x02*J\n\x14RecordDetailsInclude\x12\x13\n\x0f\x44\x41TA_PLUS_SHARE\x10\x00\x12\r\n\tDATA_ONLY\x10\x01\x12\x0e\n\nSHARE_ONLY\x10\x02*b\n\x19\x43heckShareAdminObjectType\x12\x19\n\x15\x43HECK_SA_INVALID_TYPE\x10\x00\x12\x12\n\x0e\x43HECK_SA_ON_SF\x10\x01\x12\x16\n\x12\x43HECK_SA_ON_RECORD\x10\x02*1\n\x0bShareStatus\x12\n\n\x06\x41\x43TIVE\x10\x00\x12\t\n\x05\x42LOCK\x10\x01\x12\x0b\n\x07INVITED\x10\x02*:\n\x15RecordTransactionType\x12\x0f\n\x0bRTT_GENERAL\x10\x00\x12\x10\n\x0cRTT_ROTATION\x10\x01*\xe6\x01\n\x15TimeLimitedAccessType\x12$\n INVALID_TIME_LIMITED_ACCESS_TYPE\x10\x00\x12\x19\n\x15USER_ACCESS_TO_RECORD\x10\x01\x12\'\n#USER_OR_TEAM_ACCESS_TO_SHAREDFOLDER\x10\x02\x12!\n\x1dRECORD_ACCESS_TO_SHAREDFOLDER\x10\x03\x12\x1f\n\x1bUSER_ACCESS_TO_SHAREDFOLDER\x10\x04\x12\x1f\n\x1bTEAM_ACCESS_TO_SHAREDFOLDER\x10\x05*\\\n\x15TimerNotificationType\x12\x14\n\x10NOTIFICATION_OFF\x10\x00\x12\x10\n\x0cNOTIFY_OWNER\x10\x01\x12\x1b\n\x17NOTIFY_PRIVILEGED_USERS\x10\x02\x42#\n\x18\x63om.keepersecurity.protoB\x07Recordsb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0crecord.proto\x12\x07Records\"\\\n\nRecordType\x12\x14\n\x0crecordTypeId\x18\x01 \x01(\x05\x12\x0f\n\x07\x63ontent\x18\x02 \x01(\t\x12\'\n\x05scope\x18\x03 \x01(\x0e\x32\x18.Records.RecordTypeScope\"U\n\x12RecordTypesRequest\x12\x10\n\x08standard\x18\x01 \x01(\x08\x12\x0c\n\x04user\x18\x02 \x01(\x08\x12\x12\n\nenterprise\x18\x03 \x01(\x08\x12\x0b\n\x03pam\x18\x04 
\x01(\x08\"\x9c\x01\n\x13RecordTypesResponse\x12(\n\x0brecordTypes\x18\x01 \x03(\x0b\x32\x13.Records.RecordType\x12\x17\n\x0fstandardCounter\x18\x02 \x01(\x05\x12\x13\n\x0buserCounter\x18\x03 \x01(\x05\x12\x19\n\x11\x65nterpriseCounter\x18\x04 \x01(\x05\x12\x12\n\npamCounter\x18\x05 \x01(\x05\"A\n\x18RecordTypeModifyResponse\x12\x14\n\x0crecordTypeId\x18\x01 \x01(\x05\x12\x0f\n\x07\x63ounter\x18\x02 \x01(\x05\"=\n\x11RecordsGetRequest\x12\x13\n\x0brecord_uids\x18\x01 \x03(\x0c\x12\x13\n\x0b\x63lient_time\x18\x02 \x01(\x03\"\xd1\x01\n\x06Record\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x12\n\nrecord_key\x18\x02 \x01(\x0c\x12/\n\x0frecord_key_type\x18\x03 \x01(\x0e\x32\x16.Records.RecordKeyType\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05\x65xtra\x18\x05 \x01(\x0c\x12\x0f\n\x07version\x18\x06 \x01(\x05\x12\x1c\n\x14\x63lient_modified_time\x18\x07 \x01(\x03\x12\x10\n\x08revision\x18\x08 \x01(\x03\x12\x10\n\x08\x66ile_ids\x18\t \x03(\x0c\"M\n\x0f\x46olderRecordKey\x12\x12\n\nfolder_uid\x18\x01 \x01(\x0c\x12\x12\n\nrecord_uid\x18\x02 \x01(\x0c\x12\x12\n\nrecord_key\x18\x03 \x01(\x0c\"a\n\x06\x46older\x12\x12\n\nfolder_uid\x18\x01 \x01(\x0c\x12\x12\n\nfolder_key\x18\x02 \x01(\x0c\x12/\n\x0f\x66older_key_type\x18\x03 \x01(\x0e\x32\x16.Records.RecordKeyType\"\x95\x01\n\x04Team\x12\x10\n\x08team_uid\x18\x01 \x01(\x0c\x12\x10\n\x08team_key\x18\x02 \x01(\x0c\x12\x18\n\x10team_private_key\x18\x03 \x01(\x0c\x12-\n\rteam_key_type\x18\x04 \x01(\x0e\x32\x16.Records.RecordKeyType\x12 \n\x07\x66olders\x18\x05 \x03(\x0b\x32\x0f.Records.Folder\"\xac\x01\n\x12RecordsGetResponse\x12 \n\x07records\x18\x01 \x03(\x0b\x32\x0f.Records.Record\x12\x34\n\x12\x66older_record_keys\x18\x02 \x03(\x0b\x32\x18.Records.FolderRecordKey\x12 \n\x07\x66olders\x18\x03 \x03(\x0b\x32\x0f.Records.Folder\x12\x1c\n\x05teams\x18\x04 \x03(\x0b\x32\r.Records.Team\"4\n\nRecordLink\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x12\n\nrecord_key\x18\x02 
\x01(\x0c\",\n\x0bRecordAudit\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\x1c\n\x0cSecurityData\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\"!\n\x11SecurityScoreData\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\"\x84\x03\n\tRecordAdd\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x12\n\nrecord_key\x18\x02 \x01(\x0c\x12\x1c\n\x14\x63lient_modified_time\x18\x03 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\x17\n\x0fnon_shared_data\x18\x05 \x01(\x0c\x12.\n\x0b\x66older_type\x18\x06 \x01(\x0e\x32\x19.Records.RecordFolderType\x12\x12\n\nfolder_uid\x18\x07 \x01(\x0c\x12\x12\n\nfolder_key\x18\x08 \x01(\x0c\x12)\n\x0crecord_links\x18\t \x03(\x0b\x32\x13.Records.RecordLink\x12#\n\x05\x61udit\x18\n \x01(\x0b\x32\x14.Records.RecordAudit\x12+\n\x0csecurityData\x18\x0b \x01(\x0b\x32\x15.Records.SecurityData\x12\x35\n\x11securityScoreData\x18\x0c \x01(\x0b\x32\x1a.Records.SecurityScoreData\"\x85\x01\n\x11RecordsAddRequest\x12#\n\x07records\x18\x01 \x03(\x0b\x32\x12.Records.RecordAdd\x12\x13\n\x0b\x63lient_time\x18\x02 \x01(\x03\x12\x36\n\x16security_data_key_type\x18\x03 \x01(\x0e\x32\x16.Records.RecordKeyType\"\xce\x02\n\x0cRecordUpdate\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x1c\n\x14\x63lient_modified_time\x18\x02 \x01(\x03\x12\x10\n\x08revision\x18\x03 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\x17\n\x0fnon_shared_data\x18\x05 \x01(\x0c\x12-\n\x10record_links_add\x18\x06 \x03(\x0b\x32\x13.Records.RecordLink\x12\x1b\n\x13record_links_remove\x18\x07 \x03(\x0c\x12#\n\x05\x61udit\x18\x08 \x01(\x0b\x32\x14.Records.RecordAudit\x12+\n\x0csecurityData\x18\t \x01(\x0b\x32\x15.Records.SecurityData\x12\x35\n\x11securityScoreData\x18\n \x01(\x0b\x32\x1a.Records.SecurityScoreData\"\x8b\x01\n\x14RecordsUpdateRequest\x12&\n\x07records\x18\x01 \x03(\x0b\x32\x15.Records.RecordUpdate\x12\x13\n\x0b\x63lient_time\x18\x02 \x01(\x03\x12\x36\n\x16security_data_key_type\x18\x03 
\x01(\x0e\x32\x16.Records.RecordKeyType\"\x8e\x01\n\x17RecordFileForConversion\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x14\n\x0c\x66ile_file_id\x18\x02 \x01(\t\x12\x15\n\rthumb_file_id\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\x12\n\nrecord_key\x18\x05 \x01(\x0c\x12\x10\n\x08link_key\x18\x06 \x01(\x0c\"J\n\x19RecordFolderForConversion\x12\x12\n\nfolder_uid\x18\x01 \x01(\x0c\x12\x19\n\x11record_folder_key\x18\x02 \x01(\x0c\"\x92\x02\n\x11RecordConvertToV3\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x1c\n\x14\x63lient_modified_time\x18\x02 \x01(\x03\x12\x10\n\x08revision\x18\x03 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\x17\n\x0fnon_shared_data\x18\x05 \x01(\x0c\x12#\n\x05\x61udit\x18\x06 \x01(\x0b\x32\x14.Records.RecordAudit\x12\x35\n\x0brecord_file\x18\x07 \x03(\x0b\x32 .Records.RecordFileForConversion\x12\x36\n\nfolder_key\x18\x08 \x03(\x0b\x32\".Records.RecordFolderForConversion\"]\n\x19RecordsConvertToV3Request\x12+\n\x07records\x18\x01 \x03(\x0b\x32\x1a.Records.RecordConvertToV3\x12\x13\n\x0b\x63lient_time\x18\x02 \x01(\x03\"\'\n\x14RecordsRemoveRequest\x12\x0f\n\x07records\x18\x01 \x03(\x0c\">\n\x0cRecordRevert\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x1a\n\x12revert_to_revision\x18\x02 \x01(\x03\">\n\x14RecordsRevertRequest\x12&\n\x07records\x18\x01 \x03(\x0b\x32\x15.Records.RecordRevert\"c\n\x0fRecordLinkError\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12+\n\x06status\x18\x02 \x01(\x0e\x32\x1b.Records.RecordModifyResult\x12\x0f\n\x07message\x18\x03 \x01(\t\"\x95\x01\n\x12RecordModifyStatus\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12+\n\x06status\x18\x02 \x01(\x0e\x32\x1b.Records.RecordModifyResult\x12\x0f\n\x07message\x18\x03 \x01(\t\x12-\n\x0blink_errors\x18\x04 \x03(\x0b\x32\x18.Records.RecordLinkError\"W\n\x15RecordsModifyResponse\x12,\n\x07records\x18\x01 \x03(\x0b\x32\x1b.Records.RecordModifyStatus\x12\x10\n\x08revision\x18\x02 \x01(\x03\"Y\n\x12RecordAddAuditData\x12\x12\n\nrecord_uid\x18\x01 
\x01(\x0c\x12\x10\n\x08revision\x18\x02 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\x12\x0f\n\x07version\x18\x04 \x01(\x05\"C\n\x13\x41\x64\x64\x41uditDataRequest\x12,\n\x07records\x18\x01 \x03(\x0b\x32\x1b.Records.RecordAddAuditData\"t\n\x04\x46ile\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x12\n\nrecord_key\x18\x02 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\x12\x10\n\x08\x66ileSize\x18\x04 \x01(\x03\x12\x11\n\tthumbSize\x18\x05 \x01(\x05\x12\x11\n\tis_script\x18\x06 \x01(\x08\"D\n\x0f\x46ilesAddRequest\x12\x1c\n\x05\x66iles\x18\x01 \x03(\x0b\x32\r.Records.File\x12\x13\n\x0b\x63lient_time\x18\x02 \x01(\x03\"\xa7\x01\n\rFileAddStatus\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12&\n\x06status\x18\x02 \x01(\x0e\x32\x16.Records.FileAddResult\x12\x0b\n\x03url\x18\x03 \x01(\t\x12\x12\n\nparameters\x18\x04 \x01(\t\x12\x1c\n\x14thumbnail_parameters\x18\x05 \x01(\t\x12\x1b\n\x13success_status_code\x18\x06 \x01(\x05\"K\n\x10\x46ilesAddResponse\x12%\n\x05\x66iles\x18\x01 \x03(\x0b\x32\x16.Records.FileAddStatus\x12\x10\n\x08revision\x18\x02 \x01(\x03\"f\n\x0f\x46ilesGetRequest\x12\x13\n\x0brecord_uids\x18\x01 \x03(\x0c\x12\x16\n\x0e\x66or_thumbnails\x18\x02 \x01(\x08\x12&\n\x1e\x65mergency_access_account_owner\x18\x03 \x01(\t\"\xa2\x01\n\rFileGetStatus\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12&\n\x06status\x18\x02 \x01(\x0e\x32\x16.Records.FileGetResult\x12\x0b\n\x03url\x18\x03 \x01(\t\x12\x1b\n\x13success_status_code\x18\x04 \x01(\x05\x12+\n\x0b\x66ileKeyType\x18\x05 \x01(\x0e\x32\x16.Records.RecordKeyType\"9\n\x10\x46ilesGetResponse\x12%\n\x05\x66iles\x18\x01 \x03(\x0b\x32\x16.Records.FileGetStatus\"\x8d\x01\n\x15\x41pplicationAddRequest\x12\x0f\n\x07\x61pp_uid\x18\x01 \x01(\x0c\x12\x12\n\nrecord_key\x18\x02 \x01(\x0c\x12\x1c\n\x14\x63lient_modified_time\x18\x03 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12#\n\x05\x61udit\x18\x05 \x01(\x0b\x32\x14.Records.RecordAudit\"\x88\x01\n\"GetRecordDataWithAccessInfoRequest\x12\x12\n\nclientTime\x18\x01 
\x01(\x03\x12\x11\n\trecordUid\x18\x02 \x03(\x0c\x12;\n\x14recordDetailsInclude\x18\x03 \x01(\x0e\x32\x1d.Records.RecordDetailsInclude\"\x86\x02\n\x0eUserPermission\x12\x10\n\x08username\x18\x01 \x01(\t\x12\r\n\x05owner\x18\x02 \x01(\x08\x12\x12\n\nshareAdmin\x18\x03 \x01(\x08\x12\x10\n\x08sharable\x18\x04 \x01(\x08\x12\x10\n\x08\x65\x64itable\x18\x05 \x01(\x08\x12\x18\n\x10\x61waitingApproval\x18\x06 \x01(\x08\x12\x12\n\nexpiration\x18\x07 \x01(\x03\x12\x12\n\naccountUid\x18\x08 \x01(\x0c\x12=\n\x15timerNotificationType\x18\t \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x1a\n\x12rotateOnExpiration\x18\n \x01(\x08\"\xd8\x01\n\x16SharedFolderPermission\x12\x17\n\x0fsharedFolderUid\x18\x01 \x01(\x0c\x12\x12\n\nresharable\x18\x02 \x01(\x08\x12\x10\n\x08\x65\x64itable\x18\x03 \x01(\x08\x12\x10\n\x08revision\x18\x04 \x01(\x03\x12\x12\n\nexpiration\x18\x05 \x01(\x03\x12=\n\x15timerNotificationType\x18\x06 \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x1a\n\x12rotateOnExpiration\x18\x07 \x01(\x08\"\xe8\x02\n\nRecordData\x12\x10\n\x08revision\x18\x01 \x01(\x03\x12\x0f\n\x07version\x18\x02 \x01(\x05\x12\x0e\n\x06shared\x18\x03 \x01(\x08\x12\x1b\n\x13\x65ncryptedRecordData\x18\x04 \x01(\t\x12\x1a\n\x12\x65ncryptedExtraData\x18\x05 \x01(\t\x12\x1a\n\x12\x63lientModifiedTime\x18\x06 \x01(\x03\x12\x15\n\rnonSharedData\x18\x07 \x01(\t\x12-\n\x10linkedRecordData\x18\x08 \x03(\x0b\x32\x13.Records.RecordData\x12\x0e\n\x06\x66ileId\x18\t \x03(\x0c\x12\x10\n\x08\x66ileSize\x18\n \x01(\x03\x12\x15\n\rthumbnailSize\x18\x0b \x01(\x03\x12-\n\rrecordKeyType\x18\x0c \x01(\x0e\x32\x16.Records.RecordKeyType\x12\x11\n\trecordKey\x18\r \x01(\x0c\x12\x11\n\trecordUid\x18\x0e \x01(\x0c\"\xc8\x01\n\x18RecordDataWithAccessInfo\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\'\n\nrecordData\x18\x02 \x01(\x0b\x32\x13.Records.RecordData\x12/\n\x0euserPermission\x18\x03 \x03(\x0b\x32\x17.Records.UserPermission\x12?\n\x16sharedFolderPermission\x18\x04 
\x03(\x0b\x32\x1f.Records.SharedFolderPermission\"\x89\x01\n#GetRecordDataWithAccessInfoResponse\x12\x43\n\x18recordDataWithAccessInfo\x18\x01 \x03(\x0b\x32!.Records.RecordDataWithAccessInfo\x12\x1d\n\x15noPermissionRecordUid\x18\x02 \x03(\x0c\"j\n\x12IsObjectShareAdmin\x12\x0b\n\x03uid\x18\x01 \x01(\x0c\x12\x0f\n\x07isAdmin\x18\x02 \x01(\x08\x12\x36\n\nobjectType\x18\x03 \x01(\x0e\x32\".Records.CheckShareAdminObjectType\"H\n\rAmIShareAdmin\x12\x37\n\x12isObjectShareAdmin\x18\x01 \x03(\x0b\x32\x1b.Records.IsObjectShareAdmin\"\xbc\x01\n\x18RecordShareUpdateRequest\x12.\n\x0f\x61\x64\x64SharedRecord\x18\x01 \x03(\x0b\x32\x15.Records.SharedRecord\x12\x31\n\x12updateSharedRecord\x18\x02 \x03(\x0b\x32\x15.Records.SharedRecord\x12\x31\n\x12removeSharedRecord\x18\x03 \x03(\x0b\x32\x15.Records.SharedRecord\x12\n\n\x02pt\x18\x04 \x01(\t\"\xc4\x02\n\x0cSharedRecord\x12\x12\n\ntoUsername\x18\x01 \x01(\t\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x11\n\trecordKey\x18\x03 \x01(\x0c\x12\x17\n\x0fsharedFolderUid\x18\x04 \x01(\x0c\x12\x0f\n\x07teamUid\x18\x05 \x01(\x0c\x12\x10\n\x08\x65\x64itable\x18\x06 \x01(\x08\x12\x11\n\tshareable\x18\x07 \x01(\x08\x12\x10\n\x08transfer\x18\x08 \x01(\x08\x12\x11\n\tuseEccKey\x18\t \x01(\x08\x12\x17\n\x0fremoveVaultData\x18\n \x01(\x08\x12\x12\n\nexpiration\x18\x0b \x01(\x03\x12=\n\x15timerNotificationType\x18\x0c \x01(\x0e\x32\x1e.Records.TimerNotificationType\x12\x1a\n\x12rotateOnExpiration\x18\r \x01(\x08\"\xd5\x01\n\x19RecordShareUpdateResponse\x12:\n\x15\x61\x64\x64SharedRecordStatus\x18\x01 \x03(\x0b\x32\x1b.Records.SharedRecordStatus\x12=\n\x18updateSharedRecordStatus\x18\x02 \x03(\x0b\x32\x1b.Records.SharedRecordStatus\x12=\n\x18removeSharedRecordStatus\x18\x03 \x03(\x0b\x32\x1b.Records.SharedRecordStatus\"Z\n\x12SharedRecordStatus\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x10\n\x08username\x18\x04 
\x01(\t\"G\n\x1bGetRecordPermissionsRequest\x12\x12\n\nrecordUids\x18\x01 \x03(\x0c\x12\x14\n\x0cisShareAdmin\x18\x02 \x01(\x08\"T\n\x1cGetRecordPermissionsResponse\x12\x34\n\x11recordPermissions\x18\x01 \x03(\x0b\x32\x19.Records.RecordPermission\"l\n\x10RecordPermission\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\r\n\x05owner\x18\x02 \x01(\x08\x12\x0f\n\x07\x63\x61nEdit\x18\x03 \x01(\x08\x12\x10\n\x08\x63\x61nShare\x18\x04 \x01(\x08\x12\x13\n\x0b\x63\x61nTransfer\x18\x05 \x01(\x08\"h\n\x16GetShareObjectsRequest\x12\x11\n\tstartWith\x18\x01 \x01(\t\x12\x10\n\x08\x63ontains\x18\x02 \x01(\t\x12\x10\n\x08\x66iltered\x18\x03 \x01(\x08\x12\x17\n\x0fsharedFolderUid\x18\x04 \x01(\x0c\"\xe7\x02\n\x17GetShareObjectsResponse\x12.\n\x12shareRelationships\x18\x01 \x03(\x0b\x32\x12.Records.ShareUser\x12,\n\x10shareFamilyUsers\x18\x02 \x03(\x0b\x32\x12.Records.ShareUser\x12\x30\n\x14shareEnterpriseUsers\x18\x03 \x03(\x0b\x32\x12.Records.ShareUser\x12&\n\nshareTeams\x18\x04 \x03(\x0b\x32\x12.Records.ShareTeam\x12(\n\x0cshareMCTeams\x18\x05 \x03(\x0b\x32\x12.Records.ShareTeam\x12\x32\n\x16shareMCEnterpriseUsers\x18\x06 \x03(\x0b\x32\x12.Records.ShareUser\x12\x36\n\x14shareEnterpriseNames\x18\x07 \x03(\x0b\x32\x18.Records.ShareEnterprise\"\xbd\x01\n\tShareUser\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x10\n\x08\x66ullname\x18\x02 \x01(\t\x12\x14\n\x0c\x65nterpriseId\x18\x03 \x01(\x05\x12$\n\x06status\x18\x04 \x01(\x0e\x32\x14.Records.ShareStatus\x12\x14\n\x0cisShareAdmin\x18\x05 \x01(\x08\x12\"\n\x1aisAdminOfSharedFolderOwner\x18\x06 \x01(\x08\x12\x16\n\x0euserAccountUid\x18\x07 \x01(\x0c\"D\n\tShareTeam\x12\x10\n\x08teamname\x18\x01 \x01(\t\x12\x14\n\x0c\x65nterpriseId\x18\x02 \x01(\x05\x12\x0f\n\x07teamUid\x18\x03 \x01(\x0c\"?\n\x0fShareEnterprise\x12\x16\n\x0e\x65nterprisename\x18\x01 \x01(\t\x12\x14\n\x0c\x65nterpriseId\x18\x02 \x01(\x05\"S\n\x1fRecordsOnwershipTransferRequest\x12\x30\n\x0ftransferRecords\x18\x01 
\x03(\x0b\x32\x17.Records.TransferRecord\"[\n\x0eTransferRecord\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x11\n\trecordKey\x18\x03 \x01(\x0c\x12\x11\n\tuseEccKey\x18\x04 \x01(\x08\"_\n RecordsOnwershipTransferResponse\x12;\n\x14transferRecordStatus\x18\x01 \x03(\x0b\x32\x1d.Records.TransferRecordStatus\"\\\n\x14TransferRecordStatus\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x11\n\trecordUid\x18\x02 \x01(\x0c\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\x0f\n\x07message\x18\x04 \x01(\t\"y\n\x15RecordsUnshareRequest\x12\x34\n\rsharedFolders\x18\x01 \x03(\x0b\x32\x1d.Records.RecordsUnshareFolder\x12*\n\x05users\x18\x02 \x03(\x0b\x32\x1b.Records.RecordsUnshareUser\"\x86\x01\n\x16RecordsUnshareResponse\x12:\n\rsharedFolders\x18\x01 \x03(\x0b\x32#.Records.RecordsUnshareFolderStatus\x12\x30\n\x05users\x18\x02 \x03(\x0b\x32!.Records.RecordsUnshareUserStatus\"B\n\x14RecordsUnshareFolder\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x17\n\x0fsharedFolderUid\x18\x02 \x01(\x0c\";\n\x12RecordsUnshareUser\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x12\n\naccountUid\x18\x02 \x01(\x0c\"H\n\x1aRecordsUnshareFolderStatus\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x17\n\x0fsharedFolderUid\x18\x02 \x01(\x0c\"A\n\x18RecordsUnshareUserStatus\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x12\n\naccountUid\x18\x02 \x01(\x0c\"[\n\x1aTimedAccessCallbackPayload\x12=\n\x15timeLimitedAccessType\x18\x01 \x01(\x0e\x32\x1e.Records.TimeLimitedAccessType\"\xfd\x01\n\x18TimeLimitedAccessRequest\x12\x12\n\naccountUid\x18\x01 \x03(\x0c\x12\x0f\n\x07teamUid\x18\x02 \x03(\x0c\x12\x11\n\trecordUid\x18\x03 \x03(\x0c\x12\x17\n\x0fsharedObjectUid\x18\x04 \x01(\x0c\x12=\n\x15timeLimitedAccessType\x18\x05 \x01(\x0e\x32\x1e.Records.TimeLimitedAccessType\x12\x12\n\nexpiration\x18\x06 \x01(\x03\x12=\n\x15timerNotificationType\x18\x07 \x01(\x0e\x32\x1e.Records.TimerNotificationType\"7\n\x17TimeLimitedAccessStatus\x12\x0b\n\x03uid\x18\x01 \x01(\x0c\x12\x0f\n\x07message\x18\x02 
\x01(\t\"\xe3\x01\n\x19TimeLimitedAccessResponse\x12\x10\n\x08revision\x18\x01 \x01(\x03\x12:\n\x10userAccessStatus\x18\x02 \x03(\x0b\x32 .Records.TimeLimitedAccessStatus\x12:\n\x10teamAccessStatus\x18\x03 \x03(\x0b\x32 .Records.TimeLimitedAccessStatus\x12<\n\x12recordAccessStatus\x18\x04 \x03(\x0b\x32 .Records.TimeLimitedAccessStatus*h\n\x0fRecordTypeScope\x12\x0f\n\x0bRT_STANDARD\x10\x00\x12\x0b\n\x07RT_USER\x10\x01\x12\x11\n\rRT_ENTERPRISE\x10\x02\x12\n\n\x06RT_PAM\x10\x03\x12\x18\n\x14RT_PAM_CONFIGURATION\x10\x04*\xd1\x01\n\rRecordKeyType\x12\n\n\x06NO_KEY\x10\x00\x12\x19\n\x15\x45NCRYPTED_BY_DATA_KEY\x10\x01\x12\x1b\n\x17\x45NCRYPTED_BY_PUBLIC_KEY\x10\x02\x12\x1d\n\x19\x45NCRYPTED_BY_DATA_KEY_GCM\x10\x03\x12\x1f\n\x1b\x45NCRYPTED_BY_PUBLIC_KEY_ECC\x10\x04\x12\x1d\n\x19\x45NCRYPTED_BY_ROOT_KEY_CBC\x10\x05\x12\x1d\n\x19\x45NCRYPTED_BY_ROOT_KEY_GCM\x10\x06*P\n\x10RecordFolderType\x12\x0f\n\x0buser_folder\x10\x00\x12\x11\n\rshared_folder\x10\x01\x12\x18\n\x14shared_folder_folder\x10\x02*\xec\x02\n\x12RecordModifyResult\x12\x0e\n\nRS_SUCCESS\x10\x00\x12\x12\n\x0eRS_OUT_OF_SYNC\x10\x01\x12\x14\n\x10RS_ACCESS_DENIED\x10\x02\x12\x13\n\x0fRS_SHARE_DENIED\x10\x03\x12\x14\n\x10RS_RECORD_EXISTS\x10\x04\x12\x1e\n\x1aRS_OLD_RECORD_VERSION_TYPE\x10\x05\x12\x1e\n\x1aRS_NEW_RECORD_VERSION_TYPE\x10\x06\x12\x16\n\x12RS_FILES_NOT_MATCH\x10\x07\x12\x1b\n\x17RS_RECORD_NOT_SHAREABLE\x10\x08\x12\x1f\n\x1bRS_ATTACHMENT_NOT_SHAREABLE\x10\t\x12\x19\n\x15RS_FILE_LIMIT_REACHED\x10\n\x12\x1a\n\x16RS_SIZE_EXCEEDED_LIMIT\x10\x0b\x12$\n 
RS_ONLY_OWNER_CAN_MODIFY_SCRIPTS\x10\x0c*-\n\rFileAddResult\x12\x0e\n\nFA_SUCCESS\x10\x00\x12\x0c\n\x08\x46\x41_ERROR\x10\x01*C\n\rFileGetResult\x12\x0e\n\nFG_SUCCESS\x10\x00\x12\x0c\n\x08\x46G_ERROR\x10\x01\x12\x14\n\x10\x46G_ACCESS_DENIED\x10\x02*J\n\x14RecordDetailsInclude\x12\x13\n\x0f\x44\x41TA_PLUS_SHARE\x10\x00\x12\r\n\tDATA_ONLY\x10\x01\x12\x0e\n\nSHARE_ONLY\x10\x02*b\n\x19\x43heckShareAdminObjectType\x12\x19\n\x15\x43HECK_SA_INVALID_TYPE\x10\x00\x12\x12\n\x0e\x43HECK_SA_ON_SF\x10\x01\x12\x16\n\x12\x43HECK_SA_ON_RECORD\x10\x02*1\n\x0bShareStatus\x12\n\n\x06\x41\x43TIVE\x10\x00\x12\t\n\x05\x42LOCK\x10\x01\x12\x0b\n\x07INVITED\x10\x02*:\n\x15RecordTransactionType\x12\x0f\n\x0bRTT_GENERAL\x10\x00\x12\x10\n\x0cRTT_ROTATION\x10\x01*\xdc\x02\n\x15TimeLimitedAccessType\x12$\n INVALID_TIME_LIMITED_ACCESS_TYPE\x10\x00\x12\x19\n\x15USER_ACCESS_TO_RECORD\x10\x01\x12\'\n#USER_OR_TEAM_ACCESS_TO_SHAREDFOLDER\x10\x02\x12!\n\x1dRECORD_ACCESS_TO_SHAREDFOLDER\x10\x03\x12\x1f\n\x1bUSER_ACCESS_TO_SHAREDFOLDER\x10\x04\x12\x1f\n\x1bTEAM_ACCESS_TO_SHAREDFOLDER\x10\x05\x12\x1b\n\x17RECORD_ACCESS_TO_FOLDER\x10\x06\x12\x19\n\x15USER_ACCESS_TO_FOLDER\x10\x07\x12\x19\n\x15TEAM_ACCESS_TO_FOLDER\x10\x08\x12!\n\x1dUSER_OR_TEAM_ACCESS_TO_FOLDER\x10\t*\\\n\x15TimerNotificationType\x12\x14\n\x10NOTIFICATION_OFF\x10\x00\x12\x10\n\x0cNOTIFY_OWNER\x10\x01\x12\x1b\n\x17NOTIFY_PRIVILEGED_USERS\x10\x02\x42#\n\x18\x63om.keepersecurity.protoB\x07Recordsb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'record_pb2', _globals) -if _descriptor._USE_C_DESCRIPTORS == False: - _globals['DESCRIPTOR']._options = None +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'\n\030com.keepersecurity.protoB\007Records' - _globals['_RECORDTYPESCOPE']._serialized_start=9466 - _globals['_RECORDTYPESCOPE']._serialized_end=9570 - 
_globals['_RECORDKEYTYPE']._serialized_start=9573 - _globals['_RECORDKEYTYPE']._serialized_end=9782 - _globals['_RECORDFOLDERTYPE']._serialized_start=9784 - _globals['_RECORDFOLDERTYPE']._serialized_end=9864 - _globals['_RECORDMODIFYRESULT']._serialized_start=9867 - _globals['_RECORDMODIFYRESULT']._serialized_end=10231 - _globals['_FILEADDRESULT']._serialized_start=10233 - _globals['_FILEADDRESULT']._serialized_end=10278 - _globals['_FILEGETRESULT']._serialized_start=10280 - _globals['_FILEGETRESULT']._serialized_end=10347 - _globals['_RECORDDETAILSINCLUDE']._serialized_start=10349 - _globals['_RECORDDETAILSINCLUDE']._serialized_end=10423 - _globals['_CHECKSHAREADMINOBJECTTYPE']._serialized_start=10425 - _globals['_CHECKSHAREADMINOBJECTTYPE']._serialized_end=10523 - _globals['_SHARESTATUS']._serialized_start=10525 - _globals['_SHARESTATUS']._serialized_end=10574 - _globals['_RECORDTRANSACTIONTYPE']._serialized_start=10576 - _globals['_RECORDTRANSACTIONTYPE']._serialized_end=10634 - _globals['_TIMELIMITEDACCESSTYPE']._serialized_start=10637 - _globals['_TIMELIMITEDACCESSTYPE']._serialized_end=10867 - _globals['_TIMERNOTIFICATIONTYPE']._serialized_start=10869 - _globals['_TIMERNOTIFICATIONTYPE']._serialized_end=10961 + _globals['_RECORDTYPESCOPE']._serialized_start=9490 + _globals['_RECORDTYPESCOPE']._serialized_end=9594 + _globals['_RECORDKEYTYPE']._serialized_start=9597 + _globals['_RECORDKEYTYPE']._serialized_end=9806 + _globals['_RECORDFOLDERTYPE']._serialized_start=9808 + _globals['_RECORDFOLDERTYPE']._serialized_end=9888 + _globals['_RECORDMODIFYRESULT']._serialized_start=9891 + _globals['_RECORDMODIFYRESULT']._serialized_end=10255 + _globals['_FILEADDRESULT']._serialized_start=10257 + _globals['_FILEADDRESULT']._serialized_end=10302 + _globals['_FILEGETRESULT']._serialized_start=10304 + _globals['_FILEGETRESULT']._serialized_end=10371 + _globals['_RECORDDETAILSINCLUDE']._serialized_start=10373 + _globals['_RECORDDETAILSINCLUDE']._serialized_end=10447 + 
_globals['_CHECKSHAREADMINOBJECTTYPE']._serialized_start=10449 + _globals['_CHECKSHAREADMINOBJECTTYPE']._serialized_end=10547 + _globals['_SHARESTATUS']._serialized_start=10549 + _globals['_SHARESTATUS']._serialized_end=10598 + _globals['_RECORDTRANSACTIONTYPE']._serialized_start=10600 + _globals['_RECORDTRANSACTIONTYPE']._serialized_end=10658 + _globals['_TIMELIMITEDACCESSTYPE']._serialized_start=10661 + _globals['_TIMELIMITEDACCESSTYPE']._serialized_end=11009 + _globals['_TIMERNOTIFICATIONTYPE']._serialized_start=11011 + _globals['_TIMERNOTIFICATIONTYPE']._serialized_end=11103 _globals['_RECORDTYPE']._serialized_start=25 _globals['_RECORDTYPE']._serialized_end=117 _globals['_RECORDTYPESREQUEST']._serialized_start=119 @@ -156,37 +157,37 @@ _globals['_GETSHAREOBJECTSRESPONSE']._serialized_start=7267 _globals['_GETSHAREOBJECTSRESPONSE']._serialized_end=7626 _globals['_SHAREUSER']._serialized_start=7629 - _globals['_SHAREUSER']._serialized_end=7794 - _globals['_SHARETEAM']._serialized_start=7796 - _globals['_SHARETEAM']._serialized_end=7864 - _globals['_SHAREENTERPRISE']._serialized_start=7866 - _globals['_SHAREENTERPRISE']._serialized_end=7929 - _globals['_RECORDSONWERSHIPTRANSFERREQUEST']._serialized_start=7931 - _globals['_RECORDSONWERSHIPTRANSFERREQUEST']._serialized_end=8014 - _globals['_TRANSFERRECORD']._serialized_start=8016 - _globals['_TRANSFERRECORD']._serialized_end=8107 - _globals['_RECORDSONWERSHIPTRANSFERRESPONSE']._serialized_start=8109 - _globals['_RECORDSONWERSHIPTRANSFERRESPONSE']._serialized_end=8204 - _globals['_TRANSFERRECORDSTATUS']._serialized_start=8206 - _globals['_TRANSFERRECORDSTATUS']._serialized_end=8298 - _globals['_RECORDSUNSHAREREQUEST']._serialized_start=8300 - _globals['_RECORDSUNSHAREREQUEST']._serialized_end=8421 - _globals['_RECORDSUNSHARERESPONSE']._serialized_start=8424 - _globals['_RECORDSUNSHARERESPONSE']._serialized_end=8558 - _globals['_RECORDSUNSHAREFOLDER']._serialized_start=8560 - 
_globals['_RECORDSUNSHAREFOLDER']._serialized_end=8626 - _globals['_RECORDSUNSHAREUSER']._serialized_start=8628 - _globals['_RECORDSUNSHAREUSER']._serialized_end=8687 - _globals['_RECORDSUNSHAREFOLDERSTATUS']._serialized_start=8689 - _globals['_RECORDSUNSHAREFOLDERSTATUS']._serialized_end=8761 - _globals['_RECORDSUNSHAREUSERSTATUS']._serialized_start=8763 - _globals['_RECORDSUNSHAREUSERSTATUS']._serialized_end=8828 - _globals['_TIMEDACCESSCALLBACKPAYLOAD']._serialized_start=8830 - _globals['_TIMEDACCESSCALLBACKPAYLOAD']._serialized_end=8921 - _globals['_TIMELIMITEDACCESSREQUEST']._serialized_start=8924 - _globals['_TIMELIMITEDACCESSREQUEST']._serialized_end=9177 - _globals['_TIMELIMITEDACCESSSTATUS']._serialized_start=9179 - _globals['_TIMELIMITEDACCESSSTATUS']._serialized_end=9234 - _globals['_TIMELIMITEDACCESSRESPONSE']._serialized_start=9237 - _globals['_TIMELIMITEDACCESSRESPONSE']._serialized_end=9464 + _globals['_SHAREUSER']._serialized_end=7818 + _globals['_SHARETEAM']._serialized_start=7820 + _globals['_SHARETEAM']._serialized_end=7888 + _globals['_SHAREENTERPRISE']._serialized_start=7890 + _globals['_SHAREENTERPRISE']._serialized_end=7953 + _globals['_RECORDSONWERSHIPTRANSFERREQUEST']._serialized_start=7955 + _globals['_RECORDSONWERSHIPTRANSFERREQUEST']._serialized_end=8038 + _globals['_TRANSFERRECORD']._serialized_start=8040 + _globals['_TRANSFERRECORD']._serialized_end=8131 + _globals['_RECORDSONWERSHIPTRANSFERRESPONSE']._serialized_start=8133 + _globals['_RECORDSONWERSHIPTRANSFERRESPONSE']._serialized_end=8228 + _globals['_TRANSFERRECORDSTATUS']._serialized_start=8230 + _globals['_TRANSFERRECORDSTATUS']._serialized_end=8322 + _globals['_RECORDSUNSHAREREQUEST']._serialized_start=8324 + _globals['_RECORDSUNSHAREREQUEST']._serialized_end=8445 + _globals['_RECORDSUNSHARERESPONSE']._serialized_start=8448 + _globals['_RECORDSUNSHARERESPONSE']._serialized_end=8582 + _globals['_RECORDSUNSHAREFOLDER']._serialized_start=8584 + 
_globals['_RECORDSUNSHAREFOLDER']._serialized_end=8650 + _globals['_RECORDSUNSHAREUSER']._serialized_start=8652 + _globals['_RECORDSUNSHAREUSER']._serialized_end=8711 + _globals['_RECORDSUNSHAREFOLDERSTATUS']._serialized_start=8713 + _globals['_RECORDSUNSHAREFOLDERSTATUS']._serialized_end=8785 + _globals['_RECORDSUNSHAREUSERSTATUS']._serialized_start=8787 + _globals['_RECORDSUNSHAREUSERSTATUS']._serialized_end=8852 + _globals['_TIMEDACCESSCALLBACKPAYLOAD']._serialized_start=8854 + _globals['_TIMEDACCESSCALLBACKPAYLOAD']._serialized_end=8945 + _globals['_TIMELIMITEDACCESSREQUEST']._serialized_start=8948 + _globals['_TIMELIMITEDACCESSREQUEST']._serialized_end=9201 + _globals['_TIMELIMITEDACCESSSTATUS']._serialized_start=9203 + _globals['_TIMELIMITEDACCESSSTATUS']._serialized_end=9258 + _globals['_TIMELIMITEDACCESSRESPONSE']._serialized_start=9261 + _globals['_TIMELIMITEDACCESSRESPONSE']._serialized_end=9488 # @@protoc_insertion_point(module_scope) diff --git a/keepercommander/proto/record_pb2.pyi b/keepercommander/proto/record_pb2.pyi index f91f13aea..6dfdc6fa3 100644 --- a/keepercommander/proto/record_pb2.pyi +++ b/keepercommander/proto/record_pb2.pyi @@ -2,12 +2,13 @@ from google.protobuf.internal import containers as _containers from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union +from collections.abc import Iterable as _Iterable, Mapping as _Mapping +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union DESCRIPTOR: _descriptor.FileDescriptor class RecordTypeScope(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () RT_STANDARD: _ClassVar[RecordTypeScope] RT_USER: _ClassVar[RecordTypeScope] RT_ENTERPRISE: _ClassVar[RecordTypeScope] @@ -15,7 +16,7 @@ class 
RecordTypeScope(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): RT_PAM_CONFIGURATION: _ClassVar[RecordTypeScope] class RecordKeyType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () NO_KEY: _ClassVar[RecordKeyType] ENCRYPTED_BY_DATA_KEY: _ClassVar[RecordKeyType] ENCRYPTED_BY_PUBLIC_KEY: _ClassVar[RecordKeyType] @@ -25,13 +26,13 @@ class RecordKeyType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): ENCRYPTED_BY_ROOT_KEY_GCM: _ClassVar[RecordKeyType] class RecordFolderType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () user_folder: _ClassVar[RecordFolderType] shared_folder: _ClassVar[RecordFolderType] shared_folder_folder: _ClassVar[RecordFolderType] class RecordModifyResult(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () RS_SUCCESS: _ClassVar[RecordModifyResult] RS_OUT_OF_SYNC: _ClassVar[RecordModifyResult] RS_ACCESS_DENIED: _ClassVar[RecordModifyResult] @@ -47,50 +48,54 @@ class RecordModifyResult(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): RS_ONLY_OWNER_CAN_MODIFY_SCRIPTS: _ClassVar[RecordModifyResult] class FileAddResult(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () FA_SUCCESS: _ClassVar[FileAddResult] FA_ERROR: _ClassVar[FileAddResult] class FileGetResult(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () FG_SUCCESS: _ClassVar[FileGetResult] FG_ERROR: _ClassVar[FileGetResult] FG_ACCESS_DENIED: _ClassVar[FileGetResult] class RecordDetailsInclude(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () DATA_PLUS_SHARE: _ClassVar[RecordDetailsInclude] DATA_ONLY: _ClassVar[RecordDetailsInclude] SHARE_ONLY: _ClassVar[RecordDetailsInclude] class CheckShareAdminObjectType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () CHECK_SA_INVALID_TYPE: _ClassVar[CheckShareAdminObjectType] CHECK_SA_ON_SF: 
_ClassVar[CheckShareAdminObjectType] CHECK_SA_ON_RECORD: _ClassVar[CheckShareAdminObjectType] class ShareStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () ACTIVE: _ClassVar[ShareStatus] BLOCK: _ClassVar[ShareStatus] INVITED: _ClassVar[ShareStatus] class RecordTransactionType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () RTT_GENERAL: _ClassVar[RecordTransactionType] RTT_ROTATION: _ClassVar[RecordTransactionType] class TimeLimitedAccessType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () INVALID_TIME_LIMITED_ACCESS_TYPE: _ClassVar[TimeLimitedAccessType] USER_ACCESS_TO_RECORD: _ClassVar[TimeLimitedAccessType] USER_OR_TEAM_ACCESS_TO_SHAREDFOLDER: _ClassVar[TimeLimitedAccessType] RECORD_ACCESS_TO_SHAREDFOLDER: _ClassVar[TimeLimitedAccessType] USER_ACCESS_TO_SHAREDFOLDER: _ClassVar[TimeLimitedAccessType] TEAM_ACCESS_TO_SHAREDFOLDER: _ClassVar[TimeLimitedAccessType] + RECORD_ACCESS_TO_FOLDER: _ClassVar[TimeLimitedAccessType] + USER_ACCESS_TO_FOLDER: _ClassVar[TimeLimitedAccessType] + TEAM_ACCESS_TO_FOLDER: _ClassVar[TimeLimitedAccessType] + USER_OR_TEAM_ACCESS_TO_FOLDER: _ClassVar[TimeLimitedAccessType] class TimerNotificationType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () NOTIFICATION_OFF: _ClassVar[TimerNotificationType] NOTIFY_OWNER: _ClassVar[TimerNotificationType] NOTIFY_PRIVILEGED_USERS: _ClassVar[TimerNotificationType] @@ -144,12 +149,16 @@ USER_OR_TEAM_ACCESS_TO_SHAREDFOLDER: TimeLimitedAccessType RECORD_ACCESS_TO_SHAREDFOLDER: TimeLimitedAccessType USER_ACCESS_TO_SHAREDFOLDER: TimeLimitedAccessType TEAM_ACCESS_TO_SHAREDFOLDER: TimeLimitedAccessType +RECORD_ACCESS_TO_FOLDER: TimeLimitedAccessType +USER_ACCESS_TO_FOLDER: TimeLimitedAccessType +TEAM_ACCESS_TO_FOLDER: TimeLimitedAccessType +USER_OR_TEAM_ACCESS_TO_FOLDER: TimeLimitedAccessType NOTIFICATION_OFF: TimerNotificationType NOTIFY_OWNER: 
TimerNotificationType NOTIFY_PRIVILEGED_USERS: TimerNotificationType class RecordType(_message.Message): - __slots__ = ["recordTypeId", "content", "scope"] + __slots__ = ("recordTypeId", "content", "scope") RECORDTYPEID_FIELD_NUMBER: _ClassVar[int] CONTENT_FIELD_NUMBER: _ClassVar[int] SCOPE_FIELD_NUMBER: _ClassVar[int] @@ -159,7 +168,7 @@ class RecordType(_message.Message): def __init__(self, recordTypeId: _Optional[int] = ..., content: _Optional[str] = ..., scope: _Optional[_Union[RecordTypeScope, str]] = ...) -> None: ... class RecordTypesRequest(_message.Message): - __slots__ = ["standard", "user", "enterprise", "pam"] + __slots__ = ("standard", "user", "enterprise", "pam") STANDARD_FIELD_NUMBER: _ClassVar[int] USER_FIELD_NUMBER: _ClassVar[int] ENTERPRISE_FIELD_NUMBER: _ClassVar[int] @@ -168,10 +177,10 @@ class RecordTypesRequest(_message.Message): user: bool enterprise: bool pam: bool - def __init__(self, standard: bool = ..., user: bool = ..., enterprise: bool = ..., pam: bool = ...) -> None: ... + def __init__(self, standard: _Optional[bool] = ..., user: _Optional[bool] = ..., enterprise: _Optional[bool] = ..., pam: _Optional[bool] = ...) -> None: ... class RecordTypesResponse(_message.Message): - __slots__ = ["recordTypes", "standardCounter", "userCounter", "enterpriseCounter", "pamCounter"] + __slots__ = ("recordTypes", "standardCounter", "userCounter", "enterpriseCounter", "pamCounter") RECORDTYPES_FIELD_NUMBER: _ClassVar[int] STANDARDCOUNTER_FIELD_NUMBER: _ClassVar[int] USERCOUNTER_FIELD_NUMBER: _ClassVar[int] @@ -185,7 +194,7 @@ class RecordTypesResponse(_message.Message): def __init__(self, recordTypes: _Optional[_Iterable[_Union[RecordType, _Mapping]]] = ..., standardCounter: _Optional[int] = ..., userCounter: _Optional[int] = ..., enterpriseCounter: _Optional[int] = ..., pamCounter: _Optional[int] = ...) -> None: ... 
class RecordTypeModifyResponse(_message.Message): - __slots__ = ["recordTypeId", "counter"] + __slots__ = ("recordTypeId", "counter") RECORDTYPEID_FIELD_NUMBER: _ClassVar[int] COUNTER_FIELD_NUMBER: _ClassVar[int] recordTypeId: int @@ -193,7 +202,7 @@ class RecordTypeModifyResponse(_message.Message): def __init__(self, recordTypeId: _Optional[int] = ..., counter: _Optional[int] = ...) -> None: ... class RecordsGetRequest(_message.Message): - __slots__ = ["record_uids", "client_time"] + __slots__ = ("record_uids", "client_time") RECORD_UIDS_FIELD_NUMBER: _ClassVar[int] CLIENT_TIME_FIELD_NUMBER: _ClassVar[int] record_uids: _containers.RepeatedScalarFieldContainer[bytes] @@ -201,7 +210,7 @@ class RecordsGetRequest(_message.Message): def __init__(self, record_uids: _Optional[_Iterable[bytes]] = ..., client_time: _Optional[int] = ...) -> None: ... class Record(_message.Message): - __slots__ = ["record_uid", "record_key", "record_key_type", "data", "extra", "version", "client_modified_time", "revision", "file_ids"] + __slots__ = ("record_uid", "record_key", "record_key_type", "data", "extra", "version", "client_modified_time", "revision", "file_ids") RECORD_UID_FIELD_NUMBER: _ClassVar[int] RECORD_KEY_FIELD_NUMBER: _ClassVar[int] RECORD_KEY_TYPE_FIELD_NUMBER: _ClassVar[int] @@ -223,7 +232,7 @@ class Record(_message.Message): def __init__(self, record_uid: _Optional[bytes] = ..., record_key: _Optional[bytes] = ..., record_key_type: _Optional[_Union[RecordKeyType, str]] = ..., data: _Optional[bytes] = ..., extra: _Optional[bytes] = ..., version: _Optional[int] = ..., client_modified_time: _Optional[int] = ..., revision: _Optional[int] = ..., file_ids: _Optional[_Iterable[bytes]] = ...) -> None: ... 
class FolderRecordKey(_message.Message): - __slots__ = ["folder_uid", "record_uid", "record_key"] + __slots__ = ("folder_uid", "record_uid", "record_key") FOLDER_UID_FIELD_NUMBER: _ClassVar[int] RECORD_UID_FIELD_NUMBER: _ClassVar[int] RECORD_KEY_FIELD_NUMBER: _ClassVar[int] @@ -233,7 +242,7 @@ class FolderRecordKey(_message.Message): def __init__(self, folder_uid: _Optional[bytes] = ..., record_uid: _Optional[bytes] = ..., record_key: _Optional[bytes] = ...) -> None: ... class Folder(_message.Message): - __slots__ = ["folder_uid", "folder_key", "folder_key_type"] + __slots__ = ("folder_uid", "folder_key", "folder_key_type") FOLDER_UID_FIELD_NUMBER: _ClassVar[int] FOLDER_KEY_FIELD_NUMBER: _ClassVar[int] FOLDER_KEY_TYPE_FIELD_NUMBER: _ClassVar[int] @@ -243,7 +252,7 @@ class Folder(_message.Message): def __init__(self, folder_uid: _Optional[bytes] = ..., folder_key: _Optional[bytes] = ..., folder_key_type: _Optional[_Union[RecordKeyType, str]] = ...) -> None: ... class Team(_message.Message): - __slots__ = ["team_uid", "team_key", "team_private_key", "team_key_type", "folders"] + __slots__ = ("team_uid", "team_key", "team_private_key", "team_key_type", "folders") TEAM_UID_FIELD_NUMBER: _ClassVar[int] TEAM_KEY_FIELD_NUMBER: _ClassVar[int] TEAM_PRIVATE_KEY_FIELD_NUMBER: _ClassVar[int] @@ -257,7 +266,7 @@ class Team(_message.Message): def __init__(self, team_uid: _Optional[bytes] = ..., team_key: _Optional[bytes] = ..., team_private_key: _Optional[bytes] = ..., team_key_type: _Optional[_Union[RecordKeyType, str]] = ..., folders: _Optional[_Iterable[_Union[Folder, _Mapping]]] = ...) -> None: ... 
class RecordsGetResponse(_message.Message): - __slots__ = ["records", "folder_record_keys", "folders", "teams"] + __slots__ = ("records", "folder_record_keys", "folders", "teams") RECORDS_FIELD_NUMBER: _ClassVar[int] FOLDER_RECORD_KEYS_FIELD_NUMBER: _ClassVar[int] FOLDERS_FIELD_NUMBER: _ClassVar[int] @@ -269,7 +278,7 @@ class RecordsGetResponse(_message.Message): def __init__(self, records: _Optional[_Iterable[_Union[Record, _Mapping]]] = ..., folder_record_keys: _Optional[_Iterable[_Union[FolderRecordKey, _Mapping]]] = ..., folders: _Optional[_Iterable[_Union[Folder, _Mapping]]] = ..., teams: _Optional[_Iterable[_Union[Team, _Mapping]]] = ...) -> None: ... class RecordLink(_message.Message): - __slots__ = ["record_uid", "record_key"] + __slots__ = ("record_uid", "record_key") RECORD_UID_FIELD_NUMBER: _ClassVar[int] RECORD_KEY_FIELD_NUMBER: _ClassVar[int] record_uid: bytes @@ -277,7 +286,7 @@ class RecordLink(_message.Message): def __init__(self, record_uid: _Optional[bytes] = ..., record_key: _Optional[bytes] = ...) -> None: ... class RecordAudit(_message.Message): - __slots__ = ["version", "data"] + __slots__ = ("version", "data") VERSION_FIELD_NUMBER: _ClassVar[int] DATA_FIELD_NUMBER: _ClassVar[int] version: int @@ -285,19 +294,19 @@ class RecordAudit(_message.Message): def __init__(self, version: _Optional[int] = ..., data: _Optional[bytes] = ...) -> None: ... class SecurityData(_message.Message): - __slots__ = ["data"] + __slots__ = ("data",) DATA_FIELD_NUMBER: _ClassVar[int] data: bytes def __init__(self, data: _Optional[bytes] = ...) -> None: ... class SecurityScoreData(_message.Message): - __slots__ = ["data"] + __slots__ = ("data",) DATA_FIELD_NUMBER: _ClassVar[int] data: bytes def __init__(self, data: _Optional[bytes] = ...) -> None: ... 
class RecordAdd(_message.Message): - __slots__ = ["record_uid", "record_key", "client_modified_time", "data", "non_shared_data", "folder_type", "folder_uid", "folder_key", "record_links", "audit", "securityData", "securityScoreData"] + __slots__ = ("record_uid", "record_key", "client_modified_time", "data", "non_shared_data", "folder_type", "folder_uid", "folder_key", "record_links", "audit", "securityData", "securityScoreData") RECORD_UID_FIELD_NUMBER: _ClassVar[int] RECORD_KEY_FIELD_NUMBER: _ClassVar[int] CLIENT_MODIFIED_TIME_FIELD_NUMBER: _ClassVar[int] @@ -325,7 +334,7 @@ class RecordAdd(_message.Message): def __init__(self, record_uid: _Optional[bytes] = ..., record_key: _Optional[bytes] = ..., client_modified_time: _Optional[int] = ..., data: _Optional[bytes] = ..., non_shared_data: _Optional[bytes] = ..., folder_type: _Optional[_Union[RecordFolderType, str]] = ..., folder_uid: _Optional[bytes] = ..., folder_key: _Optional[bytes] = ..., record_links: _Optional[_Iterable[_Union[RecordLink, _Mapping]]] = ..., audit: _Optional[_Union[RecordAudit, _Mapping]] = ..., securityData: _Optional[_Union[SecurityData, _Mapping]] = ..., securityScoreData: _Optional[_Union[SecurityScoreData, _Mapping]] = ...) -> None: ... class RecordsAddRequest(_message.Message): - __slots__ = ["records", "client_time", "security_data_key_type"] + __slots__ = ("records", "client_time", "security_data_key_type") RECORDS_FIELD_NUMBER: _ClassVar[int] CLIENT_TIME_FIELD_NUMBER: _ClassVar[int] SECURITY_DATA_KEY_TYPE_FIELD_NUMBER: _ClassVar[int] @@ -335,7 +344,7 @@ class RecordsAddRequest(_message.Message): def __init__(self, records: _Optional[_Iterable[_Union[RecordAdd, _Mapping]]] = ..., client_time: _Optional[int] = ..., security_data_key_type: _Optional[_Union[RecordKeyType, str]] = ...) -> None: ... 
class RecordUpdate(_message.Message): - __slots__ = ["record_uid", "client_modified_time", "revision", "data", "non_shared_data", "record_links_add", "record_links_remove", "audit", "securityData", "securityScoreData"] + __slots__ = ("record_uid", "client_modified_time", "revision", "data", "non_shared_data", "record_links_add", "record_links_remove", "audit", "securityData", "securityScoreData") RECORD_UID_FIELD_NUMBER: _ClassVar[int] CLIENT_MODIFIED_TIME_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] @@ -359,7 +368,7 @@ class RecordUpdate(_message.Message): def __init__(self, record_uid: _Optional[bytes] = ..., client_modified_time: _Optional[int] = ..., revision: _Optional[int] = ..., data: _Optional[bytes] = ..., non_shared_data: _Optional[bytes] = ..., record_links_add: _Optional[_Iterable[_Union[RecordLink, _Mapping]]] = ..., record_links_remove: _Optional[_Iterable[bytes]] = ..., audit: _Optional[_Union[RecordAudit, _Mapping]] = ..., securityData: _Optional[_Union[SecurityData, _Mapping]] = ..., securityScoreData: _Optional[_Union[SecurityScoreData, _Mapping]] = ...) -> None: ... class RecordsUpdateRequest(_message.Message): - __slots__ = ["records", "client_time", "security_data_key_type"] + __slots__ = ("records", "client_time", "security_data_key_type") RECORDS_FIELD_NUMBER: _ClassVar[int] CLIENT_TIME_FIELD_NUMBER: _ClassVar[int] SECURITY_DATA_KEY_TYPE_FIELD_NUMBER: _ClassVar[int] @@ -369,7 +378,7 @@ class RecordsUpdateRequest(_message.Message): def __init__(self, records: _Optional[_Iterable[_Union[RecordUpdate, _Mapping]]] = ..., client_time: _Optional[int] = ..., security_data_key_type: _Optional[_Union[RecordKeyType, str]] = ...) -> None: ... 
class RecordFileForConversion(_message.Message): - __slots__ = ["record_uid", "file_file_id", "thumb_file_id", "data", "record_key", "link_key"] + __slots__ = ("record_uid", "file_file_id", "thumb_file_id", "data", "record_key", "link_key") RECORD_UID_FIELD_NUMBER: _ClassVar[int] FILE_FILE_ID_FIELD_NUMBER: _ClassVar[int] THUMB_FILE_ID_FIELD_NUMBER: _ClassVar[int] @@ -385,7 +394,7 @@ class RecordFileForConversion(_message.Message): def __init__(self, record_uid: _Optional[bytes] = ..., file_file_id: _Optional[str] = ..., thumb_file_id: _Optional[str] = ..., data: _Optional[bytes] = ..., record_key: _Optional[bytes] = ..., link_key: _Optional[bytes] = ...) -> None: ... class RecordFolderForConversion(_message.Message): - __slots__ = ["folder_uid", "record_folder_key"] + __slots__ = ("folder_uid", "record_folder_key") FOLDER_UID_FIELD_NUMBER: _ClassVar[int] RECORD_FOLDER_KEY_FIELD_NUMBER: _ClassVar[int] folder_uid: bytes @@ -393,7 +402,7 @@ class RecordFolderForConversion(_message.Message): def __init__(self, folder_uid: _Optional[bytes] = ..., record_folder_key: _Optional[bytes] = ...) -> None: ... 
class RecordConvertToV3(_message.Message): - __slots__ = ["record_uid", "client_modified_time", "revision", "data", "non_shared_data", "audit", "record_file", "folder_key"] + __slots__ = ("record_uid", "client_modified_time", "revision", "data", "non_shared_data", "audit", "record_file", "folder_key") RECORD_UID_FIELD_NUMBER: _ClassVar[int] CLIENT_MODIFIED_TIME_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] @@ -413,7 +422,7 @@ class RecordConvertToV3(_message.Message): def __init__(self, record_uid: _Optional[bytes] = ..., client_modified_time: _Optional[int] = ..., revision: _Optional[int] = ..., data: _Optional[bytes] = ..., non_shared_data: _Optional[bytes] = ..., audit: _Optional[_Union[RecordAudit, _Mapping]] = ..., record_file: _Optional[_Iterable[_Union[RecordFileForConversion, _Mapping]]] = ..., folder_key: _Optional[_Iterable[_Union[RecordFolderForConversion, _Mapping]]] = ...) -> None: ... class RecordsConvertToV3Request(_message.Message): - __slots__ = ["records", "client_time"] + __slots__ = ("records", "client_time") RECORDS_FIELD_NUMBER: _ClassVar[int] CLIENT_TIME_FIELD_NUMBER: _ClassVar[int] records: _containers.RepeatedCompositeFieldContainer[RecordConvertToV3] @@ -421,13 +430,13 @@ class RecordsConvertToV3Request(_message.Message): def __init__(self, records: _Optional[_Iterable[_Union[RecordConvertToV3, _Mapping]]] = ..., client_time: _Optional[int] = ...) -> None: ... class RecordsRemoveRequest(_message.Message): - __slots__ = ["records"] + __slots__ = ("records",) RECORDS_FIELD_NUMBER: _ClassVar[int] records: _containers.RepeatedScalarFieldContainer[bytes] def __init__(self, records: _Optional[_Iterable[bytes]] = ...) -> None: ... 
class RecordRevert(_message.Message): - __slots__ = ["record_uid", "revert_to_revision"] + __slots__ = ("record_uid", "revert_to_revision") RECORD_UID_FIELD_NUMBER: _ClassVar[int] REVERT_TO_REVISION_FIELD_NUMBER: _ClassVar[int] record_uid: bytes @@ -435,13 +444,13 @@ class RecordRevert(_message.Message): def __init__(self, record_uid: _Optional[bytes] = ..., revert_to_revision: _Optional[int] = ...) -> None: ... class RecordsRevertRequest(_message.Message): - __slots__ = ["records"] + __slots__ = ("records",) RECORDS_FIELD_NUMBER: _ClassVar[int] records: _containers.RepeatedCompositeFieldContainer[RecordRevert] def __init__(self, records: _Optional[_Iterable[_Union[RecordRevert, _Mapping]]] = ...) -> None: ... class RecordLinkError(_message.Message): - __slots__ = ["record_uid", "status", "message"] + __slots__ = ("record_uid", "status", "message") RECORD_UID_FIELD_NUMBER: _ClassVar[int] STATUS_FIELD_NUMBER: _ClassVar[int] MESSAGE_FIELD_NUMBER: _ClassVar[int] @@ -451,7 +460,7 @@ class RecordLinkError(_message.Message): def __init__(self, record_uid: _Optional[bytes] = ..., status: _Optional[_Union[RecordModifyResult, str]] = ..., message: _Optional[str] = ...) -> None: ... class RecordModifyStatus(_message.Message): - __slots__ = ["record_uid", "status", "message", "link_errors"] + __slots__ = ("record_uid", "status", "message", "link_errors") RECORD_UID_FIELD_NUMBER: _ClassVar[int] STATUS_FIELD_NUMBER: _ClassVar[int] MESSAGE_FIELD_NUMBER: _ClassVar[int] @@ -463,7 +472,7 @@ class RecordModifyStatus(_message.Message): def __init__(self, record_uid: _Optional[bytes] = ..., status: _Optional[_Union[RecordModifyResult, str]] = ..., message: _Optional[str] = ..., link_errors: _Optional[_Iterable[_Union[RecordLinkError, _Mapping]]] = ...) -> None: ... 
class RecordsModifyResponse(_message.Message): - __slots__ = ["records", "revision"] + __slots__ = ("records", "revision") RECORDS_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] records: _containers.RepeatedCompositeFieldContainer[RecordModifyStatus] @@ -471,7 +480,7 @@ class RecordsModifyResponse(_message.Message): def __init__(self, records: _Optional[_Iterable[_Union[RecordModifyStatus, _Mapping]]] = ..., revision: _Optional[int] = ...) -> None: ... class RecordAddAuditData(_message.Message): - __slots__ = ["record_uid", "revision", "data", "version"] + __slots__ = ("record_uid", "revision", "data", "version") RECORD_UID_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] DATA_FIELD_NUMBER: _ClassVar[int] @@ -483,13 +492,13 @@ class RecordAddAuditData(_message.Message): def __init__(self, record_uid: _Optional[bytes] = ..., revision: _Optional[int] = ..., data: _Optional[bytes] = ..., version: _Optional[int] = ...) -> None: ... class AddAuditDataRequest(_message.Message): - __slots__ = ["records"] + __slots__ = ("records",) RECORDS_FIELD_NUMBER: _ClassVar[int] records: _containers.RepeatedCompositeFieldContainer[RecordAddAuditData] def __init__(self, records: _Optional[_Iterable[_Union[RecordAddAuditData, _Mapping]]] = ...) -> None: ... class File(_message.Message): - __slots__ = ["record_uid", "record_key", "data", "fileSize", "thumbSize", "is_script"] + __slots__ = ("record_uid", "record_key", "data", "fileSize", "thumbSize", "is_script") RECORD_UID_FIELD_NUMBER: _ClassVar[int] RECORD_KEY_FIELD_NUMBER: _ClassVar[int] DATA_FIELD_NUMBER: _ClassVar[int] @@ -502,10 +511,10 @@ class File(_message.Message): fileSize: int thumbSize: int is_script: bool - def __init__(self, record_uid: _Optional[bytes] = ..., record_key: _Optional[bytes] = ..., data: _Optional[bytes] = ..., fileSize: _Optional[int] = ..., thumbSize: _Optional[int] = ..., is_script: bool = ...) -> None: ... 
+ def __init__(self, record_uid: _Optional[bytes] = ..., record_key: _Optional[bytes] = ..., data: _Optional[bytes] = ..., fileSize: _Optional[int] = ..., thumbSize: _Optional[int] = ..., is_script: _Optional[bool] = ...) -> None: ... class FilesAddRequest(_message.Message): - __slots__ = ["files", "client_time"] + __slots__ = ("files", "client_time") FILES_FIELD_NUMBER: _ClassVar[int] CLIENT_TIME_FIELD_NUMBER: _ClassVar[int] files: _containers.RepeatedCompositeFieldContainer[File] @@ -513,7 +522,7 @@ class FilesAddRequest(_message.Message): def __init__(self, files: _Optional[_Iterable[_Union[File, _Mapping]]] = ..., client_time: _Optional[int] = ...) -> None: ... class FileAddStatus(_message.Message): - __slots__ = ["record_uid", "status", "url", "parameters", "thumbnail_parameters", "success_status_code"] + __slots__ = ("record_uid", "status", "url", "parameters", "thumbnail_parameters", "success_status_code") RECORD_UID_FIELD_NUMBER: _ClassVar[int] STATUS_FIELD_NUMBER: _ClassVar[int] URL_FIELD_NUMBER: _ClassVar[int] @@ -529,7 +538,7 @@ class FileAddStatus(_message.Message): def __init__(self, record_uid: _Optional[bytes] = ..., status: _Optional[_Union[FileAddResult, str]] = ..., url: _Optional[str] = ..., parameters: _Optional[str] = ..., thumbnail_parameters: _Optional[str] = ..., success_status_code: _Optional[int] = ...) -> None: ... class FilesAddResponse(_message.Message): - __slots__ = ["files", "revision"] + __slots__ = ("files", "revision") FILES_FIELD_NUMBER: _ClassVar[int] REVISION_FIELD_NUMBER: _ClassVar[int] files: _containers.RepeatedCompositeFieldContainer[FileAddStatus] @@ -537,17 +546,17 @@ class FilesAddResponse(_message.Message): def __init__(self, files: _Optional[_Iterable[_Union[FileAddStatus, _Mapping]]] = ..., revision: _Optional[int] = ...) -> None: ... 
class FilesGetRequest(_message.Message): - __slots__ = ["record_uids", "for_thumbnails", "emergency_access_account_owner"] + __slots__ = ("record_uids", "for_thumbnails", "emergency_access_account_owner") RECORD_UIDS_FIELD_NUMBER: _ClassVar[int] FOR_THUMBNAILS_FIELD_NUMBER: _ClassVar[int] EMERGENCY_ACCESS_ACCOUNT_OWNER_FIELD_NUMBER: _ClassVar[int] record_uids: _containers.RepeatedScalarFieldContainer[bytes] for_thumbnails: bool emergency_access_account_owner: str - def __init__(self, record_uids: _Optional[_Iterable[bytes]] = ..., for_thumbnails: bool = ..., emergency_access_account_owner: _Optional[str] = ...) -> None: ... + def __init__(self, record_uids: _Optional[_Iterable[bytes]] = ..., for_thumbnails: _Optional[bool] = ..., emergency_access_account_owner: _Optional[str] = ...) -> None: ... class FileGetStatus(_message.Message): - __slots__ = ["record_uid", "status", "url", "success_status_code", "fileKeyType"] + __slots__ = ("record_uid", "status", "url", "success_status_code", "fileKeyType") RECORD_UID_FIELD_NUMBER: _ClassVar[int] STATUS_FIELD_NUMBER: _ClassVar[int] URL_FIELD_NUMBER: _ClassVar[int] @@ -561,13 +570,13 @@ class FileGetStatus(_message.Message): def __init__(self, record_uid: _Optional[bytes] = ..., status: _Optional[_Union[FileGetResult, str]] = ..., url: _Optional[str] = ..., success_status_code: _Optional[int] = ..., fileKeyType: _Optional[_Union[RecordKeyType, str]] = ...) -> None: ... class FilesGetResponse(_message.Message): - __slots__ = ["files"] + __slots__ = ("files",) FILES_FIELD_NUMBER: _ClassVar[int] files: _containers.RepeatedCompositeFieldContainer[FileGetStatus] def __init__(self, files: _Optional[_Iterable[_Union[FileGetStatus, _Mapping]]] = ...) -> None: ... 
class ApplicationAddRequest(_message.Message): - __slots__ = ["app_uid", "record_key", "client_modified_time", "data", "audit"] + __slots__ = ("app_uid", "record_key", "client_modified_time", "data", "audit") APP_UID_FIELD_NUMBER: _ClassVar[int] RECORD_KEY_FIELD_NUMBER: _ClassVar[int] CLIENT_MODIFIED_TIME_FIELD_NUMBER: _ClassVar[int] @@ -581,7 +590,7 @@ class ApplicationAddRequest(_message.Message): def __init__(self, app_uid: _Optional[bytes] = ..., record_key: _Optional[bytes] = ..., client_modified_time: _Optional[int] = ..., data: _Optional[bytes] = ..., audit: _Optional[_Union[RecordAudit, _Mapping]] = ...) -> None: ... class GetRecordDataWithAccessInfoRequest(_message.Message): - __slots__ = ["clientTime", "recordUid", "recordDetailsInclude"] + __slots__ = ("clientTime", "recordUid", "recordDetailsInclude") CLIENTTIME_FIELD_NUMBER: _ClassVar[int] RECORDUID_FIELD_NUMBER: _ClassVar[int] RECORDDETAILSINCLUDE_FIELD_NUMBER: _ClassVar[int] @@ -591,7 +600,7 @@ class GetRecordDataWithAccessInfoRequest(_message.Message): def __init__(self, clientTime: _Optional[int] = ..., recordUid: _Optional[_Iterable[bytes]] = ..., recordDetailsInclude: _Optional[_Union[RecordDetailsInclude, str]] = ...) -> None: ... 
class UserPermission(_message.Message): - __slots__ = ["username", "owner", "shareAdmin", "sharable", "editable", "awaitingApproval", "expiration", "accountUid", "timerNotificationType", "rotateOnExpiration"] + __slots__ = ("username", "owner", "shareAdmin", "sharable", "editable", "awaitingApproval", "expiration", "accountUid", "timerNotificationType", "rotateOnExpiration") USERNAME_FIELD_NUMBER: _ClassVar[int] OWNER_FIELD_NUMBER: _ClassVar[int] SHAREADMIN_FIELD_NUMBER: _ClassVar[int] @@ -612,10 +621,10 @@ class UserPermission(_message.Message): accountUid: bytes timerNotificationType: TimerNotificationType rotateOnExpiration: bool - def __init__(self, username: _Optional[str] = ..., owner: bool = ..., shareAdmin: bool = ..., sharable: bool = ..., editable: bool = ..., awaitingApproval: bool = ..., expiration: _Optional[int] = ..., accountUid: _Optional[bytes] = ..., timerNotificationType: _Optional[_Union[TimerNotificationType, str]] = ..., rotateOnExpiration: bool = ...) -> None: ... + def __init__(self, username: _Optional[str] = ..., owner: _Optional[bool] = ..., shareAdmin: _Optional[bool] = ..., sharable: _Optional[bool] = ..., editable: _Optional[bool] = ..., awaitingApproval: _Optional[bool] = ..., expiration: _Optional[int] = ..., accountUid: _Optional[bytes] = ..., timerNotificationType: _Optional[_Union[TimerNotificationType, str]] = ..., rotateOnExpiration: _Optional[bool] = ...) -> None: ... 
class SharedFolderPermission(_message.Message): - __slots__ = ["sharedFolderUid", "resharable", "editable", "revision", "expiration", "timerNotificationType", "rotateOnExpiration"] + __slots__ = ("sharedFolderUid", "resharable", "editable", "revision", "expiration", "timerNotificationType", "rotateOnExpiration") SHAREDFOLDERUID_FIELD_NUMBER: _ClassVar[int] RESHARABLE_FIELD_NUMBER: _ClassVar[int] EDITABLE_FIELD_NUMBER: _ClassVar[int] @@ -630,10 +639,10 @@ class SharedFolderPermission(_message.Message): expiration: int timerNotificationType: TimerNotificationType rotateOnExpiration: bool - def __init__(self, sharedFolderUid: _Optional[bytes] = ..., resharable: bool = ..., editable: bool = ..., revision: _Optional[int] = ..., expiration: _Optional[int] = ..., timerNotificationType: _Optional[_Union[TimerNotificationType, str]] = ..., rotateOnExpiration: bool = ...) -> None: ... + def __init__(self, sharedFolderUid: _Optional[bytes] = ..., resharable: _Optional[bool] = ..., editable: _Optional[bool] = ..., revision: _Optional[int] = ..., expiration: _Optional[int] = ..., timerNotificationType: _Optional[_Union[TimerNotificationType, str]] = ..., rotateOnExpiration: _Optional[bool] = ...) -> None: ... 
class RecordData(_message.Message): - __slots__ = ["revision", "version", "shared", "encryptedRecordData", "encryptedExtraData", "clientModifiedTime", "nonSharedData", "linkedRecordData", "fileId", "fileSize", "thumbnailSize", "recordKeyType", "recordKey", "recordUid"] + __slots__ = ("revision", "version", "shared", "encryptedRecordData", "encryptedExtraData", "clientModifiedTime", "nonSharedData", "linkedRecordData", "fileId", "fileSize", "thumbnailSize", "recordKeyType", "recordKey", "recordUid") REVISION_FIELD_NUMBER: _ClassVar[int] VERSION_FIELD_NUMBER: _ClassVar[int] SHARED_FIELD_NUMBER: _ClassVar[int] @@ -662,10 +671,10 @@ class RecordData(_message.Message): recordKeyType: RecordKeyType recordKey: bytes recordUid: bytes - def __init__(self, revision: _Optional[int] = ..., version: _Optional[int] = ..., shared: bool = ..., encryptedRecordData: _Optional[str] = ..., encryptedExtraData: _Optional[str] = ..., clientModifiedTime: _Optional[int] = ..., nonSharedData: _Optional[str] = ..., linkedRecordData: _Optional[_Iterable[_Union[RecordData, _Mapping]]] = ..., fileId: _Optional[_Iterable[bytes]] = ..., fileSize: _Optional[int] = ..., thumbnailSize: _Optional[int] = ..., recordKeyType: _Optional[_Union[RecordKeyType, str]] = ..., recordKey: _Optional[bytes] = ..., recordUid: _Optional[bytes] = ...) -> None: ... + def __init__(self, revision: _Optional[int] = ..., version: _Optional[int] = ..., shared: _Optional[bool] = ..., encryptedRecordData: _Optional[str] = ..., encryptedExtraData: _Optional[str] = ..., clientModifiedTime: _Optional[int] = ..., nonSharedData: _Optional[str] = ..., linkedRecordData: _Optional[_Iterable[_Union[RecordData, _Mapping]]] = ..., fileId: _Optional[_Iterable[bytes]] = ..., fileSize: _Optional[int] = ..., thumbnailSize: _Optional[int] = ..., recordKeyType: _Optional[_Union[RecordKeyType, str]] = ..., recordKey: _Optional[bytes] = ..., recordUid: _Optional[bytes] = ...) -> None: ... 
class RecordDataWithAccessInfo(_message.Message): - __slots__ = ["recordUid", "recordData", "userPermission", "sharedFolderPermission"] + __slots__ = ("recordUid", "recordData", "userPermission", "sharedFolderPermission") RECORDUID_FIELD_NUMBER: _ClassVar[int] RECORDDATA_FIELD_NUMBER: _ClassVar[int] USERPERMISSION_FIELD_NUMBER: _ClassVar[int] @@ -677,7 +686,7 @@ class RecordDataWithAccessInfo(_message.Message): def __init__(self, recordUid: _Optional[bytes] = ..., recordData: _Optional[_Union[RecordData, _Mapping]] = ..., userPermission: _Optional[_Iterable[_Union[UserPermission, _Mapping]]] = ..., sharedFolderPermission: _Optional[_Iterable[_Union[SharedFolderPermission, _Mapping]]] = ...) -> None: ... class GetRecordDataWithAccessInfoResponse(_message.Message): - __slots__ = ["recordDataWithAccessInfo", "noPermissionRecordUid"] + __slots__ = ("recordDataWithAccessInfo", "noPermissionRecordUid") RECORDDATAWITHACCESSINFO_FIELD_NUMBER: _ClassVar[int] NOPERMISSIONRECORDUID_FIELD_NUMBER: _ClassVar[int] recordDataWithAccessInfo: _containers.RepeatedCompositeFieldContainer[RecordDataWithAccessInfo] @@ -685,23 +694,23 @@ class GetRecordDataWithAccessInfoResponse(_message.Message): def __init__(self, recordDataWithAccessInfo: _Optional[_Iterable[_Union[RecordDataWithAccessInfo, _Mapping]]] = ..., noPermissionRecordUid: _Optional[_Iterable[bytes]] = ...) -> None: ... class IsObjectShareAdmin(_message.Message): - __slots__ = ["uid", "isAdmin", "objectType"] + __slots__ = ("uid", "isAdmin", "objectType") UID_FIELD_NUMBER: _ClassVar[int] ISADMIN_FIELD_NUMBER: _ClassVar[int] OBJECTTYPE_FIELD_NUMBER: _ClassVar[int] uid: bytes isAdmin: bool objectType: CheckShareAdminObjectType - def __init__(self, uid: _Optional[bytes] = ..., isAdmin: bool = ..., objectType: _Optional[_Union[CheckShareAdminObjectType, str]] = ...) -> None: ... + def __init__(self, uid: _Optional[bytes] = ..., isAdmin: _Optional[bool] = ..., objectType: _Optional[_Union[CheckShareAdminObjectType, str]] = ...) 
-> None: ... class AmIShareAdmin(_message.Message): - __slots__ = ["isObjectShareAdmin"] + __slots__ = ("isObjectShareAdmin",) ISOBJECTSHAREADMIN_FIELD_NUMBER: _ClassVar[int] isObjectShareAdmin: _containers.RepeatedCompositeFieldContainer[IsObjectShareAdmin] def __init__(self, isObjectShareAdmin: _Optional[_Iterable[_Union[IsObjectShareAdmin, _Mapping]]] = ...) -> None: ... class RecordShareUpdateRequest(_message.Message): - __slots__ = ["addSharedRecord", "updateSharedRecord", "removeSharedRecord", "pt"] + __slots__ = ("addSharedRecord", "updateSharedRecord", "removeSharedRecord", "pt") ADDSHAREDRECORD_FIELD_NUMBER: _ClassVar[int] UPDATESHAREDRECORD_FIELD_NUMBER: _ClassVar[int] REMOVESHAREDRECORD_FIELD_NUMBER: _ClassVar[int] @@ -713,7 +722,7 @@ class RecordShareUpdateRequest(_message.Message): def __init__(self, addSharedRecord: _Optional[_Iterable[_Union[SharedRecord, _Mapping]]] = ..., updateSharedRecord: _Optional[_Iterable[_Union[SharedRecord, _Mapping]]] = ..., removeSharedRecord: _Optional[_Iterable[_Union[SharedRecord, _Mapping]]] = ..., pt: _Optional[str] = ...) -> None: ... 
class SharedRecord(_message.Message): - __slots__ = ["toUsername", "recordUid", "recordKey", "sharedFolderUid", "teamUid", "editable", "shareable", "transfer", "useEccKey", "removeVaultData", "expiration", "timerNotificationType", "rotateOnExpiration"] + __slots__ = ("toUsername", "recordUid", "recordKey", "sharedFolderUid", "teamUid", "editable", "shareable", "transfer", "useEccKey", "removeVaultData", "expiration", "timerNotificationType", "rotateOnExpiration") TOUSERNAME_FIELD_NUMBER: _ClassVar[int] RECORDUID_FIELD_NUMBER: _ClassVar[int] RECORDKEY_FIELD_NUMBER: _ClassVar[int] @@ -740,10 +749,10 @@ class SharedRecord(_message.Message): expiration: int timerNotificationType: TimerNotificationType rotateOnExpiration: bool - def __init__(self, toUsername: _Optional[str] = ..., recordUid: _Optional[bytes] = ..., recordKey: _Optional[bytes] = ..., sharedFolderUid: _Optional[bytes] = ..., teamUid: _Optional[bytes] = ..., editable: bool = ..., shareable: bool = ..., transfer: bool = ..., useEccKey: bool = ..., removeVaultData: bool = ..., expiration: _Optional[int] = ..., timerNotificationType: _Optional[_Union[TimerNotificationType, str]] = ..., rotateOnExpiration: bool = ...) -> None: ... + def __init__(self, toUsername: _Optional[str] = ..., recordUid: _Optional[bytes] = ..., recordKey: _Optional[bytes] = ..., sharedFolderUid: _Optional[bytes] = ..., teamUid: _Optional[bytes] = ..., editable: _Optional[bool] = ..., shareable: _Optional[bool] = ..., transfer: _Optional[bool] = ..., useEccKey: _Optional[bool] = ..., removeVaultData: _Optional[bool] = ..., expiration: _Optional[int] = ..., timerNotificationType: _Optional[_Union[TimerNotificationType, str]] = ..., rotateOnExpiration: _Optional[bool] = ...) -> None: ... 
class RecordShareUpdateResponse(_message.Message): - __slots__ = ["addSharedRecordStatus", "updateSharedRecordStatus", "removeSharedRecordStatus"] + __slots__ = ("addSharedRecordStatus", "updateSharedRecordStatus", "removeSharedRecordStatus") ADDSHAREDRECORDSTATUS_FIELD_NUMBER: _ClassVar[int] UPDATESHAREDRECORDSTATUS_FIELD_NUMBER: _ClassVar[int] REMOVESHAREDRECORDSTATUS_FIELD_NUMBER: _ClassVar[int] @@ -753,7 +762,7 @@ class RecordShareUpdateResponse(_message.Message): def __init__(self, addSharedRecordStatus: _Optional[_Iterable[_Union[SharedRecordStatus, _Mapping]]] = ..., updateSharedRecordStatus: _Optional[_Iterable[_Union[SharedRecordStatus, _Mapping]]] = ..., removeSharedRecordStatus: _Optional[_Iterable[_Union[SharedRecordStatus, _Mapping]]] = ...) -> None: ... class SharedRecordStatus(_message.Message): - __slots__ = ["recordUid", "status", "message", "username"] + __slots__ = ("recordUid", "status", "message", "username") RECORDUID_FIELD_NUMBER: _ClassVar[int] STATUS_FIELD_NUMBER: _ClassVar[int] MESSAGE_FIELD_NUMBER: _ClassVar[int] @@ -765,21 +774,21 @@ class SharedRecordStatus(_message.Message): def __init__(self, recordUid: _Optional[bytes] = ..., status: _Optional[str] = ..., message: _Optional[str] = ..., username: _Optional[str] = ...) -> None: ... class GetRecordPermissionsRequest(_message.Message): - __slots__ = ["recordUids", "isShareAdmin"] + __slots__ = ("recordUids", "isShareAdmin") RECORDUIDS_FIELD_NUMBER: _ClassVar[int] ISSHAREADMIN_FIELD_NUMBER: _ClassVar[int] recordUids: _containers.RepeatedScalarFieldContainer[bytes] isShareAdmin: bool - def __init__(self, recordUids: _Optional[_Iterable[bytes]] = ..., isShareAdmin: bool = ...) -> None: ... + def __init__(self, recordUids: _Optional[_Iterable[bytes]] = ..., isShareAdmin: _Optional[bool] = ...) -> None: ... 
class GetRecordPermissionsResponse(_message.Message): - __slots__ = ["recordPermissions"] + __slots__ = ("recordPermissions",) RECORDPERMISSIONS_FIELD_NUMBER: _ClassVar[int] recordPermissions: _containers.RepeatedCompositeFieldContainer[RecordPermission] def __init__(self, recordPermissions: _Optional[_Iterable[_Union[RecordPermission, _Mapping]]] = ...) -> None: ... class RecordPermission(_message.Message): - __slots__ = ["recordUid", "owner", "canEdit", "canShare", "canTransfer"] + __slots__ = ("recordUid", "owner", "canEdit", "canShare", "canTransfer") RECORDUID_FIELD_NUMBER: _ClassVar[int] OWNER_FIELD_NUMBER: _ClassVar[int] CANEDIT_FIELD_NUMBER: _ClassVar[int] @@ -790,10 +799,10 @@ class RecordPermission(_message.Message): canEdit: bool canShare: bool canTransfer: bool - def __init__(self, recordUid: _Optional[bytes] = ..., owner: bool = ..., canEdit: bool = ..., canShare: bool = ..., canTransfer: bool = ...) -> None: ... + def __init__(self, recordUid: _Optional[bytes] = ..., owner: _Optional[bool] = ..., canEdit: _Optional[bool] = ..., canShare: _Optional[bool] = ..., canTransfer: _Optional[bool] = ...) -> None: ... class GetShareObjectsRequest(_message.Message): - __slots__ = ["startWith", "contains", "filtered", "sharedFolderUid"] + __slots__ = ("startWith", "contains", "filtered", "sharedFolderUid") STARTWITH_FIELD_NUMBER: _ClassVar[int] CONTAINS_FIELD_NUMBER: _ClassVar[int] FILTERED_FIELD_NUMBER: _ClassVar[int] @@ -802,10 +811,10 @@ class GetShareObjectsRequest(_message.Message): contains: str filtered: bool sharedFolderUid: bytes - def __init__(self, startWith: _Optional[str] = ..., contains: _Optional[str] = ..., filtered: bool = ..., sharedFolderUid: _Optional[bytes] = ...) -> None: ... + def __init__(self, startWith: _Optional[str] = ..., contains: _Optional[str] = ..., filtered: _Optional[bool] = ..., sharedFolderUid: _Optional[bytes] = ...) -> None: ... 
class GetShareObjectsResponse(_message.Message): - __slots__ = ["shareRelationships", "shareFamilyUsers", "shareEnterpriseUsers", "shareTeams", "shareMCTeams", "shareMCEnterpriseUsers", "shareEnterpriseNames"] + __slots__ = ("shareRelationships", "shareFamilyUsers", "shareEnterpriseUsers", "shareTeams", "shareMCTeams", "shareMCEnterpriseUsers", "shareEnterpriseNames") SHARERELATIONSHIPS_FIELD_NUMBER: _ClassVar[int] SHAREFAMILYUSERS_FIELD_NUMBER: _ClassVar[int] SHAREENTERPRISEUSERS_FIELD_NUMBER: _ClassVar[int] @@ -823,23 +832,25 @@ class GetShareObjectsResponse(_message.Message): def __init__(self, shareRelationships: _Optional[_Iterable[_Union[ShareUser, _Mapping]]] = ..., shareFamilyUsers: _Optional[_Iterable[_Union[ShareUser, _Mapping]]] = ..., shareEnterpriseUsers: _Optional[_Iterable[_Union[ShareUser, _Mapping]]] = ..., shareTeams: _Optional[_Iterable[_Union[ShareTeam, _Mapping]]] = ..., shareMCTeams: _Optional[_Iterable[_Union[ShareTeam, _Mapping]]] = ..., shareMCEnterpriseUsers: _Optional[_Iterable[_Union[ShareUser, _Mapping]]] = ..., shareEnterpriseNames: _Optional[_Iterable[_Union[ShareEnterprise, _Mapping]]] = ...) -> None: ... 
class ShareUser(_message.Message): - __slots__ = ["username", "fullname", "enterpriseId", "status", "isShareAdmin", "isAdminOfSharedFolderOwner"] + __slots__ = ("username", "fullname", "enterpriseId", "status", "isShareAdmin", "isAdminOfSharedFolderOwner", "userAccountUid") USERNAME_FIELD_NUMBER: _ClassVar[int] FULLNAME_FIELD_NUMBER: _ClassVar[int] ENTERPRISEID_FIELD_NUMBER: _ClassVar[int] STATUS_FIELD_NUMBER: _ClassVar[int] ISSHAREADMIN_FIELD_NUMBER: _ClassVar[int] ISADMINOFSHAREDFOLDEROWNER_FIELD_NUMBER: _ClassVar[int] + USERACCOUNTUID_FIELD_NUMBER: _ClassVar[int] username: str fullname: str enterpriseId: int status: ShareStatus isShareAdmin: bool isAdminOfSharedFolderOwner: bool - def __init__(self, username: _Optional[str] = ..., fullname: _Optional[str] = ..., enterpriseId: _Optional[int] = ..., status: _Optional[_Union[ShareStatus, str]] = ..., isShareAdmin: bool = ..., isAdminOfSharedFolderOwner: bool = ...) -> None: ... + userAccountUid: bytes + def __init__(self, username: _Optional[str] = ..., fullname: _Optional[str] = ..., enterpriseId: _Optional[int] = ..., status: _Optional[_Union[ShareStatus, str]] = ..., isShareAdmin: _Optional[bool] = ..., isAdminOfSharedFolderOwner: _Optional[bool] = ..., userAccountUid: _Optional[bytes] = ...) -> None: ... class ShareTeam(_message.Message): - __slots__ = ["teamname", "enterpriseId", "teamUid"] + __slots__ = ("teamname", "enterpriseId", "teamUid") TEAMNAME_FIELD_NUMBER: _ClassVar[int] ENTERPRISEID_FIELD_NUMBER: _ClassVar[int] TEAMUID_FIELD_NUMBER: _ClassVar[int] @@ -849,7 +860,7 @@ class ShareTeam(_message.Message): def __init__(self, teamname: _Optional[str] = ..., enterpriseId: _Optional[int] = ..., teamUid: _Optional[bytes] = ...) -> None: ... 
class ShareEnterprise(_message.Message): - __slots__ = ["enterprisename", "enterpriseId"] + __slots__ = ("enterprisename", "enterpriseId") ENTERPRISENAME_FIELD_NUMBER: _ClassVar[int] ENTERPRISEID_FIELD_NUMBER: _ClassVar[int] enterprisename: str @@ -857,13 +868,13 @@ class ShareEnterprise(_message.Message): def __init__(self, enterprisename: _Optional[str] = ..., enterpriseId: _Optional[int] = ...) -> None: ... class RecordsOnwershipTransferRequest(_message.Message): - __slots__ = ["transferRecords"] + __slots__ = ("transferRecords",) TRANSFERRECORDS_FIELD_NUMBER: _ClassVar[int] transferRecords: _containers.RepeatedCompositeFieldContainer[TransferRecord] def __init__(self, transferRecords: _Optional[_Iterable[_Union[TransferRecord, _Mapping]]] = ...) -> None: ... class TransferRecord(_message.Message): - __slots__ = ["username", "recordUid", "recordKey", "useEccKey"] + __slots__ = ("username", "recordUid", "recordKey", "useEccKey") USERNAME_FIELD_NUMBER: _ClassVar[int] RECORDUID_FIELD_NUMBER: _ClassVar[int] RECORDKEY_FIELD_NUMBER: _ClassVar[int] @@ -872,16 +883,16 @@ class TransferRecord(_message.Message): recordUid: bytes recordKey: bytes useEccKey: bool - def __init__(self, username: _Optional[str] = ..., recordUid: _Optional[bytes] = ..., recordKey: _Optional[bytes] = ..., useEccKey: bool = ...) -> None: ... + def __init__(self, username: _Optional[str] = ..., recordUid: _Optional[bytes] = ..., recordKey: _Optional[bytes] = ..., useEccKey: _Optional[bool] = ...) -> None: ... class RecordsOnwershipTransferResponse(_message.Message): - __slots__ = ["transferRecordStatus"] + __slots__ = ("transferRecordStatus",) TRANSFERRECORDSTATUS_FIELD_NUMBER: _ClassVar[int] transferRecordStatus: _containers.RepeatedCompositeFieldContainer[TransferRecordStatus] def __init__(self, transferRecordStatus: _Optional[_Iterable[_Union[TransferRecordStatus, _Mapping]]] = ...) -> None: ... 
class TransferRecordStatus(_message.Message): - __slots__ = ["username", "recordUid", "status", "message"] + __slots__ = ("username", "recordUid", "status", "message") USERNAME_FIELD_NUMBER: _ClassVar[int] RECORDUID_FIELD_NUMBER: _ClassVar[int] STATUS_FIELD_NUMBER: _ClassVar[int] @@ -893,7 +904,7 @@ class TransferRecordStatus(_message.Message): def __init__(self, username: _Optional[str] = ..., recordUid: _Optional[bytes] = ..., status: _Optional[str] = ..., message: _Optional[str] = ...) -> None: ... class RecordsUnshareRequest(_message.Message): - __slots__ = ["sharedFolders", "users"] + __slots__ = ("sharedFolders", "users") SHAREDFOLDERS_FIELD_NUMBER: _ClassVar[int] USERS_FIELD_NUMBER: _ClassVar[int] sharedFolders: _containers.RepeatedCompositeFieldContainer[RecordsUnshareFolder] @@ -901,7 +912,7 @@ class RecordsUnshareRequest(_message.Message): def __init__(self, sharedFolders: _Optional[_Iterable[_Union[RecordsUnshareFolder, _Mapping]]] = ..., users: _Optional[_Iterable[_Union[RecordsUnshareUser, _Mapping]]] = ...) -> None: ... class RecordsUnshareResponse(_message.Message): - __slots__ = ["sharedFolders", "users"] + __slots__ = ("sharedFolders", "users") SHAREDFOLDERS_FIELD_NUMBER: _ClassVar[int] USERS_FIELD_NUMBER: _ClassVar[int] sharedFolders: _containers.RepeatedCompositeFieldContainer[RecordsUnshareFolderStatus] @@ -909,7 +920,7 @@ class RecordsUnshareResponse(_message.Message): def __init__(self, sharedFolders: _Optional[_Iterable[_Union[RecordsUnshareFolderStatus, _Mapping]]] = ..., users: _Optional[_Iterable[_Union[RecordsUnshareUserStatus, _Mapping]]] = ...) -> None: ... 
class RecordsUnshareFolder(_message.Message): - __slots__ = ["recordUid", "sharedFolderUid"] + __slots__ = ("recordUid", "sharedFolderUid") RECORDUID_FIELD_NUMBER: _ClassVar[int] SHAREDFOLDERUID_FIELD_NUMBER: _ClassVar[int] recordUid: bytes @@ -917,7 +928,7 @@ class RecordsUnshareFolder(_message.Message): def __init__(self, recordUid: _Optional[bytes] = ..., sharedFolderUid: _Optional[bytes] = ...) -> None: ... class RecordsUnshareUser(_message.Message): - __slots__ = ["recordUid", "accountUid"] + __slots__ = ("recordUid", "accountUid") RECORDUID_FIELD_NUMBER: _ClassVar[int] ACCOUNTUID_FIELD_NUMBER: _ClassVar[int] recordUid: bytes @@ -925,7 +936,7 @@ class RecordsUnshareUser(_message.Message): def __init__(self, recordUid: _Optional[bytes] = ..., accountUid: _Optional[bytes] = ...) -> None: ... class RecordsUnshareFolderStatus(_message.Message): - __slots__ = ["recordUid", "sharedFolderUid"] + __slots__ = ("recordUid", "sharedFolderUid") RECORDUID_FIELD_NUMBER: _ClassVar[int] SHAREDFOLDERUID_FIELD_NUMBER: _ClassVar[int] recordUid: bytes @@ -933,7 +944,7 @@ class RecordsUnshareFolderStatus(_message.Message): def __init__(self, recordUid: _Optional[bytes] = ..., sharedFolderUid: _Optional[bytes] = ...) -> None: ... class RecordsUnshareUserStatus(_message.Message): - __slots__ = ["recordUid", "accountUid"] + __slots__ = ("recordUid", "accountUid") RECORDUID_FIELD_NUMBER: _ClassVar[int] ACCOUNTUID_FIELD_NUMBER: _ClassVar[int] recordUid: bytes @@ -941,13 +952,13 @@ class RecordsUnshareUserStatus(_message.Message): def __init__(self, recordUid: _Optional[bytes] = ..., accountUid: _Optional[bytes] = ...) -> None: ... class TimedAccessCallbackPayload(_message.Message): - __slots__ = ["timeLimitedAccessType"] + __slots__ = ("timeLimitedAccessType",) TIMELIMITEDACCESSTYPE_FIELD_NUMBER: _ClassVar[int] timeLimitedAccessType: TimeLimitedAccessType def __init__(self, timeLimitedAccessType: _Optional[_Union[TimeLimitedAccessType, str]] = ...) -> None: ... 
class TimeLimitedAccessRequest(_message.Message): - __slots__ = ["accountUid", "teamUid", "recordUid", "sharedObjectUid", "timeLimitedAccessType", "expiration", "timerNotificationType"] + __slots__ = ("accountUid", "teamUid", "recordUid", "sharedObjectUid", "timeLimitedAccessType", "expiration", "timerNotificationType") ACCOUNTUID_FIELD_NUMBER: _ClassVar[int] TEAMUID_FIELD_NUMBER: _ClassVar[int] RECORDUID_FIELD_NUMBER: _ClassVar[int] @@ -965,7 +976,7 @@ class TimeLimitedAccessRequest(_message.Message): def __init__(self, accountUid: _Optional[_Iterable[bytes]] = ..., teamUid: _Optional[_Iterable[bytes]] = ..., recordUid: _Optional[_Iterable[bytes]] = ..., sharedObjectUid: _Optional[bytes] = ..., timeLimitedAccessType: _Optional[_Union[TimeLimitedAccessType, str]] = ..., expiration: _Optional[int] = ..., timerNotificationType: _Optional[_Union[TimerNotificationType, str]] = ...) -> None: ... class TimeLimitedAccessStatus(_message.Message): - __slots__ = ["uid", "message"] + __slots__ = ("uid", "message") UID_FIELD_NUMBER: _ClassVar[int] MESSAGE_FIELD_NUMBER: _ClassVar[int] uid: bytes @@ -973,7 +984,7 @@ class TimeLimitedAccessStatus(_message.Message): def __init__(self, uid: _Optional[bytes] = ..., message: _Optional[str] = ...) -> None: ... class TimeLimitedAccessResponse(_message.Message): - __slots__ = ["revision", "userAccessStatus", "teamAccessStatus", "recordAccessStatus"] + __slots__ = ("revision", "userAccessStatus", "teamAccessStatus", "recordAccessStatus") REVISION_FIELD_NUMBER: _ClassVar[int] USERACCESSSTATUS_FIELD_NUMBER: _ClassVar[int] TEAMACCESSSTATUS_FIELD_NUMBER: _ClassVar[int] diff --git a/keepercommander/proto/record_sharing_pb2.py b/keepercommander/proto/record_sharing_pb2.py new file mode 100644 index 000000000..00b1bd3a3 --- /dev/null +++ b/keepercommander/proto/record_sharing_pb2.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: record_sharing.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import folder_pb2 as folder__pb2 +from . import tla_pb2 as tla__pb2 +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14record_sharing.proto\x12\x11record.v3.sharing\x1a\x0c\x66older.proto\x1a\ttla.proto\x1a\x1cgoogle/api/annotations.proto\"\xdd\x01\n\x07Request\x12@\n\x18\x63reateSharingPermissions\x18\x01 \x03(\x0b\x32\x1e.record.v3.sharing.Permissions\x12@\n\x18updateSharingPermissions\x18\x02 \x03(\x0b\x32\x1e.record.v3.sharing.Permissions\x12@\n\x18revokeSharingPermissions\x18\x03 \x03(\x0b\x32\x1e.record.v3.sharing.Permissions\x12\x0c\n\x04\x65\x63ho\x18\x04 \x01(\t\"\x85\x01\n\x0bPermissions\x12\x14\n\x0crecipientUid\x18\x01 \x01(\x0c\x12\x11\n\trecordUid\x18\x03 \x01(\x0c\x12\x11\n\trecordKey\x18\x04 \x01(\x0c\x12\x11\n\tuseEccKey\x18\x05 \x01(\x08\x12\'\n\x05rules\x18\x06 \x01(\x0b\x32\x18.Folder.RecordAccessData\"\xb5\x01\n\x08Response\x12\x37\n\x14\x63reatedSharingStatus\x18\x01 \x03(\x0b\x32\x19.record.v3.sharing.Status\x12\x37\n\x14updatedSharingStatus\x18\x02 \x03(\x0b\x32\x19.record.v3.sharing.Status\x12\x37\n\x14revokedSharingStatus\x18\x03 \x03(\x0b\x32\x19.record.v3.sharing.Status\"t\n\x06Status\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x30\n\x06status\x18\x02 \x01(\x0e\x32 .record.v3.sharing.SharingStatus\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x14\n\x0crecipientUid\x18\x04 \x01(\x0c\"4\n\rRevokedAccess\x12\x11\n\trecordUid\x18\x01 \x01(\x0c\x12\x10\n\x08\x61\x63torUid\x18\x02 \x01(\x0c\"o\n\x12RecordSharingState\x12\x11\n\trecordUid\x18\x01 
\x01(\x0c\x12\x18\n\x10isDirectlyShared\x18\x02 \x01(\x08\x12\x1a\n\x12isIndirectlyShared\x18\x03 \x01(\x08\x12\x10\n\x08isShared\x18\x04 \x01(\x08*\xa9\x01\n\rSharingStatus\x12\x0b\n\x07SUCCESS\x10\x00\x12\x12\n\x0ePENDING_ACCEPT\x10\x01\x12\x12\n\x0eUSER_NOT_FOUND\x10\x02\x12\x12\n\x0e\x41LREADY_SHARED\x10\x03\x12\x18\n\x14NOT_ALLOWED_TO_SHARE\x10\x04\x12\x11\n\rACCESS_DENIED\x10\x05\x12\"\n\x1eNOT_ALLOWED_TO_SET_PERMISSIONS\x10\x06\x32\x8b\x01\n\x14RecordSharingService\x12s\n\x0bShareRecord\x12\x1a.record.v3.sharing.Request\x1a\x1b.record.v3.sharing.Response\"+\x82\xd3\xe4\x93\x02%\" /api/rest/vault/records/v3/share:\x01*B2\n.com.keepersecurity.proto.api.record.v3.sharingP\x01\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'record_sharing_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n.com.keepersecurity.proto.api.record.v3.sharingP\001' + _globals['_RECORDSHARINGSERVICE'].methods_by_name['ShareRecord']._loaded_options = None + _globals['_RECORDSHARINGSERVICE'].methods_by_name['ShareRecord']._serialized_options = b'\202\323\344\223\002%\" /api/rest/vault/records/v3/share:\001*' + _globals['_SHARINGSTATUS']._serialized_start=928 + _globals['_SHARINGSTATUS']._serialized_end=1097 + _globals['_REQUEST']._serialized_start=99 + _globals['_REQUEST']._serialized_end=320 + _globals['_PERMISSIONS']._serialized_start=323 + _globals['_PERMISSIONS']._serialized_end=456 + _globals['_RESPONSE']._serialized_start=459 + _globals['_RESPONSE']._serialized_end=640 + _globals['_STATUS']._serialized_start=642 + _globals['_STATUS']._serialized_end=758 + _globals['_REVOKEDACCESS']._serialized_start=760 + _globals['_REVOKEDACCESS']._serialized_end=812 + _globals['_RECORDSHARINGSTATE']._serialized_start=814 + _globals['_RECORDSHARINGSTATE']._serialized_end=925 + 
_globals['_RECORDSHARINGSERVICE']._serialized_start=1100 + _globals['_RECORDSHARINGSERVICE']._serialized_end=1239 +# @@protoc_insertion_point(module_scope) diff --git a/keepercommander/proto/record_sharing_pb2.pyi b/keepercommander/proto/record_sharing_pb2.pyi new file mode 100644 index 000000000..51887fd6b --- /dev/null +++ b/keepercommander/proto/record_sharing_pb2.pyi @@ -0,0 +1,96 @@ +import folder_pb2 as _folder_pb2 +import tla_pb2 as _tla_pb2 +from google.api import annotations_pb2 as _annotations_pb2 +from google.protobuf.internal import containers as _containers +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from collections.abc import Iterable as _Iterable, Mapping as _Mapping +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class SharingStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + SUCCESS: _ClassVar[SharingStatus] + PENDING_ACCEPT: _ClassVar[SharingStatus] + USER_NOT_FOUND: _ClassVar[SharingStatus] + ALREADY_SHARED: _ClassVar[SharingStatus] + NOT_ALLOWED_TO_SHARE: _ClassVar[SharingStatus] + ACCESS_DENIED: _ClassVar[SharingStatus] + NOT_ALLOWED_TO_SET_PERMISSIONS: _ClassVar[SharingStatus] +SUCCESS: SharingStatus +PENDING_ACCEPT: SharingStatus +USER_NOT_FOUND: SharingStatus +ALREADY_SHARED: SharingStatus +NOT_ALLOWED_TO_SHARE: SharingStatus +ACCESS_DENIED: SharingStatus +NOT_ALLOWED_TO_SET_PERMISSIONS: SharingStatus + +class Request(_message.Message): + __slots__ = ("createSharingPermissions", "updateSharingPermissions", "revokeSharingPermissions", "echo") + CREATESHARINGPERMISSIONS_FIELD_NUMBER: _ClassVar[int] + UPDATESHARINGPERMISSIONS_FIELD_NUMBER: _ClassVar[int] + REVOKESHARINGPERMISSIONS_FIELD_NUMBER: _ClassVar[int] + ECHO_FIELD_NUMBER: _ClassVar[int] + createSharingPermissions: 
_containers.RepeatedCompositeFieldContainer[Permissions] + updateSharingPermissions: _containers.RepeatedCompositeFieldContainer[Permissions] + revokeSharingPermissions: _containers.RepeatedCompositeFieldContainer[Permissions] + echo: str + def __init__(self, createSharingPermissions: _Optional[_Iterable[_Union[Permissions, _Mapping]]] = ..., updateSharingPermissions: _Optional[_Iterable[_Union[Permissions, _Mapping]]] = ..., revokeSharingPermissions: _Optional[_Iterable[_Union[Permissions, _Mapping]]] = ..., echo: _Optional[str] = ...) -> None: ... + +class Permissions(_message.Message): + __slots__ = ("recipientUid", "recordUid", "recordKey", "useEccKey", "rules") + RECIPIENTUID_FIELD_NUMBER: _ClassVar[int] + RECORDUID_FIELD_NUMBER: _ClassVar[int] + RECORDKEY_FIELD_NUMBER: _ClassVar[int] + USEECCKEY_FIELD_NUMBER: _ClassVar[int] + RULES_FIELD_NUMBER: _ClassVar[int] + recipientUid: bytes + recordUid: bytes + recordKey: bytes + useEccKey: bool + rules: _folder_pb2.RecordAccessData + def __init__(self, recipientUid: _Optional[bytes] = ..., recordUid: _Optional[bytes] = ..., recordKey: _Optional[bytes] = ..., useEccKey: _Optional[bool] = ..., rules: _Optional[_Union[_folder_pb2.RecordAccessData, _Mapping]] = ...) -> None: ... + +class Response(_message.Message): + __slots__ = ("createdSharingStatus", "updatedSharingStatus", "revokedSharingStatus") + CREATEDSHARINGSTATUS_FIELD_NUMBER: _ClassVar[int] + UPDATEDSHARINGSTATUS_FIELD_NUMBER: _ClassVar[int] + REVOKEDSHARINGSTATUS_FIELD_NUMBER: _ClassVar[int] + createdSharingStatus: _containers.RepeatedCompositeFieldContainer[Status] + updatedSharingStatus: _containers.RepeatedCompositeFieldContainer[Status] + revokedSharingStatus: _containers.RepeatedCompositeFieldContainer[Status] + def __init__(self, createdSharingStatus: _Optional[_Iterable[_Union[Status, _Mapping]]] = ..., updatedSharingStatus: _Optional[_Iterable[_Union[Status, _Mapping]]] = ..., revokedSharingStatus: _Optional[_Iterable[_Union[Status, _Mapping]]] = ...) 
-> None: ... + +class Status(_message.Message): + __slots__ = ("recordUid", "status", "message", "recipientUid") + RECORDUID_FIELD_NUMBER: _ClassVar[int] + STATUS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + RECIPIENTUID_FIELD_NUMBER: _ClassVar[int] + recordUid: bytes + status: SharingStatus + message: str + recipientUid: bytes + def __init__(self, recordUid: _Optional[bytes] = ..., status: _Optional[_Union[SharingStatus, str]] = ..., message: _Optional[str] = ..., recipientUid: _Optional[bytes] = ...) -> None: ... + +class RevokedAccess(_message.Message): + __slots__ = ("recordUid", "actorUid") + RECORDUID_FIELD_NUMBER: _ClassVar[int] + ACTORUID_FIELD_NUMBER: _ClassVar[int] + recordUid: bytes + actorUid: bytes + def __init__(self, recordUid: _Optional[bytes] = ..., actorUid: _Optional[bytes] = ...) -> None: ... + +class RecordSharingState(_message.Message): + __slots__ = ("recordUid", "isDirectlyShared", "isIndirectlyShared", "isShared") + RECORDUID_FIELD_NUMBER: _ClassVar[int] + ISDIRECTLYSHARED_FIELD_NUMBER: _ClassVar[int] + ISINDIRECTLYSHARED_FIELD_NUMBER: _ClassVar[int] + ISSHARED_FIELD_NUMBER: _ClassVar[int] + recordUid: bytes + isDirectlyShared: bool + isIndirectlyShared: bool + isShared: bool + def __init__(self, recordUid: _Optional[bytes] = ..., isDirectlyShared: _Optional[bool] = ..., isIndirectlyShared: _Optional[bool] = ..., isShared: _Optional[bool] = ...) -> None: ... diff --git a/keepercommander/proto/remove_pb2.py b/keepercommander/proto/remove_pb2.py new file mode 100644 index 000000000..f5a48130c --- /dev/null +++ b/keepercommander/proto/remove_pb2.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: remove.proto +# Protobuf Python Version: 6.33.4 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0cremove.proto\x12\x10\x66older.v3.remove\x1a\x1cgoogle/api/annotations.proto\"v\n\rRecordRemoval\x12\x12\n\nfolder_uid\x18\x01 \x01(\x0c\x12\x12\n\nrecord_uid\x18\x02 \x01(\x0c\x12=\n\x0eoperation_type\x18\x03 \x01(\x0e\x32%.folder.v3.remove.RecordOperationType\"b\n\rFolderRemoval\x12\x12\n\nfolder_uid\x18\x01 \x01(\x0c\x12=\n\x0eoperation_type\x18\x02 \x01(\x0e\x32%.folder.v3.remove.FolderOperationType\"\x93\x01\n\x13RemoveRecordRequest\x12.\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x1e.folder.v3.remove.RemoveAction\x12\x30\n\x07records\x18\x02 \x03(\x0b\x32\x1f.folder.v3.remove.RecordRemoval\x12\x1a\n\x12\x63onfirmation_token\x18\x03 \x01(\x0c\"\x93\x01\n\x13RemoveFolderRequest\x12.\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x1e.folder.v3.remove.RemoveAction\x12\x30\n\x07\x66olders\x18\x02 \x03(\x0b\x32\x1f.folder.v3.remove.FolderRemoval\x12\x1a\n\x12\x63onfirmation_token\x18\x03 \x01(\x0c\"\x8e\x01\n\x0eRemoveResponse\x12\x1a\n\x12\x63onfirmation_token\x18\x01 \x01(\x0c\x12\x18\n\x10token_expires_at\x18\x02 \x01(\x03\x12/\n\x07results\x18\x03 \x03(\x0b\x32\x1e.folder.v3.remove.RemoveResult\x12\x15\n\rerror_message\x18\x04 \x01(\t\"\xba\x01\n\x0cRemoveResult\x12\x10\n\x08item_uid\x18\x01 \x01(\x0c\x12\x12\n\nfolder_uid\x18\x02 \x01(\x0c\x12.\n\x06status\x18\x03 \x01(\x0e\x32\x1e.folder.v3.remove.RemoveStatus\x12(\n\x06impact\x18\x04 
\x01(\x0b\x32\x18.folder.v3.remove.Impact\x12*\n\x05\x65rror\x18\x05 \x01(\x0b\x32\x1b.folder.v3.remove.ItemError\"\xb7\x01\n\x06Impact\x12\x15\n\rfolders_count\x18\x01 \x01(\x05\x12\x15\n\rrecords_count\x18\x02 \x01(\x05\x12\x1c\n\x14\x61\x66\x66\x65\x63ted_users_count\x18\x03 \x01(\x05\x12\x1c\n\x14\x61\x66\x66\x65\x63ted_teams_count\x18\x04 \x01(\x05\x12\x31\n\x0brecord_info\x18\x05 \x03(\x0b\x32\x1c.folder.v3.remove.RecordInfo\x12\x10\n\x08warnings\x18\x06 \x03(\t\"9\n\nRecordInfo\x12\x12\n\nrecord_uid\x18\x01 \x01(\x0c\x12\x17\n\x0flocations_count\x18\x02 \x01(\x05\"M\n\tItemError\x12/\n\x04\x63ode\x18\x01 \x01(\x0e\x32!.folder.v3.remove.RemoveErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\"\xa7\x01\n\x13RemovalTokenPayload\x12<\n\x11item_fingerprints\x18\x01 \x03(\x0b\x32!.folder.v3.remove.ItemFingerprint\x12\x0f\n\x07user_id\x18\x02 \x01(\x05\x12\x11\n\tdevice_id\x18\x03 \x01(\x03\x12\x13\n\x0bsession_uid\x18\x04 \x01(\x0c\x12\x19\n\x11\x65xpires_at_millis\x18\x05 \x01(\x03\"\x94\x01\n\x0fItemFingerprint\x12\x30\n\x06record\x18\x01 \x01(\x0b\x32\x1e.folder.v3.remove.RecordTargetH\x00\x12\x30\n\x06\x66older\x18\x02 \x01(\x0b\x32\x1e.folder.v3.remove.FolderTargetH\x00\x12\x13\n\x0b\x66ingerprint\x18\n \x01(\x0c\x42\x08\n\x06target\"u\n\x0cRecordTarget\x12\x12\n\nfolder_uid\x18\x01 \x01(\x0c\x12\x12\n\nrecord_uid\x18\x02 \x01(\x0c\x12=\n\x0eoperation_type\x18\x03 \x01(\x0e\x32%.folder.v3.remove.RecordOperationType\"a\n\x0c\x46olderTarget\x12\x12\n\nfolder_uid\x18\x01 \x01(\x0c\x12=\n\x0eoperation_type\x18\x02 
\x01(\x0e\x32%.folder.v3.remove.FolderOperationType*D\n\x0cRemoveAction\x12\x19\n\x15REMOVE_ACTION_PREVIEW\x10\x00\x12\x19\n\x15REMOVE_ACTION_CONFIRM\x10\x01*~\n\x13RecordOperationType\x12\x1c\n\x18RECORD_OPERATION_UNKNOWN\x10\x00\x12\x16\n\x12UNLINK_FROM_FOLDER\x10\x01\x12\x18\n\x14MOVE_TO_FOLDER_TRASH\x10\x02\x12\x17\n\x13MOVE_TO_OWNER_TRASH\x10\x03*\x91\x01\n\x13\x46olderOperationType\x12\x1c\n\x18\x46OLDER_OPERATION_UNKNOWN\x10\x00\x12\x1f\n\x1b\x46OLDER_MOVE_TO_FOLDER_TRASH\x10\x01\x12\x1e\n\x1a\x46OLDER_MOVE_TO_OWNER_TRASH\x10\x02\x12\x1b\n\x17\x46OLDER_DELETE_PERMANENT\x10\x03*\xcb\x01\n\x0fRemoveErrorCode\x12\x18\n\x14REMOVE_ERROR_UNKNOWN\x10\x00\x12\x1a\n\x16REMOVE_ERROR_NOT_FOUND\x10\x01\x12\x1e\n\x1aREMOVE_ERROR_ACCESS_DENIED\x10\x02\x12 \n\x1cREMOVE_ERROR_TRASHCAN_FOLDER\x10\x03\x12\x1c\n\x18REMOVE_ERROR_ROOT_FOLDER\x10\x04\x12\"\n\x1eREMOVE_ERROR_DESCENDANT_DENIED\x10\x05*\xec\x01\n\x0cRemoveStatus\x12\x19\n\x15REMOVE_STATUS_UNKNOWN\x10\x00\x12\x19\n\x15REMOVE_STATUS_SUCCESS\x10\x01\x12\x1f\n\x1bREMOVE_STATUS_STALE_PREVIEW\x10\x02\x12\x1f\n\x1bREMOVE_STATUS_TOKEN_EXPIRED\x10\x03\x12\x1f\n\x1bREMOVE_STATUS_TOKEN_INVALID\x10\x04\x12\x1f\n\x1bREMOVE_STATUS_ACCESS_DENIED\x10\x05\x12\"\n\x1eREMOVE_STATUS_VALIDATION_ERROR\x10\x06\x32\xad\x02\n\rRemoveService\x12\x8c\x01\n\x0cRemoveRecord\x12%.folder.v3.remove.RemoveRecordRequest\x1a .folder.v3.remove.RemoveResponse\"3\x82\xd3\xe4\x93\x02-\"(/api/rest/vault/folders/v3/remove_record:\x01*\x12\x8c\x01\n\x0cRemoveFolder\x12%.folder.v3.remove.RemoveFolderRequest\x1a .folder.v3.remove.RemoveResponse\"3\x82\xd3\xe4\x93\x02-\"(/api/rest/vault/folders/v3/remove_folder:\x01*B1\n-com.keepersecurity.proto.api.folder.v3.removeP\x01\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'remove_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + 
_globals['DESCRIPTOR']._serialized_options = b'\n-com.keepersecurity.proto.api.folder.v3.removeP\001' + _globals['_REMOVESERVICE'].methods_by_name['RemoveRecord']._loaded_options = None + _globals['_REMOVESERVICE'].methods_by_name['RemoveRecord']._serialized_options = b'\202\323\344\223\002-\"(/api/rest/vault/folders/v3/remove_record:\001*' + _globals['_REMOVESERVICE'].methods_by_name['RemoveFolder']._loaded_options = None + _globals['_REMOVESERVICE'].methods_by_name['RemoveFolder']._serialized_options = b'\202\323\344\223\002-\"(/api/rest/vault/folders/v3/remove_folder:\001*' + _globals['_REMOVEACTION']._serialized_start=1781 + _globals['_REMOVEACTION']._serialized_end=1849 + _globals['_RECORDOPERATIONTYPE']._serialized_start=1851 + _globals['_RECORDOPERATIONTYPE']._serialized_end=1977 + _globals['_FOLDEROPERATIONTYPE']._serialized_start=1980 + _globals['_FOLDEROPERATIONTYPE']._serialized_end=2125 + _globals['_REMOVEERRORCODE']._serialized_start=2128 + _globals['_REMOVEERRORCODE']._serialized_end=2331 + _globals['_REMOVESTATUS']._serialized_start=2334 + _globals['_REMOVESTATUS']._serialized_end=2570 + _globals['_RECORDREMOVAL']._serialized_start=64 + _globals['_RECORDREMOVAL']._serialized_end=182 + _globals['_FOLDERREMOVAL']._serialized_start=184 + _globals['_FOLDERREMOVAL']._serialized_end=282 + _globals['_REMOVERECORDREQUEST']._serialized_start=285 + _globals['_REMOVERECORDREQUEST']._serialized_end=432 + _globals['_REMOVEFOLDERREQUEST']._serialized_start=435 + _globals['_REMOVEFOLDERREQUEST']._serialized_end=582 + _globals['_REMOVERESPONSE']._serialized_start=585 + _globals['_REMOVERESPONSE']._serialized_end=727 + _globals['_REMOVERESULT']._serialized_start=730 + _globals['_REMOVERESULT']._serialized_end=916 + _globals['_IMPACT']._serialized_start=919 + _globals['_IMPACT']._serialized_end=1102 + _globals['_RECORDINFO']._serialized_start=1104 + _globals['_RECORDINFO']._serialized_end=1161 + _globals['_ITEMERROR']._serialized_start=1163 + 
_globals['_ITEMERROR']._serialized_end=1240 + _globals['_REMOVALTOKENPAYLOAD']._serialized_start=1243 + _globals['_REMOVALTOKENPAYLOAD']._serialized_end=1410 + _globals['_ITEMFINGERPRINT']._serialized_start=1413 + _globals['_ITEMFINGERPRINT']._serialized_end=1561 + _globals['_RECORDTARGET']._serialized_start=1563 + _globals['_RECORDTARGET']._serialized_end=1680 + _globals['_FOLDERTARGET']._serialized_start=1682 + _globals['_FOLDERTARGET']._serialized_end=1779 + _globals['_REMOVESERVICE']._serialized_start=2573 + _globals['_REMOVESERVICE']._serialized_end=2874 +# @@protoc_insertion_point(module_scope) diff --git a/keepercommander/proto/remove_pb2.pyi b/keepercommander/proto/remove_pb2.pyi new file mode 100644 index 000000000..c88bbfd0b --- /dev/null +++ b/keepercommander/proto/remove_pb2.pyi @@ -0,0 +1,208 @@ +from google.api import annotations_pb2 as _annotations_pb2 +from google.protobuf.internal import containers as _containers +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from collections.abc import Iterable as _Iterable, Mapping as _Mapping +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class RemoveAction(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + REMOVE_ACTION_PREVIEW: _ClassVar[RemoveAction] + REMOVE_ACTION_CONFIRM: _ClassVar[RemoveAction] + +class RecordOperationType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + RECORD_OPERATION_UNKNOWN: _ClassVar[RecordOperationType] + UNLINK_FROM_FOLDER: _ClassVar[RecordOperationType] + MOVE_TO_FOLDER_TRASH: _ClassVar[RecordOperationType] + MOVE_TO_OWNER_TRASH: _ClassVar[RecordOperationType] + +class FolderOperationType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + FOLDER_OPERATION_UNKNOWN: _ClassVar[FolderOperationType] + 
FOLDER_MOVE_TO_FOLDER_TRASH: _ClassVar[FolderOperationType] + FOLDER_MOVE_TO_OWNER_TRASH: _ClassVar[FolderOperationType] + FOLDER_DELETE_PERMANENT: _ClassVar[FolderOperationType] + +class RemoveErrorCode(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + REMOVE_ERROR_UNKNOWN: _ClassVar[RemoveErrorCode] + REMOVE_ERROR_NOT_FOUND: _ClassVar[RemoveErrorCode] + REMOVE_ERROR_ACCESS_DENIED: _ClassVar[RemoveErrorCode] + REMOVE_ERROR_TRASHCAN_FOLDER: _ClassVar[RemoveErrorCode] + REMOVE_ERROR_ROOT_FOLDER: _ClassVar[RemoveErrorCode] + REMOVE_ERROR_DESCENDANT_DENIED: _ClassVar[RemoveErrorCode] + +class RemoveStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + REMOVE_STATUS_UNKNOWN: _ClassVar[RemoveStatus] + REMOVE_STATUS_SUCCESS: _ClassVar[RemoveStatus] + REMOVE_STATUS_STALE_PREVIEW: _ClassVar[RemoveStatus] + REMOVE_STATUS_TOKEN_EXPIRED: _ClassVar[RemoveStatus] + REMOVE_STATUS_TOKEN_INVALID: _ClassVar[RemoveStatus] + REMOVE_STATUS_ACCESS_DENIED: _ClassVar[RemoveStatus] + REMOVE_STATUS_VALIDATION_ERROR: _ClassVar[RemoveStatus] +REMOVE_ACTION_PREVIEW: RemoveAction +REMOVE_ACTION_CONFIRM: RemoveAction +RECORD_OPERATION_UNKNOWN: RecordOperationType +UNLINK_FROM_FOLDER: RecordOperationType +MOVE_TO_FOLDER_TRASH: RecordOperationType +MOVE_TO_OWNER_TRASH: RecordOperationType +FOLDER_OPERATION_UNKNOWN: FolderOperationType +FOLDER_MOVE_TO_FOLDER_TRASH: FolderOperationType +FOLDER_MOVE_TO_OWNER_TRASH: FolderOperationType +FOLDER_DELETE_PERMANENT: FolderOperationType +REMOVE_ERROR_UNKNOWN: RemoveErrorCode +REMOVE_ERROR_NOT_FOUND: RemoveErrorCode +REMOVE_ERROR_ACCESS_DENIED: RemoveErrorCode +REMOVE_ERROR_TRASHCAN_FOLDER: RemoveErrorCode +REMOVE_ERROR_ROOT_FOLDER: RemoveErrorCode +REMOVE_ERROR_DESCENDANT_DENIED: RemoveErrorCode +REMOVE_STATUS_UNKNOWN: RemoveStatus +REMOVE_STATUS_SUCCESS: RemoveStatus +REMOVE_STATUS_STALE_PREVIEW: RemoveStatus +REMOVE_STATUS_TOKEN_EXPIRED: RemoveStatus +REMOVE_STATUS_TOKEN_INVALID: RemoveStatus 
+REMOVE_STATUS_ACCESS_DENIED: RemoveStatus +REMOVE_STATUS_VALIDATION_ERROR: RemoveStatus + +class RecordRemoval(_message.Message): + __slots__ = ("folder_uid", "record_uid", "operation_type") + FOLDER_UID_FIELD_NUMBER: _ClassVar[int] + RECORD_UID_FIELD_NUMBER: _ClassVar[int] + OPERATION_TYPE_FIELD_NUMBER: _ClassVar[int] + folder_uid: bytes + record_uid: bytes + operation_type: RecordOperationType + def __init__(self, folder_uid: _Optional[bytes] = ..., record_uid: _Optional[bytes] = ..., operation_type: _Optional[_Union[RecordOperationType, str]] = ...) -> None: ... + +class FolderRemoval(_message.Message): + __slots__ = ("folder_uid", "operation_type") + FOLDER_UID_FIELD_NUMBER: _ClassVar[int] + OPERATION_TYPE_FIELD_NUMBER: _ClassVar[int] + folder_uid: bytes + operation_type: FolderOperationType + def __init__(self, folder_uid: _Optional[bytes] = ..., operation_type: _Optional[_Union[FolderOperationType, str]] = ...) -> None: ... + +class RemoveRecordRequest(_message.Message): + __slots__ = ("action", "records", "confirmation_token") + ACTION_FIELD_NUMBER: _ClassVar[int] + RECORDS_FIELD_NUMBER: _ClassVar[int] + CONFIRMATION_TOKEN_FIELD_NUMBER: _ClassVar[int] + action: RemoveAction + records: _containers.RepeatedCompositeFieldContainer[RecordRemoval] + confirmation_token: bytes + def __init__(self, action: _Optional[_Union[RemoveAction, str]] = ..., records: _Optional[_Iterable[_Union[RecordRemoval, _Mapping]]] = ..., confirmation_token: _Optional[bytes] = ...) -> None: ... 
+ +class RemoveFolderRequest(_message.Message): + __slots__ = ("action", "folders", "confirmation_token") + ACTION_FIELD_NUMBER: _ClassVar[int] + FOLDERS_FIELD_NUMBER: _ClassVar[int] + CONFIRMATION_TOKEN_FIELD_NUMBER: _ClassVar[int] + action: RemoveAction + folders: _containers.RepeatedCompositeFieldContainer[FolderRemoval] + confirmation_token: bytes + def __init__(self, action: _Optional[_Union[RemoveAction, str]] = ..., folders: _Optional[_Iterable[_Union[FolderRemoval, _Mapping]]] = ..., confirmation_token: _Optional[bytes] = ...) -> None: ... + +class RemoveResponse(_message.Message): + __slots__ = ("confirmation_token", "token_expires_at", "results", "error_message") + CONFIRMATION_TOKEN_FIELD_NUMBER: _ClassVar[int] + TOKEN_EXPIRES_AT_FIELD_NUMBER: _ClassVar[int] + RESULTS_FIELD_NUMBER: _ClassVar[int] + ERROR_MESSAGE_FIELD_NUMBER: _ClassVar[int] + confirmation_token: bytes + token_expires_at: int + results: _containers.RepeatedCompositeFieldContainer[RemoveResult] + error_message: str + def __init__(self, confirmation_token: _Optional[bytes] = ..., token_expires_at: _Optional[int] = ..., results: _Optional[_Iterable[_Union[RemoveResult, _Mapping]]] = ..., error_message: _Optional[str] = ...) -> None: ... + +class RemoveResult(_message.Message): + __slots__ = ("item_uid", "folder_uid", "status", "impact", "error") + ITEM_UID_FIELD_NUMBER: _ClassVar[int] + FOLDER_UID_FIELD_NUMBER: _ClassVar[int] + STATUS_FIELD_NUMBER: _ClassVar[int] + IMPACT_FIELD_NUMBER: _ClassVar[int] + ERROR_FIELD_NUMBER: _ClassVar[int] + item_uid: bytes + folder_uid: bytes + status: RemoveStatus + impact: Impact + error: ItemError + def __init__(self, item_uid: _Optional[bytes] = ..., folder_uid: _Optional[bytes] = ..., status: _Optional[_Union[RemoveStatus, str]] = ..., impact: _Optional[_Union[Impact, _Mapping]] = ..., error: _Optional[_Union[ItemError, _Mapping]] = ...) -> None: ... 
+ +class Impact(_message.Message): + __slots__ = ("folders_count", "records_count", "affected_users_count", "affected_teams_count", "record_info", "warnings") + FOLDERS_COUNT_FIELD_NUMBER: _ClassVar[int] + RECORDS_COUNT_FIELD_NUMBER: _ClassVar[int] + AFFECTED_USERS_COUNT_FIELD_NUMBER: _ClassVar[int] + AFFECTED_TEAMS_COUNT_FIELD_NUMBER: _ClassVar[int] + RECORD_INFO_FIELD_NUMBER: _ClassVar[int] + WARNINGS_FIELD_NUMBER: _ClassVar[int] + folders_count: int + records_count: int + affected_users_count: int + affected_teams_count: int + record_info: _containers.RepeatedCompositeFieldContainer[RecordInfo] + warnings: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, folders_count: _Optional[int] = ..., records_count: _Optional[int] = ..., affected_users_count: _Optional[int] = ..., affected_teams_count: _Optional[int] = ..., record_info: _Optional[_Iterable[_Union[RecordInfo, _Mapping]]] = ..., warnings: _Optional[_Iterable[str]] = ...) -> None: ... + +class RecordInfo(_message.Message): + __slots__ = ("record_uid", "locations_count") + RECORD_UID_FIELD_NUMBER: _ClassVar[int] + LOCATIONS_COUNT_FIELD_NUMBER: _ClassVar[int] + record_uid: bytes + locations_count: int + def __init__(self, record_uid: _Optional[bytes] = ..., locations_count: _Optional[int] = ...) -> None: ... + +class ItemError(_message.Message): + __slots__ = ("code", "message") + CODE_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + code: RemoveErrorCode + message: str + def __init__(self, code: _Optional[_Union[RemoveErrorCode, str]] = ..., message: _Optional[str] = ...) -> None: ... 
+ +class RemovalTokenPayload(_message.Message): + __slots__ = ("item_fingerprints", "user_id", "device_id", "session_uid", "expires_at_millis") + ITEM_FINGERPRINTS_FIELD_NUMBER: _ClassVar[int] + USER_ID_FIELD_NUMBER: _ClassVar[int] + DEVICE_ID_FIELD_NUMBER: _ClassVar[int] + SESSION_UID_FIELD_NUMBER: _ClassVar[int] + EXPIRES_AT_MILLIS_FIELD_NUMBER: _ClassVar[int] + item_fingerprints: _containers.RepeatedCompositeFieldContainer[ItemFingerprint] + user_id: int + device_id: int + session_uid: bytes + expires_at_millis: int + def __init__(self, item_fingerprints: _Optional[_Iterable[_Union[ItemFingerprint, _Mapping]]] = ..., user_id: _Optional[int] = ..., device_id: _Optional[int] = ..., session_uid: _Optional[bytes] = ..., expires_at_millis: _Optional[int] = ...) -> None: ... + +class ItemFingerprint(_message.Message): + __slots__ = ("record", "folder", "fingerprint") + RECORD_FIELD_NUMBER: _ClassVar[int] + FOLDER_FIELD_NUMBER: _ClassVar[int] + FINGERPRINT_FIELD_NUMBER: _ClassVar[int] + record: RecordTarget + folder: FolderTarget + fingerprint: bytes + def __init__(self, record: _Optional[_Union[RecordTarget, _Mapping]] = ..., folder: _Optional[_Union[FolderTarget, _Mapping]] = ..., fingerprint: _Optional[bytes] = ...) -> None: ... + +class RecordTarget(_message.Message): + __slots__ = ("folder_uid", "record_uid", "operation_type") + FOLDER_UID_FIELD_NUMBER: _ClassVar[int] + RECORD_UID_FIELD_NUMBER: _ClassVar[int] + OPERATION_TYPE_FIELD_NUMBER: _ClassVar[int] + folder_uid: bytes + record_uid: bytes + operation_type: RecordOperationType + def __init__(self, folder_uid: _Optional[bytes] = ..., record_uid: _Optional[bytes] = ..., operation_type: _Optional[_Union[RecordOperationType, str]] = ...) -> None: ... 
+ +class FolderTarget(_message.Message): + __slots__ = ("folder_uid", "operation_type") + FOLDER_UID_FIELD_NUMBER: _ClassVar[int] + OPERATION_TYPE_FIELD_NUMBER: _ClassVar[int] + folder_uid: bytes + operation_type: FolderOperationType + def __init__(self, folder_uid: _Optional[bytes] = ..., operation_type: _Optional[_Union[FolderOperationType, str]] = ...) -> None: ... diff --git a/keepercommander/proto/tla_pb2.py b/keepercommander/proto/tla_pb2.py new file mode 100644 index 000000000..afb757bdf --- /dev/null +++ b/keepercommander/proto/tla_pb2.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: tla.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\ttla.proto\x12\ncommon.tla\"\x81\x01\n\rTLAProperties\x12\x12\n\nexpiration\x18\x01 \x01(\x03\x12@\n\x15timerNotificationType\x18\x02 \x01(\x0e\x32!.common.tla.TimerNotificationType\x12\x1a\n\x12rotateOnExpiration\x18\x03 \x01(\x08*\\\n\x15TimerNotificationType\x12\x14\n\x10NOTIFICATION_OFF\x10\x00\x12\x10\n\x0cNOTIFY_OWNER\x10\x01\x12\x1b\n\x17NOTIFY_PRIVILEGED_USERS\x10\x02\x42 \n\x1c\x63om.keepersecurity.proto.tlaP\x01\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tla_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\034com.keepersecurity.proto.tlaP\001' + _globals['_TIMERNOTIFICATIONTYPE']._serialized_start=157 + _globals['_TIMERNOTIFICATIONTYPE']._serialized_end=249 + 
_globals['_TLAPROPERTIES']._serialized_start=26 + _globals['_TLAPROPERTIES']._serialized_end=155 +# @@protoc_insertion_point(module_scope) diff --git a/keepercommander/proto/tla_pb2.pyi b/keepercommander/proto/tla_pb2.pyi new file mode 100644 index 000000000..7075a1c31 --- /dev/null +++ b/keepercommander/proto/tla_pb2.pyi @@ -0,0 +1,25 @@ +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class TimerNotificationType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + NOTIFICATION_OFF: _ClassVar[TimerNotificationType] + NOTIFY_OWNER: _ClassVar[TimerNotificationType] + NOTIFY_PRIVILEGED_USERS: _ClassVar[TimerNotificationType] +NOTIFICATION_OFF: TimerNotificationType +NOTIFY_OWNER: TimerNotificationType +NOTIFY_PRIVILEGED_USERS: TimerNotificationType + +class TLAProperties(_message.Message): + __slots__ = ("expiration", "timerNotificationType", "rotateOnExpiration") + EXPIRATION_FIELD_NUMBER: _ClassVar[int] + TIMERNOTIFICATIONTYPE_FIELD_NUMBER: _ClassVar[int] + ROTATEONEXPIRATION_FIELD_NUMBER: _ClassVar[int] + expiration: int + timerNotificationType: TimerNotificationType + rotateOnExpiration: bool + def __init__(self, expiration: _Optional[int] = ..., timerNotificationType: _Optional[_Union[TimerNotificationType, str]] = ..., rotateOnExpiration: _Optional[bool] = ...) -> None: ... diff --git a/keepercommander/rest_api.py b/keepercommander/rest_api.py index 5f9314dc9..b56fb8cdc 100644 --- a/keepercommander/rest_api.py +++ b/keepercommander/rest_api.py @@ -26,7 +26,7 @@ from . 
import crypto, utils from cryptography.hazmat.primitives.asymmetric import rsa, ec -CLIENT_VERSION = 'c17.2.0' +CLIENT_VERSION = 'c18.0.0' SERVER_PUBLIC_KEYS = { 1: crypto.load_rsa_public_key(utils.base64_url_decode( diff --git a/keepercommander/subfolder.py b/keepercommander/subfolder.py index 3516fa752..673b940c0 100644 --- a/keepercommander/subfolder.py +++ b/keepercommander/subfolder.py @@ -89,10 +89,23 @@ def contained_folders(params, folders, component): """Return list of folders (empty if component not present) containing component within parent folder 'folder'""" get_folder_by_id = lambda uid: params.folder_cache.get(uid) get_folder_ids = lambda: params.folder_cache.keys() - result = folders if component in ('.', '') \ - else [(get_folder_by_id(f.parent_uid) if f.parent_uid else params.root_folder) for f in folders] if component == '..' \ - else [get_folder_by_id(component)] if component in get_folder_ids() \ - else [get_folder_by_id(uid) for f in folders for uid in f.subfolders if get_folder_by_id(uid).name == component] + + if component in ('.', ''): + result = folders + elif component == '..': + # Handle parent navigation, accounting for KD folders with invalid parent_uid + result = [] + for f in folders: + if f.parent_uid: + parent = get_folder_by_id(f.parent_uid) + result.append(parent if parent else params.root_folder) + else: + result.append(params.root_folder) + elif component in get_folder_ids(): + result = [get_folder_by_id(component)] + else: + result = [get_folder_by_id(uid) for f in folders for uid in f.subfolders if get_folder_by_id(uid).name == component] + return result @@ -220,6 +233,7 @@ class BaseFolderNode: UserFolderType = 'user_folder' SharedFolderType = 'shared_folder' SharedFolderFolderType = 'shared_folder_folder' + KeeperDriveFolderType = 'keeper_drive_folder' """ Folder Common Fields""" def __init__(self, type): @@ -239,6 +253,8 @@ def get_folder_type(self): return 'Shared Folder' elif self.type == 
BaseFolderNode.SharedFolderFolderType: return 'Subfolder in Shared Folder' + elif self.type == BaseFolderNode.KeeperDriveFolderType: + return 'KeeperDrive Folder' return '' def __repr__(self): @@ -252,13 +268,17 @@ def __repr__(self): def display(self): print('') - print('{0:>20s}: {1:<20s}'.format('Folder UID', self.uid)) - print('{0:>20s}: {1:<20s}'.format('Folder Type', self.get_folder_type())) - print('{0:>20s}: {1}'.format('Name', self.name)) + if self.type == BaseFolderNode.KeeperDriveFolderType: + uid_label = 'KeeperDrive Folder UID' + else: + uid_label = 'Folder UID' + print('{0:>25s}: {1:<20s}'.format(uid_label, self.uid)) + print('{0:>25s}: {1:<20s}'.format('Folder Type', self.get_folder_type())) + print('{0:>25s}: {1}'.format('Name', self.name)) if self.parent_uid: - print('{0:>20s}: {1:<20s}'.format('Parent Folder UID', self.parent_uid)) + print('{0:>25s}: {1:<20s}'.format('Parent Folder UID', self.parent_uid)) if isinstance(self, SharedFolderFolderNode): - print('{0:>20s}: {1:<20s}'.format('Shared Folder UID', self.shared_folder_uid)) + print('{0:>25s}: {1:<20s}'.format('Shared Folder UID', self.shared_folder_uid)) class UserFolderNode(BaseFolderNode): @@ -266,6 +286,11 @@ def __init__(self): BaseFolderNode.__init__(self, BaseFolderNode.UserFolderType) +class KeeperDriveFolderNode(BaseFolderNode): + def __init__(self): + BaseFolderNode.__init__(self, BaseFolderNode.KeeperDriveFolderType) + + class SharedFolderFolderNode(BaseFolderNode): def __init__(self): BaseFolderNode.__init__(self, BaseFolderNode.SharedFolderFolderType) diff --git a/keepercommander/sync_down.py b/keepercommander/sync_down.py index bb76d0515..fde03562c 100644 --- a/keepercommander/sync_down.py +++ b/keepercommander/sync_down.py @@ -16,10 +16,11 @@ import google from . 
import api, utils, crypto, convert_keys +from .keeper_drive import sync as keeper_drive_sync from .display import bcolors, Spinner from .params import KeeperParams, RecordOwner -from .proto import SyncDown_pb2, record_pb2, client_pb2, breachwatch_pb2 -from .subfolder import RootFolderNode, UserFolderNode, SharedFolderNode, SharedFolderFolderNode, BaseFolderNode +from .proto import SyncDown_pb2, record_pb2, client_pb2, breachwatch_pb2, folder_pb2 +from .subfolder import RootFolderNode, UserFolderNode, SharedFolderNode, SharedFolderFolderNode, BaseFolderNode, KeeperDriveFolderNode from .vault import KeeperRecord @@ -74,6 +75,10 @@ def delete_team_key(team_uid): resp_bw_recs = [] # type: List[SyncDown_pb2.BreachWatchRecord] resp_sec_data_recs = [] # type: List[SyncDown_pb2.BreachWatchSecurityData] resp_sec_scores = [] # type: List[SyncDown_pb2.SecurityScoreData] + record_rotation_items = [] # type: List[record_pb2.RecordRotation] + kd_enabled = not params.is_feature_disallowed('keeper_drive') + kd_acc = keeper_drive_sync.create_accumulator() if kd_enabled else None + request = SyncDown_pb2.SyncDownRequest() revision = params.revision full_sync = False @@ -99,6 +104,8 @@ def delete_team_key(team_uid): params.breach_watch_security_data.clear() params.breach_watch_records.clear() params.security_score_data.clear() + if kd_enabled: + keeper_drive_sync.clear_caches(params) if len(response.removedRecords) > 0: logging.debug('Processing removed records') @@ -562,6 +569,11 @@ def convert_user_folder_shared_folder(ufsf): if len(response.securityScoreData) > 0: resp_sec_scores.extend(response.securityScoreData) + if kd_enabled: + keeper_drive_sync.collect_from_response( + kd_acc, response, resp_bw_recs, resp_sec_data_recs, resp_sec_scores, record_rotation_items + ) + if len(response.removedUsers) > 0: for a_uid in response.removedUsers: account_uid = utils.base64_url_encode(a_uid) @@ -577,25 +589,15 @@ def convert_user_folder_shared_folder(ufsf): params.user_cache[account_uid] 
= params.user if len(response.recordRotations) > 0: - for rr in response.recordRotations: - record_uid = utils.base64_url_encode(rr.recordUid) - rr_obj = { - 'record_uid': record_uid, - 'revision': rr.revision, - 'configuration_uid': utils.base64_url_encode(rr.configurationUid), - 'schedule': rr.schedule, - 'pwd_complexity': utils.base64_url_encode(rr.pwdComplexity), - 'disabled': rr.disabled, - 'resource_uid': utils.base64_url_encode(rr.resourceUid), - 'last_rotation': rr.lastRotation, - 'last_rotation_status': rr.lastRotationStatus, - } - params.record_rotation_cache[record_uid] = rr_obj + record_rotation_items.extend(response.recordRotations) params.sync_down_token = response.continuationToken params.revision = revision + if kd_enabled: + keeper_drive_sync.process(params, kd_acc) + for sf in params.shared_folder_cache.values(): owner = sf.get('owner_username') if not owner: @@ -946,18 +948,20 @@ def convert_user_folder_shared_folder(ufsf): if shared_folder_uid in params.shared_folder_cache: shared_folder = params.shared_folder_cache[shared_folder_uid] encrypted_key = utils.base64_url_decode(sf['shared_folder_folder_key']) - sf['folder_key_unencrypted'] = crypto.decrypt_aes_v1(encrypted_key, shared_folder['shared_folder_key_unencrypted']) + sf['folder_key_unencrypted'] = crypto.decrypt_aes_v1(encrypted_key, shared_folder.get('shared_folder_key_unencrypted')) except Exception as e: logging.debug('Shared folder folder %s data decryption error: %s', sf['folder_uid'], e) else: continue if 'folder_key_unencrypted' in sf: if 'data_unencrypted' not in sf: - try: - data_encrypted = utils.base64_url_decode(sf['data']) - sf['data_unencrypted'] = crypto.decrypt_aes_v1(data_encrypted, sf['folder_key_unencrypted']) - except Exception as e: - logging.debug('Error decrypting shared folder folder %s data: %s', sf['folder_uid'], e) + data_b64 = sf.get('data') + if data_b64: + try: + data_encrypted = utils.base64_url_decode(data_b64) + sf['data_unencrypted'] = 
crypto.decrypt_aes_v1(data_encrypted, sf['folder_key_unencrypted']) + except Exception as e: + logging.debug('Error decrypting shared folder folder %s data: %s', sf['folder_uid'], e) prepare_folder_tree(params) @@ -1044,6 +1048,8 @@ def convert_user_folder_shared_folder(ufsf): params._sync_record_count = record_count + + def _sync_record_types(params): # type: (KeeperParams) -> Any rq = record_pb2.RecordTypesRequest() rq.standard = True @@ -1068,9 +1074,14 @@ def prepare_folder_tree(params): # type: (KeeperParams) -> None folder_uid = None if sf['type'] == 'user_folder': folder_uid = sf['folder_uid'] - uf = UserFolderNode() + if sf.get('source') == 'keeper_drive': + uf = KeeperDriveFolderNode() + else: + uf = UserFolderNode() uf.uid = folder_uid uf.parent_uid = sf.get('parent_uid') + if sf.get('name'): + uf.name = sf['name'] params.folder_cache[uf.uid] = uf elif sf['type'] == 'shared_folder_folder': @@ -1105,3 +1116,9 @@ def prepare_folder_tree(params): # type: (KeeperParams) -> None parent_folder = params.folder_cache.get(f.parent_uid) if f.parent_uid else params.root_folder if parent_folder: parent_folder.subfolders.append(f.uid) + elif f.parent_uid and hasattr(params, 'keeper_drive_folders') and f.uid in params.keeper_drive_folders: + # KD root-level folders have a parent UID pointing to the KD vault root, + # which is not a real vault folder. Clear parent_uid so that navigation + # and ls treat them as direct children of the vault root. 
+ f.parent_uid = None + params.root_folder.subfolders.append(f.uid) \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 156d2f51f..b33dc9b3a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -25,5 +25,6 @@ pyobjc-framework-LocalAuthentication; sys_platform == "darwin" and python_versio winrt-runtime; sys_platform == "win32" winrt-Windows.Foundation; sys_platform == "win32" winrt-Windows.Security.Credentials.UI; sys_platform == "win32" +googleapis-common-protos keeper-mlkem; python_version>='3.11' textual; python_version>='3.9' \ No newline at end of file diff --git a/setup.cfg b/setup.cfg index 732d48d9b..7a17f75aa 100644 --- a/setup.cfg +++ b/setup.cfg @@ -35,6 +35,7 @@ install_requires = keeper-secrets-manager-core>=16.6.0 prompt_toolkit protobuf>=4.23.0 + googleapis-common-protos psutil; python_version>='3.8' pycryptodomex>=3.20.0 pyngrok; python_version>='3.8' @@ -55,7 +56,7 @@ install_requires = textual; python_version>='3.9' [options.package_data] -keepercommander = resources/*, resources/email_templates/* +keepercommander = resources/*, resources/email_templates/*, commands/pam_import/*.json [options.extras_require] test = diff --git a/unit-tests/pam/test_kcm_import.py b/unit-tests/pam/test_kcm_import.py new file mode 100644 index 000000000..e8b5db5ad --- /dev/null +++ b/unit-tests/pam/test_kcm_import.py @@ -0,0 +1,3779 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' 0, 'No temp file was created') + for path in created_paths: + self.assertFalse(os.path.exists(path), + f'Temp file leaked: {path}') + + +class TestDBSSLFlag(unittest.TestCase): + """E2E: --db-ssl flag is passed through to connector.""" + + @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector') + @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='pass') + def test_ssl_flag_passed_to_connector(self, mock_getpass, MockConnector): + """--db-ssl=True is forwarded as ssl=True to KCMDatabaseConnector.""" 
+ mock_conn = MockConnector.return_value + mock_conn.extract_groups.return_value = [] + mock_conn.extract_connections.return_value = ([ + _make_row(parameter_name='hostname', parameter_value='x')], {}) + + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('builtins.print'): + cmd.execute(params, + db_host='127.0.0.1', + db_ssl=True, + dry_run=True) + + call_kwargs = MockConnector.call_args + self.assertTrue(call_kwargs[1].get('ssl', False)) + + @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector') + @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='pass') + def test_ssl_default_off(self, mock_getpass, MockConnector): + """Without --db-ssl, ssl defaults to False.""" + mock_conn = MockConnector.return_value + mock_conn.extract_groups.return_value = [] + mock_conn.extract_connections.return_value = ([ + _make_row(parameter_name='hostname', parameter_value='x')], {}) + + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('builtins.print'): + cmd.execute(params, + db_host='127.0.0.1', + dry_run=True) + + call_kwargs = MockConnector.call_args + self.assertFalse(call_kwargs[1].get('ssl', False)) + + +class TestOutputFilePermissions(unittest.TestCase): + """E2E: --output file is written with secure permissions.""" + + @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector') + @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='pass') + def test_output_file_owner_only(self, mock_getpass, MockConnector): + """--output file must have 0o600 permissions (owner read/write only).""" + mock_conn = MockConnector.return_value + mock_conn.extract_groups.return_value = [] + mock_conn.extract_connections.return_value = ([ + _make_row(parameter_name='hostname', parameter_value='x')], {}) + + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + import stat + tmp_dir = tempfile.mkdtemp() + output_path = 
os.path.join(tmp_dir, 'test_output.json') + try: + cmd.execute(params, + db_host='127.0.0.1', + output=output_path) + + file_mode = os.stat(output_path).st_mode & 0o777 + self.assertEqual(file_mode, 0o600, + f'Expected 0o600, got {oct(file_mode)}') + finally: + if os.path.exists(output_path): + os.unlink(output_path) + os.rmdir(tmp_dir) + + +class TestRemoteSSLEnforcement(unittest.TestCase): + """Remote connections must require SSL or explicit --allow-cleartext.""" + + @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='pass') + def test_remote_host_without_ssl_blocked(self, mock_getpass): + """Remote host without --db-ssl or --allow-cleartext must raise.""" + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + from keepercommander.error import CommandError + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, db_host='203.0.113.50', dry_run=True) + self.assertIn('cleartext', str(ctx.exception).lower()) + + @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector') + @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='pass') + def test_remote_host_with_ssl_allowed(self, mock_getpass, MockConnector): + """Remote host with --db-ssl should connect normally.""" + mock_conn = MockConnector.return_value + mock_conn.extract_groups.return_value = [] + mock_conn.extract_connections.return_value = ([ + _make_row(parameter_name='hostname', parameter_value='x')], {}) + + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('builtins.print'): + cmd.execute(params, db_host='203.0.113.50', db_ssl=True, dry_run=True) + # Should not raise + + @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector') + @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='pass') + def test_remote_host_with_allow_cleartext(self, mock_getpass, MockConnector): + """Remote host with --allow-cleartext should warn 
but connect.""" + mock_conn = MockConnector.return_value + mock_conn.extract_groups.return_value = [] + mock_conn.extract_connections.return_value = ([ + _make_row(parameter_name='hostname', parameter_value='x')], {}) + + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('builtins.print'): + cmd.execute(params, db_host='203.0.113.50', + allow_cleartext=True, dry_run=True) + # Should not raise + + @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector') + @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='pass') + def test_localhost_without_ssl_allowed(self, mock_getpass, MockConnector): + """Localhost connections should work without SSL.""" + mock_conn = MockConnector.return_value + mock_conn.extract_groups.return_value = [] + mock_conn.extract_connections.return_value = ([ + _make_row(parameter_name='hostname', parameter_value='x')], {}) + + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('builtins.print'): + cmd.execute(params, db_host='127.0.0.1', dry_run=True) + # Should not raise + + @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector') + @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='pass') + def test_private_ip_without_ssl_allowed(self, mock_getpass, MockConnector): + """RFC1918 addresses should work without SSL.""" + mock_conn = MockConnector.return_value + mock_conn.extract_groups.return_value = [] + mock_conn.extract_connections.return_value = ([ + _make_row(parameter_name='hostname', parameter_value='x')], {}) + + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('builtins.print'): + cmd.execute(params, db_host='192.168.1.100', dry_run=True) + # Should not raise + + +class TestCredentialCleanup(unittest.TestCase): + """Sensitive data should be cleared from memory after use.""" + + @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector') + 
@patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='supersecret') + def test_connector_password_cleared_after_close(self, mock_getpass, MockConnector): + """Connector.password should be None after execute completes.""" + mock_conn = MockConnector.return_value + mock_conn.extract_groups.return_value = [] + mock_conn.extract_connections.return_value = ([ + _make_row(parameter_name='hostname', parameter_value='x')], {}) + + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('builtins.print'): + cmd.execute(params, db_host='127.0.0.1', dry_run=True) + + # Password should have been cleared in the finally block + self.assertIsNone(mock_conn.password) + + def test_docker_detect_parses_env_vars(self): + """Docker inspect output should be parsed into credentials.""" + mock_result = MagicMock() + mock_result.returncode = 0 + mock_result.stdout = 'MYSQL_PASSWORD=secret123\nMYSQL_HOSTNAME=db\nOTHER_SECRET=xyz\n' + + with patch('keepercommander.commands.pam_import.kcm_import.subprocess.run', return_value=mock_result): + (host, port, db, user), password = \ + PAMProjectKCMImportCommand._detect_docker_credentials('mysql', 'test') + + self.assertEqual(password, 'secret123') + self.assertEqual(host, 'db') + + +class TestDockerLogSanitization(unittest.TestCase): + """Docker detect logs should not contain usernames.""" + + def test_no_username_in_log_output(self): + """The info log from docker detect should not include the db username.""" + import inspect + source = inspect.getsource(PAMProjectKCMImportCommand._detect_docker_credentials) + # Should NOT log user=%s + self.assertNotIn('user=%s', source) + + +############################################################################### +# Estimate feature tests +############################################################################### + +class TestEstimate(unittest.TestCase): + """Tests for --estimate pre-scan output.""" + + @staticmethod + def _make_groups(n=3): + 
return [{'connection_group_id': i, 'connection_group_name': f'Group {i}', + 'parent_id': None, 'type': 'ORGANIZATIONAL'} for i in range(1, n + 1)] + + @staticmethod + def _make_resources(types=None): + types = types or ['pamMachine', 'pamMachine', 'pamDatabase'] + resources = [] + for i, rtype in enumerate(types): + r = {'title': f'Resource {i}', 'type': rtype, 'host': f'host{i}', + 'pam_settings': {'options': {}, 'connection': {'protocol': 'ssh'}}} + resources.append(r) + return resources + + @staticmethod + def _make_users(n=2): + return [{'title': f'User {i}', 'type': 'pamUser', + 'rotation_settings': {}} for i in range(n)] + + def test_estimate_prints_summary(self): + """--estimate should print resource counts and API estimates.""" + from io import StringIO + import sys + groups = self._make_groups(2) + resources = self._make_resources(['pamMachine', 'pamDatabase', 'pamMachine']) + users = self._make_users(3) + captured = StringIO() + old_stdout = sys.stdout + try: + sys.stdout = captured + PAMProjectKCMImportCommand._print_estimate( + groups, resources, users, + skip_users=False, include_disabled=False, total_connections=3) + finally: + sys.stdout = old_stdout + output = captured.getvalue() + self.assertIn('KCM Migration Estimate', output) + self.assertIn('Connection groups:', output) + self.assertIn('Estimated API calls:', output) + self.assertIn('Conservative', output) + self.assertIn('Enterprise', output) + + def test_estimate_counts_by_type(self): + """Estimate should break down resources by record type.""" + from io import StringIO + import sys + resources = self._make_resources( + ['pamMachine', 'pamMachine', 'pamDatabase', 'pamRemoteBrowser']) + captured = StringIO() + old_stdout = sys.stdout + try: + sys.stdout = captured + PAMProjectKCMImportCommand._print_estimate( + [], resources, [], skip_users=True, + include_disabled=False, total_connections=4) + finally: + sys.stdout = old_stdout + output = captured.getvalue() + self.assertIn('SSH/RDP/VNC', 
output) + self.assertIn('Database', output) + self.assertIn('RemoteBrowser', output) + self.assertIn('(skipped)', output) + + def test_estimate_api_call_calculation(self): + """Verify API call estimate math.""" + resources = self._make_resources(['pamMachine'] * 10) + users = self._make_users(5) + # Expected: 20 (setup) + 10*20 (resources) + 5*8 (users) = 260 + from io import StringIO + import sys + captured = StringIO() + old_stdout = sys.stdout + try: + sys.stdout = captured + PAMProjectKCMImportCommand._print_estimate( + [], resources, users, skip_users=False, + include_disabled=False, total_connections=10) + finally: + sys.stdout = old_stdout + self.assertIn('~ 260', captured.getvalue()) + + def test_estimate_sftp_no_extra_api_calls(self): + """SFTP connection settings should NOT add extra API calls (not separate records).""" + resources = [ + {'title': 'R1', 'type': 'pamMachine', 'host': 'h1', + 'pam_settings': {'connection': {'sftp': {'host': 'sftp1'}}}}, + {'title': 'R2', 'type': 'pamMachine', 'host': 'h2', + 'pam_settings': {'connection': {}}}, + ] + from io import StringIO + import sys + captured = StringIO() + old_stdout = sys.stdout + try: + sys.stdout = captured + PAMProjectKCMImportCommand._print_estimate( + [], resources, [], skip_users=True, + include_disabled=False, total_connections=2) + finally: + sys.stdout = old_stdout + output = captured.getvalue() + # SFTP is a connection setting, not separate records — no extra line + self.assertNotIn('SFTP sub-resources', output) + # 20 (setup) + 2*20 (resources) + 0 (users) = 60 + self.assertIn('~ 60', output) + + def test_estimate_duration_formatting(self): + """Duration formatter should handle seconds, minutes, and hours.""" + # Test via the output with a large number of resources + resources = self._make_resources(['pamMachine'] * 200) + users = self._make_users(100) + from io import StringIO + import sys + captured = StringIO() + old_stdout = sys.stdout + try: + sys.stdout = captured + 
PAMProjectKCMImportCommand._print_estimate( + [], resources, users, skip_users=False, + include_disabled=False, total_connections=200) + finally: + sys.stdout = old_stdout + output = captured.getvalue() + # At 5 req/s with 7420 calls = 1484s = 24m 44s + self.assertIn('m', output) + + +############################################################################### +# Pre-import validation and confirmation tests +############################################################################### + +class TestValidateImportData(unittest.TestCase): + """Tests for _validate_import_data pre-import checks.""" + + def test_rotation_settings_on_user_warns(self): + """Users with rotation_settings should trigger a warning.""" + resources = [{'title': 'R1', 'host': '10.0.0.1'}] + users = [{'title': 'U1', 'rotation_settings': {'period': 30}}] + warnings = PAMProjectKCMImportCommand._validate_import_data( + resources, users, skip_users=False) + self.assertTrue(any('rotation_settings' in w for w in warnings)) + + def test_no_rotation_settings_no_warning(self): + """Clean data should produce no rotation warnings.""" + resources = [{'title': 'R1', 'host': '10.0.0.1'}] + users = [{'title': 'U1', 'password': ''}] + warnings = PAMProjectKCMImportCommand._validate_import_data( + resources, users, skip_users=False) + self.assertFalse(any('rotation_settings' in w for w in warnings)) + + def test_unnested_users_warn(self): + """Top-level users (not nested in resources) should warn.""" + resources = [{'title': 'R1', 'host': '10.0.0.1'}] + users = [{'title': 'U1'}, {'title': 'U2'}] + warnings = PAMProjectKCMImportCommand._validate_import_data( + resources, users, skip_users=False) + self.assertTrue(any('2 user(s) not linked' in w for w in warnings)) + + def test_no_users_no_unlinked_warning(self): + """All users nested = no unlinked warning.""" + resources = [{'title': 'R1', 'host': '10.0.0.1', + 'users': [{'title': 'U1'}]}] + users = [] + warnings = 
PAMProjectKCMImportCommand._validate_import_data( + resources, users, skip_users=False) + self.assertFalse(any('not linked' in w for w in warnings)) + + def test_missing_host_warns(self): + """Resources without host should trigger a warning.""" + resources = [{'title': 'R1', 'host': ''}, {'title': 'R2'}] + warnings = PAMProjectKCMImportCommand._validate_import_data( + resources, [], skip_users=True) + host_warnings = [w for w in warnings if 'no host' in w] + self.assertEqual(len(host_warnings), 2) + + def test_skip_users_skips_user_checks(self): + """With skip_users=True, user checks should be skipped.""" + resources = [{'title': 'R1', 'host': '10.0.0.1'}] + users = [{'title': 'U1', 'rotation_settings': {'bad': True}}] + warnings = PAMProjectKCMImportCommand._validate_import_data( + resources, users, skip_users=True) + self.assertFalse(any('rotation_settings' in w for w in warnings)) + + +class TestImportConfirmation(unittest.TestCase): + """Tests for interactive confirmation prompt.""" + + @patch('keepercommander.api.sync_down') + @patch('keepercommander.commands.pam_import.kcm_import.PAMProjectKCMImportCommand._discover_shared_folder_names', + return_value=(None, None)) + @patch('keepercommander.commands.pam_import.kcm_import.PAMProjectKCMImportCommand._create_project_skeleton', + return_value=('skeleton-cfg', '')) + @patch('keepercommander.commands.pam_import.kcm_import.PAMProjectKCMImportCommand._resolve_gateway', + return_value=None) + @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector') + @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='pass') + def test_user_cancel_aborts_import(self, mock_getpass, MockConnector, + mock_gw, mock_skeleton, mock_discover, + mock_sync): + """Answering 'n' to confirmation should abort the import.""" + mock_conn = MockConnector.return_value + mock_conn.extract_groups.return_value = [] + mock_conn.extract_connections.return_value = ([ + 
_make_row(parameter_name='hostname', parameter_value='x')], {}) + + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('builtins.input', return_value='n'), \ + patch('builtins.print'): + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, db_host='127.0.0.1', + project_name='Cancel Test') + self.assertIn('cancelled', str(ctx.exception)) + + # Skeleton should NOT have been called (cancelled before phase 1) + mock_skeleton.assert_not_called() + + @patch('keepercommander.api.sync_down') + @patch('keepercommander.commands.pam_import.kcm_import.PAMProjectKCMImportCommand._create_summary_record') + @patch('keepercommander.commands.pam_import.kcm_import.PAMProjectKCMImportCommand._get_project_assets', + return_value={}) + @patch('keepercommander.commands.pam_import.kcm_import.PAMProjectKCMImportCommand._discover_shared_folder_names', + return_value=(None, None)) + @patch('keepercommander.commands.pam_import.kcm_import.PAMProjectKCMImportCommand._create_project_skeleton', + return_value=('skeleton-cfg', '')) + @patch('keepercommander.commands.pam_import.kcm_import.PAMProjectKCMImportCommand._resolve_gateway', + return_value=None) + @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector') + @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='pass') + def test_yes_flag_skips_prompt(self, mock_getpass, MockConnector, + mock_gw, mock_skeleton, mock_discover, + mock_assets, mock_summary, mock_sync): + """--yes flag should skip the confirmation prompt entirely.""" + mock_conn = MockConnector.return_value + mock_conn.extract_groups.return_value = [] + mock_conn.extract_connections.return_value = ([ + _make_row(parameter_name='hostname', parameter_value='x')], {}) + + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + fake_cache = {} + params.record_cache = fake_cache + + mock_extend_cmd = MagicMock() + def fake_extend(params, **kwargs): + fake_cache['r1'] = {} + 
mock_extend_cmd.execute = MagicMock(side_effect=fake_extend) + mock_extend_class = MagicMock(return_value=mock_extend_cmd) + mock_extend_module = MagicMock(PAMProjectExtendCommand=mock_extend_class) + + with patch.dict('sys.modules', + {'keepercommander.commands.pam_import.extend': mock_extend_module}), \ + patch('builtins.input') as mock_input: + cmd.execute(params, db_host='127.0.0.1', + project_name='Auto Test', + auto_confirm=True, + auto_throttle=False) + + # input() should NOT have been called + mock_input.assert_not_called() + # Skeleton and extend should have been called + mock_skeleton.assert_called_once() + mock_extend_cmd.execute.assert_called_once() + + +class TestConvertKcmAutofill(unittest.TestCase): + """_convert_kcm_autofill cleans KCM JSON for Keeper RBI.""" + + def test_single_page_login(self): + """Compact JSON with single step.""" + original = [{"page": "login.example.com", "username-field": "#user", + "password-field": "#pass", "submit": "button.login"}] + # KCM DB often has extra whitespace + kcm_raw = json.dumps(original, indent=4) + resource = {'pam_settings': {'connection': {}}} + KCMParameterMapper._convert_kcm_autofill(resource, kcm_raw) + result = resource['pam_settings']['connection']['autofill_targets'] + self.assertEqual(json.loads(result), original) + # Compact (no indent whitespace) + self.assertNotIn('\n', result) + + def test_multi_step_preserved(self): + """Multi-step flow preserved as JSON array.""" + original = [ + {"page": "*.example.com", "username-field": "#u", + "submit": "input[type='submit']"}, + {"page": "*.example.com", "password-field": "#p", + "submit": "button[type='submit']"}, + {"page": "*.example.com", "totp-code-field": "#otp", + "submit": "button[type='submit']"}, + ] + kcm_raw = json.dumps(original, indent=6) + resource = {'pam_settings': {'connection': {}}} + KCMParameterMapper._convert_kcm_autofill(resource, kcm_raw) + result = resource['pam_settings']['connection']['autofill_targets'] + 
self.assertEqual(json.loads(result), original) + self.assertEqual(len(json.loads(result)), 3) + + def test_yaml_fallback(self): + """Non-JSON input stored as-is.""" + yaml_val = '- page: "https://example.com"\n username-field: "#u"' + resource = {'pam_settings': {'connection': {}}} + KCMParameterMapper._convert_kcm_autofill(resource, yaml_val) + result = resource['pam_settings']['connection']['autofill_targets'] + self.assertEqual(result, yaml_val.strip()) + + def test_non_array_json_stored_as_is(self): + """JSON object (not array) stored as-is.""" + kcm_raw = json.dumps({"page": "x.com"}) + resource = {'pam_settings': {'connection': {}}} + KCMParameterMapper._convert_kcm_autofill(resource, kcm_raw) + result = resource['pam_settings']['connection']['autofill_targets'] + self.assertEqual(result, kcm_raw.strip()) + + def test_literal_backslash_n_from_kcm_db(self): + """KCM DB extraction produces literal \\n — must parse correctly.""" + # Simulate what PostgreSQL extraction produces: literal \n characters + # instead of real newlines in the JSON + kcm_raw = ( + '[\\n' + ' {\\n' + ' "page": "*.microsoftonline.com",\\n' + ' "username-field": "#i0116",\\n' + ' "password-field": "#i0118",\\n' + ' "submit": "input[type=\'submit\']"\\n' + ' }\\n' + ' ]' + ) + resource = {'pam_settings': {'connection': {}}} + KCMParameterMapper._convert_kcm_autofill(resource, kcm_raw) + result = resource['pam_settings']['connection']['autofill_targets'] + parsed = json.loads(result) + self.assertIsInstance(parsed, list) + self.assertEqual(len(parsed), 1) + self.assertEqual(parsed[0]['page'], '*.microsoftonline.com') + self.assertEqual(parsed[0]['username-field'], '#i0116') + # Must be compact JSON + self.assertNotIn('\\n', result) + self.assertNotIn('\n', result) + + def test_literal_backslash_n_multi_step(self): + """Multi-step autofill with literal \\n parses correctly.""" + kcm_raw = ( + '[\\n' + ' {\\n' + ' "page": "*.example.com",\\n' + ' "username-field": "#user",\\n' + ' "submit": 
"button.next"\\n' + ' },\\n' + ' {\\n' + ' "page": "*.example.com",\\n' + ' "password-field": "#pass",\\n' + ' "submit": "button.login"\\n' + ' }\\n' + ']' + ) + resource = {'pam_settings': {'connection': {}}} + KCMParameterMapper._convert_kcm_autofill(resource, kcm_raw) + result = resource['pam_settings']['connection']['autofill_targets'] + parsed = json.loads(result) + self.assertEqual(len(parsed), 2) + self.assertEqual(parsed[0]['username-field'], '#user') + self.assertEqual(parsed[1]['password-field'], '#pass') + + def test_double_escaped_quotes_in_css_selector(self): + """KCM DB with double-escaped quotes: \\\\" → \\" in CSS selectors.""" + # Azure Portal / Lineleader style: submit has button[type=\"submit\"] + # but extraction double-escapes to button[type=\\"submit\\"] + # Reconstructed from actual dry-run output repr: + kcm_raw = '[\\n {\\n "page": "login.microsoftonline.com",\\n "username-field": "#i0116",\\n "password-field": "#i0118",\\n "submit": "button[type=\\\\"submit\\\\"]",\\n "cannot-submit": "div[data-testid=\\\\"challenge-widget-container\\\\"]"\\n }\\n]' + resource = {'pam_settings': {'connection': {}}} + KCMParameterMapper._convert_kcm_autofill(resource, kcm_raw) + result = resource['pam_settings']['connection']['autofill_targets'] + parsed = json.loads(result) + self.assertEqual(len(parsed), 1) + self.assertEqual(parsed[0]['page'], 'login.microsoftonline.com') + self.assertEqual(parsed[0]['submit'], 'button[type="submit"]') + self.assertEqual(parsed[0]['cannot-submit'], + 'div[data-testid="challenge-widget-container"]') + # Must be compact JSON + self.assertNotIn('\n', result) + + def test_legacy_autofill_produces_json(self): + """Legacy username-field/password-field → JSON array.""" + resource = {'pam_settings': {'connection': {}}} + KCMParameterMapper._append_legacy_autofill(resource, 'username-field', 'u') + KCMParameterMapper._append_legacy_autofill(resource, 'password-field', 'passwd') + result = 
resource['pam_settings']['connection']['autofill_targets'] + parsed = json.loads(result) + self.assertEqual(parsed, [{"username-field": "u", "password-field": "passwd"}]) + + +class TestFlagIncompleteRecords(unittest.TestCase): + """Tests for _flag_incomplete_records — moves bad data to special folder.""" + + def _make_resource(self, title, protocol, host='10.0.0.1', **extra): + r = { + 'title': title, + 'host': host, + 'type': 'pamMachine', + 'folder_path': 'Proj - Resources/GroupA', + 'pam_settings': { + 'connection': { + 'protocol': protocol, + 'launch_credentials': f'User - {title}', + } + } + } + r.update(extra) + return r + + def _make_user(self, resource_title, login='admin'): + return { + 'title': f'User - {resource_title}', + 'login': login, + 'type': 'pamUser', + 'folder_path': 'Proj - Users/GroupA', + } + + def test_complete_records_unchanged(self): + """Records with all required fields stay in their original folder.""" + r = self._make_resource('SSHBox', 'ssh') + u = self._make_user('SSHBox') + PAMProjectKCMImportCommand._flag_incomplete_records( + [r], [u], 'Proj - Resources', 'Proj - Users') + self.assertEqual(r['folder_path'], 'Proj - Resources/GroupA') + self.assertNotIn('notes', r) + + def test_missing_host_ssh_flagged(self): + """SSH resource without host is moved to Incomplete folder.""" + r = self._make_resource('NoHost', 'ssh', host='') + u = self._make_user('NoHost') + PAMProjectKCMImportCommand._flag_incomplete_records( + [r], [u], 'Proj - Resources', 'Proj - Users') + self.assertEqual(r['folder_path'], + 'Proj - Resources/Incomplete (KCM Source)') + self.assertIn('Missing host/IP', r['notes']) + self.assertIn('INCOMPLETE DATA AT KCM SOURCE', r['notes']) + + def test_missing_host_vnc_flagged(self): + """VNC resource without host is flagged.""" + r = self._make_resource('NoHostVNC', 'vnc', host='') + u = self._make_user('NoHostVNC') + PAMProjectKCMImportCommand._flag_incomplete_records( + [r], [u], 'Proj - Resources', 'Proj - Users') + 
self.assertEqual(r['folder_path'], + 'Proj - Resources/Incomplete (KCM Source)') + + def test_http_missing_url_flagged(self): + """HTTP resource without host and without url is flagged.""" + r = self._make_resource('NoURL', 'http', host='') + r['type'] = 'pamRemoteBrowser' + u = self._make_user('NoURL') + PAMProjectKCMImportCommand._flag_incomplete_records( + [r], [u], 'Proj - Resources', 'Proj - Users') + self.assertEqual(r['folder_path'], + 'Proj - Resources/Incomplete (KCM Source)') + self.assertIn('Missing URL', r['notes']) + + def test_http_with_url_not_flagged(self): + """HTTP resource with url but no host is NOT flagged.""" + r = self._make_resource('WebApp', 'http', host='') + r['type'] = 'pamRemoteBrowser' + r['url'] = 'https://example.com' + u = self._make_user('WebApp') + PAMProjectKCMImportCommand._flag_incomplete_records( + [r], [u], 'Proj - Resources', 'Proj - Users') + self.assertEqual(r['folder_path'], 'Proj - Resources/GroupA') + self.assertNotIn('notes', r) + + def test_rdp_sftp_missing_fields_flagged(self): + """RDP with SFTP enabled but missing host/port/login is flagged.""" + r = self._make_resource('RDPBox', 'rdp') + r['pam_settings']['connection']['sftp'] = { + 'enable_sftp': 'true', + 'sftp_root_directory': '/tmp', + # Missing: host, port, login, password + } + u = self._make_user('RDPBox') + PAMProjectKCMImportCommand._flag_incomplete_records( + [r], [u], 'Proj - Resources', 'Proj - Users') + self.assertEqual(r['folder_path'], + 'Proj - Resources/Incomplete (KCM Source)') + self.assertIn('SFTP enabled but missing', r['notes']) + self.assertIn('host', r['notes']) + self.assertIn('login', r['notes']) + + def test_rdp_sftp_complete_not_flagged(self): + """RDP with complete SFTP settings is NOT flagged.""" + r = self._make_resource('RDPGood', 'rdp') + r['pam_settings']['connection']['sftp'] = { + 'enable_sftp': 'true', + 'host': '10.0.0.2', 'port': '22', + 'login': 'admin', 'password': 'pass', + } + u = self._make_user('RDPGood') + 
PAMProjectKCMImportCommand._flag_incomplete_records( + [r], [u], 'Proj - Resources', 'Proj - Users') + self.assertEqual(r['folder_path'], 'Proj - Resources/GroupA') + + def test_user_missing_login_flagged(self): + """Resource with user missing login for login-required protocol.""" + r = self._make_resource('RDPNoLogin', 'rdp') + u = self._make_user('RDPNoLogin', login='') + PAMProjectKCMImportCommand._flag_incomplete_records( + [r], [u], 'Proj - Resources', 'Proj - Users') + self.assertEqual(r['folder_path'], + 'Proj - Resources/Incomplete (KCM Source)') + self.assertIn('no login', r['notes']) + # User also moved to incomplete folder + self.assertEqual(u['folder_path'], + 'Proj - Users/Incomplete (KCM Source)') + + def test_vnc_user_no_login_not_flagged(self): + """VNC user without login is NOT flagged (VNC uses password only).""" + r = self._make_resource('VNCBox', 'vnc') + u = self._make_user('VNCBox', login='') + PAMProjectKCMImportCommand._flag_incomplete_records( + [r], [u], 'Proj - Resources', 'Proj - Users') + self.assertEqual(r['folder_path'], 'Proj - Resources/GroupA') + + def test_notes_contain_original_folder(self): + """Notes should mention the original folder path.""" + r = self._make_resource('BadHost', 'ssh', host='') + u = self._make_user('BadHost') + PAMProjectKCMImportCommand._flag_incomplete_records( + [r], [u], 'Proj - Resources', 'Proj - Users') + self.assertIn('Proj - Resources/GroupA', r['notes']) + + def test_multiple_issues_all_listed(self): + """Resource with multiple issues lists all of them in notes.""" + r = self._make_resource('RDPBad', 'rdp', host='') + r['pam_settings']['connection']['sftp'] = { + 'enable_sftp': 'true', + } + u = self._make_user('RDPBad', login='') + PAMProjectKCMImportCommand._flag_incomplete_records( + [r], [u], 'Proj - Resources', 'Proj - Users') + self.assertIn('Missing host/IP', r['notes']) + self.assertIn('SFTP enabled but missing', r['notes']) + self.assertIn('no login', r['notes']) + + +class 
TestPrintImportSummary(unittest.TestCase): + """Tests for _print_import_summary output.""" + + def test_summary_shows_project_and_mode(self): + """Summary should display project name and mode.""" + from io import StringIO + import sys + resources = [ + {'title': 'R1', 'folder_path': 'Proj - Resources/Group1', + 'users': [{'title': 'U1', 'folder_path': 'Proj - Users/Group1'}]}, + ] + captured = StringIO() + old_stdout = sys.stdout + try: + sys.stdout = captured + PAMProjectKCMImportCommand._print_import_summary( + 'Proj', '', 1, 1, resources, [], False) + finally: + sys.stdout = old_stdout + output = captured.getvalue() + self.assertIn('Proj', output) + self.assertIn('New project', output) + self.assertIn('Resources: 1', output) + self.assertIn('Users: 1', output) + self.assertIn('Folders to create:', output) + self.assertIn('Proj - Resources/Group1', output) + + def test_summary_extend_mode(self): + """Extend mode should show 'Extend existing project'.""" + from io import StringIO + import sys + captured = StringIO() + old_stdout = sys.stdout + try: + sys.stdout = captured + PAMProjectKCMImportCommand._print_import_summary( + 'Proj', 'existing-uid', 0, 0, [], [], True) + finally: + sys.stdout = old_stdout + self.assertIn('Extend existing project', captured.getvalue()) + + +############################################################################### +# Live E2E tests — connect to real KCM PostgreSQL database +############################################################################### + +# Connection details (from guacamole-postgres container) +# Credentials loaded from environment to avoid committing secrets. 
+_LIVE_DB_HOST = os.environ.get('KCM_TEST_DB_HOST', '127.0.0.1') +_LIVE_DB_PORT = int(os.environ.get('KCM_TEST_DB_PORT', '5432')) +_LIVE_DB_NAME = os.environ.get('KCM_TEST_DB_NAME', 'guacamole_db') +_LIVE_DB_USER = os.environ.get('KCM_TEST_DB_USER', 'guacamole_user') +_LIVE_DB_PASS = os.environ.get('KCM_TEST_DB_PASS', '') +_LIVE_DB_TYPE = os.environ.get('KCM_TEST_DB_TYPE', 'postgresql') +_LIVE_DOCKER_CONTAINER = os.environ.get('KCM_TEST_DOCKER', 'guacamole-postgres') + + +_SKIP_LIVE = not os.environ.get('KCM_TEST_DB_HOST') +_SKIP_MSG = 'Set KCM_TEST_DB_HOST to enable live DB tests' + + +@unittest.skipIf(_SKIP_LIVE, _SKIP_MSG) +class TestLiveDBConnection(unittest.TestCase): + """Real connection to KCM PostgreSQL database.""" + + def test_connect_and_validate_schema(self): + """Connect to real DB and validate guacamole schema exists.""" + conn = KCMDatabaseConnector( + _LIVE_DB_TYPE, _LIVE_DB_HOST, _LIVE_DB_PORT, + _LIVE_DB_USER, _LIVE_DB_PASS, _LIVE_DB_NAME) + conn.connect() + conn.validate_schema() # must not raise + conn.close() + + def test_extract_groups_returns_data(self): + """extract_groups should return at least one group.""" + conn = KCMDatabaseConnector( + _LIVE_DB_TYPE, _LIVE_DB_HOST, _LIVE_DB_PORT, + _LIVE_DB_USER, _LIVE_DB_PASS, _LIVE_DB_NAME) + conn.connect() + groups = conn.extract_groups() + conn.close() + self.assertIsInstance(groups, list) + self.assertGreater(len(groups), 0) + # Each group should have required keys + for g in groups: + self.assertIn('connection_group_id', g) + self.assertIn('connection_group_name', g) + + def test_extract_connections_returns_data(self): + """extract_connections should return (rows, attr_map) tuple.""" + conn = KCMDatabaseConnector( + _LIVE_DB_TYPE, _LIVE_DB_HOST, _LIVE_DB_PORT, + _LIVE_DB_USER, _LIVE_DB_PASS, _LIVE_DB_NAME) + conn.connect() + rows, attr_map = conn.extract_connections() + conn.close() + self.assertIsInstance(rows, list) + self.assertIsInstance(attr_map, dict) + self.assertGreater(len(rows), 0) + 
required_cols = {'connection_id', 'name', 'protocol', 'parameter_name'} + for row in rows: + self.assertTrue(required_cols.issubset(row.keys()), + f"Missing columns: {required_cols - row.keys()}") + + def test_connection_close_is_idempotent(self): + """close() should not raise when called twice.""" + conn = KCMDatabaseConnector( + _LIVE_DB_TYPE, _LIVE_DB_HOST, _LIVE_DB_PORT, + _LIVE_DB_USER, _LIVE_DB_PASS, _LIVE_DB_NAME) + conn.connect() + conn.close() + conn.close() # must not raise + + +@unittest.skipIf(_SKIP_LIVE, _SKIP_MSG) +class TestLiveParameterMapping(unittest.TestCase): + """Transform real KCM data through the mapper pipeline.""" + + @classmethod + def setUpClass(cls): + conn = KCMDatabaseConnector( + _LIVE_DB_TYPE, _LIVE_DB_HOST, _LIVE_DB_PORT, + _LIVE_DB_USER, _LIVE_DB_PASS, _LIVE_DB_NAME) + conn.connect() + cls.groups = conn.extract_groups() + cls.rows, cls.attr_map = conn.extract_connections() + conn.close() + cls.mapper = KCMParameterMapper() + + def test_transform_produces_resources_and_users(self): + """transform() should produce non-empty resource and user lists.""" + resources, users = self.mapper.transform(self.rows, attr_map=self.attr_map) + self.assertGreater(len(resources), 0) + self.assertGreater(len(users), 0) + + def test_resources_have_required_fields(self): + """Each resource should have title, type, host, pam_settings.""" + resources, _ = self.mapper.transform(self.rows) + for r in resources: + self.assertIn('title', r) + self.assertIn('type', r) + self.assertIn('pam_settings', r) + self.assertIn('connection', r['pam_settings']) + self.assertIn('protocol', r['pam_settings']['connection']) + + def test_users_have_required_fields(self): + """Each user should have title, type.""" + _, users = self.mapper.transform(self.rows) + for u in users: + self.assertIn('title', u) + self.assertEqual(u['type'], 'pamUser') + + def test_protocol_mapping_matches_real_data(self): + """Protocols in DB should map to valid PAM record types.""" + resources, _ 
= self.mapper.transform(self.rows) + valid_types = {'pamMachine', 'pamDatabase', 'pamRemoteBrowser'} + for r in resources: + self.assertIn(r['type'], valid_types, + f"Unexpected type {r['type']} for resource {r['title']}") + + def test_hostnames_extracted(self): + """At least one resource should have a non-empty host.""" + resources, _ = self.mapper.transform(self.rows) + hosts = [r['host'] for r in resources if r.get('host')] + self.assertGreater(len(hosts), 0, "No hostnames extracted from DB") + + +@unittest.skipIf(_SKIP_LIVE, _SKIP_MSG) +class TestLiveFolderModes(unittest.TestCase): + """All three --folder-mode options against real group data.""" + + @classmethod + def setUpClass(cls): + conn = KCMDatabaseConnector( + _LIVE_DB_TYPE, _LIVE_DB_HOST, _LIVE_DB_PORT, + _LIVE_DB_USER, _LIVE_DB_PASS, _LIVE_DB_NAME) + conn.connect() + cls.groups = conn.extract_groups() + cls.rows, cls.attr_map = conn.extract_connections() + conn.close() + + def test_ksm_mode(self): + """ksm mode should produce valid folder paths.""" + resolver = KCMGroupResolver(self.groups, mode='ksm') + for g in self.groups: + path = resolver.resolve_path(g['connection_group_id']) + self.assertIsInstance(path, str) + self.assertNotIn('..', path, "Path traversal in ksm mode") + + def test_exact_mode(self): + """exact mode should produce hierarchical paths.""" + resolver = KCMGroupResolver(self.groups, mode='exact') + for g in self.groups: + path = resolver.resolve_path(g['connection_group_id']) + self.assertIsInstance(path, str) + self.assertNotIn('..', path) + + def test_flat_mode(self): + """flat mode should produce single-level names (no slashes).""" + resolver = KCMGroupResolver(self.groups, mode='flat') + for g in self.groups: + path = resolver.resolve_path(g['connection_group_id']) + self.assertIsInstance(path, str) + self.assertNotIn('/', path, "Flat mode should not have slashes") + self.assertNotIn('..', path) + + def test_shared_folders_generated(self): + """get_shared_folders should return at 
least one folder.""" + resolver = KCMGroupResolver(self.groups, mode='ksm') + folders = resolver.get_shared_folders() + self.assertIsInstance(folders, list) + self.assertGreater(len(folders), 0) + + +@unittest.skipIf(_SKIP_LIVE, _SKIP_MSG) +class TestLiveDockerDetect(unittest.TestCase): + """Docker detect against real running containers.""" + + def test_detect_from_postgres_container(self): + """docker-detect from guacamole-postgres should return valid creds.""" + (host, port, db, user), password = \ + PAMProjectKCMImportCommand._detect_docker_credentials( + 'postgresql', _LIVE_DOCKER_CONTAINER) + self.assertIsInstance(host, str) + self.assertIsInstance(port, int) + self.assertEqual(db, 'guacamole_db') + self.assertEqual(user, 'guacamole_user') + self.assertGreater(len(password), 0) + + def test_detected_creds_can_connect(self): + """Credentials from docker-detect should work (using known-good host). + + Docker-detect returns host=127.0.0.1 (from container's env) which + may not be reachable from the test runner. We use the detected + user/password/db but connect via the container's Docker IP. 
+ """ + (_, port, db, user), password = \ + PAMProjectKCMImportCommand._detect_docker_credentials( + 'postgresql', _LIVE_DOCKER_CONTAINER) + conn = KCMDatabaseConnector('postgresql', _LIVE_DB_HOST, port, + user, password, db) + conn.connect() + conn.validate_schema() + conn.close() + + +@unittest.skipIf(_SKIP_LIVE, _SKIP_MSG) +class TestLiveExecutePipeline(unittest.TestCase): + """Full execute() pipeline against real DB with --dry-run and --output.""" + + def test_dry_run_real_db(self): + """Full pipeline with real DB: connect → extract → transform → dry-run.""" + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('builtins.print') as mock_print: + with patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value=_LIVE_DB_PASS): + cmd.execute(params, + db_host=_LIVE_DB_HOST, + db_port=_LIVE_DB_PORT, + db_type=_LIVE_DB_TYPE, + db_name=_LIVE_DB_NAME, + db_user=_LIVE_DB_USER, + dry_run=True) + + # Verify output was printed (dry-run dumps JSON) + printed = ''.join(str(c) for c in mock_print.call_args_list) + self.assertIn('pam_data', printed) + self.assertIn('resources', printed) + + def test_dry_run_with_skip_users(self): + """--skip-users should produce resources but no users.""" + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('builtins.print') as mock_print: + with patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value=_LIVE_DB_PASS): + cmd.execute(params, + db_host=_LIVE_DB_HOST, + db_port=_LIVE_DB_PORT, + db_type=_LIVE_DB_TYPE, + db_name=_LIVE_DB_NAME, + db_user=_LIVE_DB_USER, + skip_users=True, + dry_run=True) + + printed = ''.join(str(c) for c in mock_print.call_args_list) + # Parse the JSON from the printed output + self.assertIn('"users": []', printed) + + def test_output_file_real_db(self): + """--output should write valid JSON with real data.""" + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + import stat + tmp_dir = tempfile.mkdtemp() + 
output_path = os.path.join(tmp_dir, 'kcm_export.json') + try: + with patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value=_LIVE_DB_PASS): + cmd.execute(params, + db_host=_LIVE_DB_HOST, + db_port=_LIVE_DB_PORT, + db_type=_LIVE_DB_TYPE, + db_name=_LIVE_DB_NAME, + db_user=_LIVE_DB_USER, + output=output_path) + + self.assertTrue(os.path.isfile(output_path)) + # Check permissions + file_mode = os.stat(output_path).st_mode & 0o777 + self.assertEqual(file_mode, 0o600) + # Parse JSON + with open(output_path) as f: + data = json.load(f) + self.assertIn('pam_data', data) + self.assertIn('resources', data['pam_data']) + self.assertGreater(len(data['pam_data']['resources']), 0) + finally: + if os.path.exists(output_path): + os.unlink(output_path) + os.rmdir(tmp_dir) + + def test_dry_run_redacts_passwords(self): + """Dry-run output should have [REDACTED] for password fields.""" + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('builtins.print') as mock_print: + with patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value=_LIVE_DB_PASS): + cmd.execute(params, + db_host=_LIVE_DB_HOST, + db_port=_LIVE_DB_PORT, + db_type=_LIVE_DB_TYPE, + db_name=_LIVE_DB_NAME, + db_user=_LIVE_DB_USER, + dry_run=True) + + printed = ''.join(str(c) for c in mock_print.call_args_list) + # If any passwords exist in data, they should be redacted + if 'password' in printed.lower(): + self.assertIn('REDACTED', printed) + + def test_dry_run_all_folder_modes(self): + """Each folder mode should produce valid output without errors.""" + for mode in ('ksm', 'exact', 'flat'): + with self.subTest(mode=mode): + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + with patch('builtins.print'): + with patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value=_LIVE_DB_PASS): + cmd.execute(params, + db_host=_LIVE_DB_HOST, + db_port=_LIVE_DB_PORT, + db_type=_LIVE_DB_TYPE, + 
db_name=_LIVE_DB_NAME, + db_user=_LIVE_DB_USER, + folder_mode=mode, + dry_run=True) + # No exception = pass + + def test_custom_project_name(self): + """--name should be used in the output JSON.""" + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + tmp_dir = tempfile.mkdtemp() + output_path = os.path.join(tmp_dir, 'named_export.json') + try: + with patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value=_LIVE_DB_PASS): + cmd.execute(params, + db_host=_LIVE_DB_HOST, + db_port=_LIVE_DB_PORT, + db_type=_LIVE_DB_TYPE, + db_name=_LIVE_DB_NAME, + db_user=_LIVE_DB_USER, + project_name='MyCustomProject', + output=output_path) + + with open(output_path) as f: + data = json.load(f) + self.assertEqual(data.get('project'), 'MyCustomProject') + finally: + if os.path.exists(output_path): + os.unlink(output_path) + os.rmdir(tmp_dir) + + def test_include_disabled_flag(self): + """--include-disabled should not crash (may or may not change count).""" + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('builtins.print'): + with patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value=_LIVE_DB_PASS): + cmd.execute(params, + db_host=_LIVE_DB_HOST, + db_port=_LIVE_DB_PORT, + db_type=_LIVE_DB_TYPE, + db_name=_LIVE_DB_NAME, + db_user=_LIVE_DB_USER, + include_disabled=True, + dry_run=True) + + +@unittest.skipIf(_SKIP_LIVE, _SKIP_MSG) +class TestLiveErrorPaths(unittest.TestCase): + """Error conditions against real infrastructure.""" + + def test_wrong_password_raises(self): + """Wrong DB password should raise CommandError.""" + from keepercommander.error import CommandError + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='wrong_password'): + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, + db_host=_LIVE_DB_HOST, + db_port=_LIVE_DB_PORT, + db_type=_LIVE_DB_TYPE, + 
db_name=_LIVE_DB_NAME, + db_user=_LIVE_DB_USER, + dry_run=True) + self.assertIn('Database connection failed', str(ctx.exception)) + + def test_wrong_db_name_raises(self): + """Non-existent database should raise CommandError.""" + from keepercommander.error import CommandError + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value=_LIVE_DB_PASS): + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, + db_host=_LIVE_DB_HOST, + db_port=_LIVE_DB_PORT, + db_type=_LIVE_DB_TYPE, + db_name='nonexistent_db', + db_user=_LIVE_DB_USER, + dry_run=True) + self.assertIn('Database connection failed', str(ctx.exception)) + + def test_wrong_port_raises(self): + """Wrong port should raise CommandError.""" + from keepercommander.error import CommandError + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value=_LIVE_DB_PASS): + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, + db_host=_LIVE_DB_HOST, + db_port=9999, + db_type=_LIVE_DB_TYPE, + db_name=_LIVE_DB_NAME, + db_user=_LIVE_DB_USER, + dry_run=True) + self.assertIn('Database connection failed', str(ctx.exception)) + + def test_remote_without_ssl_blocked(self): + """Public IP without --db-ssl should be blocked.""" + from keepercommander.error import CommandError + cmd = PAMProjectKCMImportCommand() + params = MagicMock() + + with patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='pass'): + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, db_host='203.0.113.50', dry_run=True) + self.assertIn('cleartext', str(ctx.exception).lower()) + + def test_docker_detect_nonexistent_container(self): + """Docker detect with wrong container name should raise.""" + from keepercommander.error import CommandError + with self.assertRaises(CommandError): + 
PAMProjectKCMImportCommand._detect_docker_credentials( + 'postgresql', 'container_that_does_not_exist_xyz') + + +class TestFolderModeVariations(unittest.TestCase): + """--folder-mode variations through full execute() pipeline.""" + + def _groups_and_rows(self): + groups = [ + {'connection_group_id': 1, 'parent_id': None, + 'connection_group_name': 'DC-East', 'ksm_config': 'east-cfg'}, + {'connection_group_id': 2, 'parent_id': 1, + 'connection_group_name': 'Webservers', 'ksm_config': None}, + ] + rows = [ + _make_row(connection_id=1, name='web1', protocol='ssh', + parameter_name='hostname', parameter_value='10.0.1.1', + connection_group_id=2, parent_id=1, group_name='Webservers'), + ] + return groups, rows + + @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector') + @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='pass') + def test_folder_mode_ksm(self, mock_getpass, MockConnector): + """ksm mode: groups with ksm_config become roots, children nest under.""" + groups, rows = self._groups_and_rows() + mock_conn = MockConnector.return_value + mock_conn.extract_groups.return_value = groups + mock_conn.extract_connections.return_value = (rows, {}) + + cmd = PAMProjectKCMImportCommand() + with patch('builtins.print') as mock_print: + cmd.execute(MagicMock(), db_host='127.0.0.1', + folder_mode='ksm', dry_run=True) + + output = ''.join(str(c) for c in mock_print.call_args_list) + self.assertIn('DC-East', output) + self.assertIn('Webservers', output) + + @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector') + @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='pass') + def test_folder_mode_flat(self, mock_getpass, MockConnector): + """flat mode: each group is a standalone folder, no hierarchy.""" + groups, rows = self._groups_and_rows() + mock_conn = MockConnector.return_value + mock_conn.extract_groups.return_value = groups + 
mock_conn.extract_connections.return_value = (rows, {}) + + cmd = PAMProjectKCMImportCommand() + with patch('builtins.print') as mock_print: + cmd.execute(MagicMock(), db_host='127.0.0.1', + folder_mode='flat', dry_run=True) + + output = ''.join(str(c) for c in mock_print.call_args_list) + # Flat mode sanitizes slashes — no nested paths + self.assertIn('Webservers', output) + + @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector') + @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass', + return_value='pass') + def test_folder_mode_exact(self, mock_getpass, MockConnector): + """exact mode: full parent/child path preserved.""" + groups, rows = self._groups_and_rows() + mock_conn = MockConnector.return_value + mock_conn.extract_groups.return_value = groups + mock_conn.extract_connections.return_value = (rows, {}) + + cmd = PAMProjectKCMImportCommand() + with patch('builtins.print') as mock_print: + cmd.execute(MagicMock(), db_host='127.0.0.1', + folder_mode='exact', dry_run=True) + + output = ''.join(str(c) for c in mock_print.call_args_list) + # Exact mode preserves the full hierarchy + self.assertIn('DC-East', output) + + +class TestDockerContainerName(unittest.TestCase): + """--docker-container should be forwarded to docker inspect.""" + + def test_custom_container_name_passed(self): + """Custom container name is forwarded to subprocess.run.""" + mock_result = MagicMock() + mock_result.returncode = 0 + mock_result.stdout = 'MYSQL_PASSWORD=pass\nMYSQL_HOSTNAME=db\n' + + with patch('keepercommander.commands.pam_import.kcm_import.subprocess.run', return_value=mock_result) as mock_run: + PAMProjectKCMImportCommand._detect_docker_credentials( + 'mysql', container='my-custom-kcm') + + # First call is docker inspect with the container name + call_args = mock_run.call_args_list[0][0][0] + self.assertIn('my-custom-kcm', call_args) + + def test_default_container_name(self): + """Default container name should be 'guacamole'.""" + 
        # (continuation of a docker-detect test whose `def` line precedes this chunk)
        mock_result = MagicMock()
        mock_result.returncode = 0
        mock_result.stdout = 'MYSQL_PASSWORD=pass\n'

        with patch('keepercommander.commands.pam_import.kcm_import.subprocess.run', return_value=mock_result) as mock_run:
            PAMProjectKCMImportCommand._detect_docker_credentials('mysql')

            # First call is docker inspect with the container name
            call_args = mock_run.call_args_list[0][0][0]
            self.assertIn('guacamole', call_args)


class TestDBFlagPassthrough(unittest.TestCase):
    """--db-port, --db-name, --db-user should be forwarded to connector."""

    @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector')
    @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass',
           return_value='pass')
    def test_custom_port_forwarded(self, mock_getpass, MockConnector):
        # Connector mock returns one minimal connection row so execute() proceeds.
        mock_conn = MockConnector.return_value
        mock_conn.extract_groups.return_value = []
        mock_conn.extract_connections.return_value = ([
            _make_row(parameter_name='hostname', parameter_value='x')], {})

        cmd = PAMProjectKCMImportCommand()
        with patch('builtins.print'):
            cmd.execute(MagicMock(), db_host='127.0.0.1', db_port=3307,
                        dry_run=True)

        call_args = MockConnector.call_args
        self.assertEqual(call_args[0][2], 3307)  # port is 3rd positional arg

    @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector')
    @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass',
           return_value='pass')
    def test_custom_db_name_forwarded(self, mock_getpass, MockConnector):
        mock_conn = MockConnector.return_value
        mock_conn.extract_groups.return_value = []
        mock_conn.extract_connections.return_value = ([
            _make_row(parameter_name='hostname', parameter_value='x')], {})

        cmd = PAMProjectKCMImportCommand()
        with patch('builtins.print'):
            cmd.execute(MagicMock(), db_host='127.0.0.1',
                        db_name='custom_db', dry_run=True)

        call_args = MockConnector.call_args
        self.assertEqual(call_args[0][5], 'custom_db')  # database is 6th positional

    @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector')
    @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass',
           return_value='pass')
    def test_custom_db_user_forwarded(self, mock_getpass, MockConnector):
        mock_conn = MockConnector.return_value
        mock_conn.extract_groups.return_value = []
        mock_conn.extract_connections.return_value = ([
            _make_row(parameter_name='hostname', parameter_value='x')], {})

        cmd = PAMProjectKCMImportCommand()
        with patch('builtins.print'):
            cmd.execute(MagicMock(), db_host='127.0.0.1',
                        db_user='custom_user', dry_run=True)

        call_args = MockConnector.call_args
        self.assertEqual(call_args[0][3], 'custom_user')  # user is 4th positional

    @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector')
    @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass',
           return_value='pass')
    def test_default_mysql_port(self, mock_getpass, MockConnector):
        mock_conn = MockConnector.return_value
        mock_conn.extract_groups.return_value = []
        mock_conn.extract_connections.return_value = ([
            _make_row(parameter_name='hostname', parameter_value='x')], {})

        cmd = PAMProjectKCMImportCommand()
        with patch('builtins.print'):
            cmd.execute(MagicMock(), db_host='127.0.0.1',
                        db_type='mysql', dry_run=True)

        # No --db-port given: MySQL default 3306 expected.
        self.assertEqual(MockConnector.call_args[0][2], 3306)

    @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector')
    @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass',
           return_value='pass')
    def test_default_postgresql_port(self, mock_getpass, MockConnector):
        mock_conn = MockConnector.return_value
        mock_conn.extract_groups.return_value = []
        mock_conn.extract_connections.return_value = ([
            _make_row(parameter_name='hostname', parameter_value='x')], {})

        cmd = PAMProjectKCMImportCommand()
        with patch('builtins.print'):
            cmd.execute(MagicMock(), db_host='127.0.0.1',
                        db_type='postgresql', dry_run=True)

        # No --db-port given: PostgreSQL default 5432 expected.
        self.assertEqual(MockConnector.call_args[0][2], 5432)


class TestErrorPaths(unittest.TestCase):
    """Error conditions that should raise CommandError."""

    @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector')
    @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass',
           return_value='pass')
    def test_db_connection_failure(self, mock_getpass, MockConnector):
        """Database connection error should raise CommandError."""
        mock_conn = MockConnector.return_value
        mock_conn.connect.side_effect = Exception('Connection refused')

        cmd = PAMProjectKCMImportCommand()
        from keepercommander.error import CommandError
        with self.assertRaises(CommandError) as ctx:
            cmd.execute(MagicMock(), db_host='127.0.0.1', dry_run=True)
        self.assertIn('Database connection failed', str(ctx.exception))

    @patch('keepercommander.commands.pam_import.kcm_import.KCMDatabaseConnector')
    @patch('keepercommander.commands.pam_import.kcm_import.getpass.getpass',
           return_value='pass')
    def test_schema_validation_failure(self, mock_getpass, MockConnector):
        """Missing guacamole tables should raise CommandError."""
        from keepercommander.error import CommandError as CE
        mock_conn = MockConnector.return_value
        mock_conn.validate_schema.side_effect = CE(
            'kcm-import', 'KCM schema not found')

        cmd = PAMProjectKCMImportCommand()
        with self.assertRaises(CE) as ctx:
            cmd.execute(MagicMock(), db_host='127.0.0.1', dry_run=True)
        self.assertIn('schema', str(ctx.exception).lower())

    def test_docker_detect_invalid_port(self):
        """Invalid port value in Docker env should raise CommandError."""
        mock_result = MagicMock()
        mock_result.returncode = 0
        mock_result.stdout = 'MYSQL_PASSWORD=pass\nMYSQL_PORT=not_a_number\n'

        from keepercommander.error import CommandError
        with patch('keepercommander.commands.pam_import.kcm_import.subprocess.run', return_value=mock_result):
            with self.assertRaises(CommandError) as ctx:
                PAMProjectKCMImportCommand._detect_docker_credentials('mysql')
        self.assertIn('Invalid port', str(ctx.exception))

    def test_docker_detect_timeout(self):
        """Docker inspect timeout should raise CommandError."""
        import subprocess
        from keepercommander.error import CommandError
        with patch('keepercommander.commands.pam_import.kcm_import.subprocess.run', side_effect=subprocess.TimeoutExpired('docker', 10)):
            with self.assertRaises(CommandError) as ctx:
                PAMProjectKCMImportCommand._detect_docker_credentials('mysql')
        self.assertIn('Docker inspect failed', str(ctx.exception))

    def test_docker_not_installed(self):
        """Missing docker binary should raise CommandError."""
        from keepercommander.error import CommandError
        with patch('keepercommander.commands.pam_import.kcm_import.subprocess.run', side_effect=FileNotFoundError('docker')):
            with self.assertRaises(CommandError) as ctx:
                PAMProjectKCMImportCommand._detect_docker_credentials('mysql')
        self.assertIn('Docker inspect failed', str(ctx.exception))


class TestGatewayResolution(unittest.TestCase):
    """_resolve_gateway and _find_config_for_gateway tests.

    Gateway methods use lazy imports. We run these in a subprocess to avoid
    cross-test module pollution. For find_config tests, we patch at the
    point of use.
    """

    def test_gateway_not_found_raises(self):
        """--gateway with unknown name should raise CommandError."""
        from keepercommander.error import CommandError

        mock_gw_helper = MagicMock()
        mock_gw_helper.get_all_gateways.return_value = []

        mock_router_fn = MagicMock(return_value=MagicMock(controllers=[]))

        with patch('keepercommander.commands.pam.gateway_helper.get_all_gateways',
                   mock_gw_helper.get_all_gateways):
            with patch('keepercommander.commands.pam.router_helper.router_get_connected_gateways',
                       mock_router_fn):
                with self.assertRaises(CommandError) as ctx:
                    PAMProjectKCMImportCommand._resolve_gateway(
                        MagicMock(), 'nonexistent')
        # Error message should mention the miss and suggest --dry-run.
        self.assertIn('not found', str(ctx.exception))
        self.assertIn('--dry-run', str(ctx.exception))

    def test_offline_gateway_warns(self):
        """Selecting an offline gateway should log a warning."""
        from keepercommander import utils
        gw = MagicMock()
        gw.controllerName = 'MyGW'
        gw.controllerUid = b'\x01\x02\x03'

        # Gateway exists but is not among the router's connected controllers.
        with patch('keepercommander.commands.pam.gateway_helper.get_all_gateways',
                   return_value=[gw]):
            with patch('keepercommander.commands.pam.router_helper.router_get_connected_gateways',
                       return_value=MagicMock(controllers=[])):
                with patch.object(PAMProjectKCMImportCommand,
                                  '_find_config_for_gateway',
                                  return_value='config-uid-123'):
                    uid_str = utils.base64_url_encode(gw.controllerUid)
                    with self.assertLogs(level='WARNING') as cm:
                        result = PAMProjectKCMImportCommand._resolve_gateway(
                            MagicMock(), uid_str)
        self.assertTrue(any('OFFLINE' in msg for msg in cm.output))
        self.assertEqual(result, 'config-uid-123')

    def test_interactive_new_gateway_returns_none(self):
        """Choosing 'N' (new gateway) in interactive mode returns None."""
        with patch('keepercommander.commands.pam.gateway_helper.get_all_gateways',
                   return_value=[]):
            with patch('keepercommander.commands.pam.router_helper.router_get_connected_gateways',
                       return_value=MagicMock(controllers=[])):
                with patch('builtins.print'):
                    result = PAMProjectKCMImportCommand._resolve_gateway(
                        MagicMock(), None)
        self.assertIsNone(result)

    def test_find_config_no_match_raises(self):
        """No PAM config for gateway should raise CommandError."""
        mock_gw = MagicMock()
        mock_gw.controllerUid = b'\x01\x02\x03'
        mock_gw.controllerName = 'TestGW'

        params = MagicMock()
        # version 6 records are PAM configurations; version 3 should be skipped.
        params.record_cache.values.return_value = [
            {'version': 6, 'record_uid': 'rec-1'},
            {'version': 3, 'record_uid': 'rec-2'},
        ]

        from keepercommander.error import CommandError
        with patch('keepercommander.commands.pam.config_helper.configuration_controller_get',
                   return_value=None):
            with self.assertRaises(CommandError) as ctx:
                PAMProjectKCMImportCommand._find_config_for_gateway(params, mock_gw)
        self.assertIn('No PAM configuration', str(ctx.exception))

    def test_find_config_matches_gateway_uid(self):
        """Should return the record UID when controller matches gateway."""
        mock_gw = MagicMock()
        mock_gw.controllerUid = b'\x01\x02\x03'
        mock_gw.controllerName = 'TestGW'

        mock_controller = MagicMock()
        mock_controller.controllerUid = b'\x01\x02\x03'

        params = MagicMock()
        params.record_cache.values.return_value = [
            {'version': 6, 'record_uid': 'matching-rec-uid'},
        ]

        with patch('keepercommander.commands.pam.config_helper.configuration_controller_get',
                   return_value=mock_controller):
            result = PAMProjectKCMImportCommand._find_config_for_gateway(
                params, mock_gw)

        self.assertEqual(result, 'matching-rec-uid')


class TestIsLocalHost(unittest.TestCase):
    """Direct tests for _is_local_host classification."""

    def test_localhost(self):
        self.assertTrue(PAMProjectKCMImportCommand._is_local_host('localhost'))

    def test_ipv4_loopback(self):
        self.assertTrue(PAMProjectKCMImportCommand._is_local_host('127.0.0.1'))

    def test_ipv6_loopback(self):
        self.assertTrue(PAMProjectKCMImportCommand._is_local_host('::1'))

    def test_rfc1918_10(self):
        self.assertTrue(PAMProjectKCMImportCommand._is_local_host('10.0.1.50'))

    def test_rfc1918_172(self):
        self.assertTrue(PAMProjectKCMImportCommand._is_local_host('172.17.0.2'))

    def test_rfc1918_192(self):
        self.assertTrue(PAMProjectKCMImportCommand._is_local_host('192.168.1.100'))

    def test_public_ip(self):
        self.assertFalse(PAMProjectKCMImportCommand._is_local_host('8.8.8.8'))

    def test_public_hostname(self):
        self.assertFalse(PAMProjectKCMImportCommand._is_local_host('db.example.com'))

    def test_empty_string(self):
        self.assertFalse(PAMProjectKCMImportCommand._is_local_host(''))


class TestComputeBatchParams(unittest.TestCase):
    """_compute_batch_params auto-scales batch sizes by import volume."""

    def test_small_import(self):
        # Signature: (num_resources, num_users, override_size, override_delay)
        res, usr, delay = PAMProjectKCMImportCommand._compute_batch_params(
            20, 10, None, None)
        self.assertEqual(res, 2)
        self.assertEqual(usr, 8)
        self.assertEqual(delay, 12.0)

    def test_medium_import(self):
        res, usr, delay = PAMProjectKCMImportCommand._compute_batch_params(
            200, 60, None, None)
        self.assertEqual(res, 2)
        self.assertEqual(usr, 8)
        self.assertEqual(delay, 15.0)

    def test_large_import(self):
        res, usr, delay = PAMProjectKCMImportCommand._compute_batch_params(
            3000, 500, None, None)
        self.assertEqual(res, 1)
        self.assertEqual(usr, 6)
        self.assertEqual(delay, 15.0)

    def test_override_batch_size(self):
        res, usr, delay = PAMProjectKCMImportCommand._compute_batch_params(
            200, 60, override_size=10, override_delay=None)
        self.assertEqual(res, 10)
        # user batch and delay use auto-computed defaults
        self.assertEqual(usr, 8)
        self.assertEqual(delay, 15.0)

    def test_override_delay(self):
        res, usr, delay = PAMProjectKCMImportCommand._compute_batch_params(
            200, 60, override_size=None, override_delay=5.0)
        self.assertEqual(res, 2)
        self.assertEqual(delay, 5.0)


class TestAdaptiveThrottler(unittest.TestCase):
    """AdaptiveThrottler: probe-based adaptive batch parameter
    management."""

    def test_defaults(self):
        """Default state: enabled, standard batch params."""
        from keepercommander.commands.pam_import.kcm_import import AdaptiveThrottler
        t = AdaptiveThrottler()
        self.assertTrue(t.enabled)
        self.assertEqual(t.res_batch_size, 2)
        self.assertEqual(t.usr_batch_size, 8)
        self.assertEqual(t.res_delay, 15.0)
        self.assertEqual(t.usr_delay, 15.0)
        self.assertEqual(t.throttle_count, 0)

    def test_disabled_skips_adaptation(self):
        """When disabled, record_batch does nothing."""
        from keepercommander.commands.pam_import.kcm_import import AdaptiveThrottler
        t = AdaptiveThrottler(enabled=False)
        result = t.record_batch(100.0, 2, is_resource=True)
        self.assertFalse(result['adapted'])
        self.assertEqual(t.res_batch_size, 2)  # unchanged

    def test_compute_optimal_no_throttle(self):
        """No throttle: budget-based batch, call-proportional delays."""
        from keepercommander.commands.pam_import.kcm_import import AdaptiveThrottler
        t = AdaptiveThrottler()
        t.base_rtt = 3.0  # fast server
        t._compute_optimal_params(probe_throttled=False)
        # budget = 50 * 0.7 = 35
        # res_batch = 35/20 = 1, usr_batch = 35/8 = 4
        self.assertEqual(t._optimal_res_batch, 1)
        self.assertEqual(t._optimal_usr_batch, 4)
        # res_delay = max(3.0, 1 * 20 * 0.6) = 12.0
        self.assertAlmostEqual(t._optimal_res_delay, 12.0, places=1)
        # usr_delay = max(3.0, 4 * 8 * 0.6) = 19.2
        self.assertAlmostEqual(t._optimal_usr_delay, 19.2, places=1)

    def test_compute_optimal_with_throttle(self):
        """Probe throttle: conservative params."""
        from keepercommander.commands.pam_import.kcm_import import AdaptiveThrottler
        t = AdaptiveThrottler()
        t.base_rtt = 5.0
        t._compute_optimal_params(probe_throttled=True)
        self.assertEqual(t._optimal_res_batch, 1)
        # delay = max(15.0, 5.0 * 3) = 15.0
        self.assertEqual(t._optimal_res_delay, 15.0)
        self.assertEqual(t._optimal_usr_delay, 15.0)

    def test_adapt_down_on_slow_batch(self):
        """Throttle detected: only offending type's batch halved, delay doubled."""
        from keepercommander.commands.pam_import.kcm_import import AdaptiveThrottler
        t = AdaptiveThrottler()
        t.base_rtt = 2.0
        t.res_batch_size = 4
        t.usr_batch_size = 8
        t.res_delay = 10.0
        t.usr_delay = 5.0
        # Simulate a very slow resource batch (triggers throttle detection)
        result = t.record_batch(200.0, 4, is_resource=True)
        self.assertTrue(result['adapted'])
        self.assertEqual(result['direction'], 'down')
        self.assertEqual(t.res_batch_size, 2)  # halved
        self.assertEqual(t.usr_batch_size, 8)  # untouched
        self.assertEqual(t.res_delay, 20.0)  # doubled
        self.assertEqual(t.usr_delay, 5.0)  # untouched
        self.assertEqual(t.throttle_count, 1)

    def test_adapt_up_after_clean_batches(self):
        """Recovery: batch_size increases after N clean batches (type-specific)."""
        from keepercommander.commands.pam_import.kcm_import import AdaptiveThrottler
        t = AdaptiveThrottler()
        t.base_rtt = 2.0
        t._optimal_res_batch = 3
        t._optimal_usr_batch = 8
        t._optimal_res_delay = 5.0
        t._optimal_usr_delay = 5.0
        t.res_batch_size = 1  # currently below optimal
        t.res_delay = 20.0  # currently above optimal
        t.usr_delay = 5.0
        # Need CLEAN_BATCHES_TO_RECOVER (3) clean batches
        for i in range(2):
            result = t.record_batch(3.0, 1, is_resource=True)
            self.assertFalse(result['adapted'])
        # 3rd clean batch triggers recovery
        result = t.record_batch(3.0, 1, is_resource=True)
        self.assertTrue(result['adapted'])
        self.assertEqual(result['direction'], 'up')
        self.assertEqual(t.res_batch_size, 2)  # 1 -> 2
        self.assertLess(t.res_delay, 20.0)  # decreased
        self.assertEqual(t.usr_delay, 5.0)  # untouched

    def test_no_recover_above_optimal(self):
        """Don't speed up beyond optimal params."""
        from keepercommander.commands.pam_import.kcm_import import AdaptiveThrottler
        t = AdaptiveThrottler()
        t.base_rtt = 2.0
        t._optimal_res_batch = 2
        t._optimal_usr_batch = 8
        t._optimal_res_delay = 5.0
        t._optimal_usr_delay = 5.0
        t.res_batch_size = 2  # already at optimal
        t.usr_batch_size = 8
        t.res_delay = 5.0  # already at optimal
        for _ in range(5):
            result = t.record_batch(3.0, 2, is_resource=True)
            self.assertFalse(result['adapted'])
        # Params unchanged
        self.assertEqual(t.res_batch_size, 2)
        self.assertEqual(t.res_delay, 5.0)

    def test_adapt_down_min_batch_size(self):
        """batch_size never goes below 1."""
        from keepercommander.commands.pam_import.kcm_import import AdaptiveThrottler
        t = AdaptiveThrottler()
        t.base_rtt = 2.0
        t.res_batch_size = 1
        t.res_delay = 10.0
        # Simulate throttle
        t.record_batch(200.0, 1, is_resource=True)
        self.assertEqual(t.res_batch_size, 1)  # floor
        self.assertEqual(t.res_delay, 20.0)

    def test_adapt_down_max_delay(self):
        """delay never exceeds MAX_DELAY."""
        from keepercommander.commands.pam_import.kcm_import import AdaptiveThrottler
        t = AdaptiveThrottler()
        t.base_rtt = 2.0
        t.res_delay = 40.0
        t.record_batch(200.0, 1, is_resource=True)  # throttle
        self.assertEqual(t.res_delay, 60.0)  # MAX_DELAY
        t.record_batch(200.0, 1, is_resource=True)  # throttle again
        self.assertEqual(t.res_delay, 60.0)  # capped

    def test_summary(self):
        """get_summary returns all fields."""
        from keepercommander.commands.pam_import.kcm_import import AdaptiveThrottler
        t = AdaptiveThrottler()
        t.base_rtt = 2.5
        t.probe_rtts = [2.0, 2.5, 3.0]
        t.record_batch(3.0, 1, is_resource=True)
        s = t.get_summary()
        self.assertEqual(s['base_rtt'], 2.5)
        self.assertEqual(s['probe_rtts'], [2.0, 2.5, 3.0])
        self.assertEqual(s['total_batches'], 1)
        self.assertIn('final_res_batch', s)
        self.assertIn('final_res_delay', s)
        self.assertIn('final_usr_delay', s)

    def test_probe_skipped_when_disabled(self):
        """Probe skipped when auto_throttle=False."""
        from keepercommander.commands.pam_import.kcm_import import AdaptiveThrottler
        t = AdaptiveThrottler(enabled=False)
        result = t.run_probe(None, None, {'pam_data': {'resources': [], 'users': []}}, None)
        self.assertTrue(result['skipped'])

    def test_probe_skipped_no_records(self):
        """Probe skipped when no records to probe with."""
        from keepercommander.commands.pam_import.kcm_import import AdaptiveThrottler
        t = AdaptiveThrottler()
        result = t.run_probe(None, 'cfg', {'pam_data': {'resources': [], 'users': []}}, None)
        self.assertTrue(result['skipped'])

    def test_user_batch_adaptation_independent(self):
        """User throttle only affects user params, not resource params."""
        from keepercommander.commands.pam_import.kcm_import import AdaptiveThrottler
        t = AdaptiveThrottler()
        t.base_rtt = 1.0
        t.usr_batch_size = 6
        t.res_batch_size = 2
        t.usr_delay = 10.0
        t.res_delay = 8.0
        # Throttle on user batch
        t.record_batch(200.0, 6, is_resource=False)
        self.assertEqual(t.usr_batch_size, 3)  # halved
        self.assertEqual(t.res_batch_size, 2)  # untouched
        self.assertEqual(t.usr_delay, 20.0)  # doubled
        self.assertEqual(t.res_delay, 8.0)  # untouched

    def test_consecutive_clean_resets_on_throttle(self):
        """Throttle resets the clean batch counter."""
        from keepercommander.commands.pam_import.kcm_import import AdaptiveThrottler
        t = AdaptiveThrottler()
        t.base_rtt = 2.0
        t._optimal_res_batch = 5
        t.res_batch_size = 1
        t.res_delay = 15.0
        # 2 clean batches
        t.record_batch(3.0, 1, is_resource=True)
        t.record_batch(3.0, 1, is_resource=True)
        self.assertEqual(t.consecutive_clean, 2)
        # Throttle resets counter
        t.record_batch(200.0, 1, is_resource=True)
        self.assertEqual(t.consecutive_clean, 0)


class TestRewriteFolderPaths(unittest.TestCase):
    """_rewrite_folder_paths should fix roots when edit.py adds #N suffix."""

    def test_rewrite_resources_and_users(self):
        """Should replace project_name prefix with actual folder names."""
        pam_json = {
            'pam_data': {
                'resources': [
                    {'folder_path': 'Proj - Resources/GroupA/Sub1', 'title': 'r1'},
                    {'folder_path': 'Proj - Resources', 'title': 'r2'},
                ],
                'users': [
                    {'folder_path': 'Proj -
Users/GroupA/Sub1', 'title': 'u1'},
                    {'folder_path': 'Proj - Users', 'title': 'u2'},
                ],
            }
        }
        PAMProjectKCMImportCommand._rewrite_folder_paths(
            pam_json, 'Proj #2 - Resources', 'Proj #2 - Users', 'Proj')

        self.assertEqual(pam_json['pam_data']['resources'][0]['folder_path'],
                         'Proj #2 - Resources/GroupA/Sub1')
        self.assertEqual(pam_json['pam_data']['resources'][1]['folder_path'],
                         'Proj #2 - Resources')
        self.assertEqual(pam_json['pam_data']['users'][0]['folder_path'],
                         'Proj #2 - Users/GroupA/Sub1')
        self.assertEqual(pam_json['pam_data']['users'][1]['folder_path'],
                         'Proj #2 - Users')

    def test_noop_when_names_match(self):
        """No rewriting when actual names match computed names."""
        pam_json = {
            'pam_data': {
                'resources': [
                    {'folder_path': 'Proj - Resources/A', 'title': 'r1'},
                ],
                'users': [],
            }
        }
        PAMProjectKCMImportCommand._rewrite_folder_paths(
            pam_json, 'Proj - Resources', 'Proj - Users', 'Proj')

        self.assertEqual(pam_json['pam_data']['resources'][0]['folder_path'],
                         'Proj - Resources/A')

    def test_sftp_subpaths_rewritten(self):
        """SFTP sub-paths under resources should also be rewritten."""
        pam_json = {
            'pam_data': {
                'resources': [
                    {'folder_path': 'P - Resources/G/SFTP Resources', 'title': 'sftp1'},
                ],
                'users': [
                    {'folder_path': 'P - Users/G/SFTP Users', 'title': 'sftp_u1'},
                ],
            }
        }
        PAMProjectKCMImportCommand._rewrite_folder_paths(
            pam_json, 'P #3 - Resources', 'P #3 - Users', 'P')

        self.assertEqual(pam_json['pam_data']['resources'][0]['folder_path'],
                         'P #3 - Resources/G/SFTP Resources')
        self.assertEqual(pam_json['pam_data']['users'][0]['folder_path'],
                         'P #3 - Users/G/SFTP Users')

    def test_empty_pam_data(self):
        """Should handle empty resources/users gracefully."""
        pam_json = {'pam_data': {'resources': [], 'users': []}}
        PAMProjectKCMImportCommand._rewrite_folder_paths(
            pam_json, 'X - Resources', 'X - Users', 'X')
        # No exception = pass


class TestDiscoverSharedFolderNames(unittest.TestCase):
    """_discover_shared_folder_names should find Resources/Users from KSM app."""

    def test_discovers_by_convention(self):
        """Should match folder names ending with '- Resources' and '- Users'."""
        mock_controller = MagicMock()
        mock_controller.controllerUid = b'\x01\x02'

        mock_gw = MagicMock()
        mock_gw.controllerUid = b'\x01\x02'
        mock_gw.applicationUid = b'\x03\x04'

        mock_extend_instance = MagicMock()
        mock_extend_instance.get_app_shared_folders.return_value = [
            {'name': 'MyProject - Resources', 'uid': 'sf-1'},
            {'name': 'MyProject - Users', 'uid': 'sf-2'},
        ]
        mock_extend_module = MagicMock()
        mock_extend_module.PAMProjectExtendCommand.return_value = mock_extend_instance

        mock_rec = MagicMock()
        # Must be valid base64url for CommonHelperMethods.url_safe_str_to_bytes
        mock_rec.record_uid = 'AQIDBA'

        params = MagicMock()

        # Mock extend module via sys.modules to avoid importing pydantic
        # (not available in all CI environments). Use direct patch for others.
        with patch.dict('sys.modules',
                        {'keepercommander.commands.pam_import.extend': mock_extend_module}):
            with patch('keepercommander.api.sync_down'):
                with patch('keepercommander.commands.pam_import.kcm_import.vault.KeeperRecord.load',
                           return_value=mock_rec):
                    with patch('keepercommander.commands.pam.config_helper.configuration_controller_get',
                               return_value=mock_controller):
                        with patch('keepercommander.commands.pam.gateway_helper.get_all_gateways',
                                   return_value=[mock_gw]):
                            res, usr = PAMProjectKCMImportCommand._discover_shared_folder_names(
                                params, 'AQIDBA')

        self.assertEqual(res, 'MyProject - Resources')
        self.assertEqual(usr, 'MyProject - Users')

    def test_returns_none_for_missing_config(self):
        """Should return (None, None) if config UID not found."""
        params = MagicMock()

        with patch('keepercommander.api.sync_down'):
            with patch('keepercommander.commands.pam_import.kcm_import.vault.KeeperRecord.load',
                       return_value=None):
                res, usr = PAMProjectKCMImportCommand._discover_shared_folder_names(
                    params, 'nonexistent')

        self.assertIsNone(res)
        self.assertIsNone(usr)


class TestKCMCleanupCommand(unittest.TestCase):
    """Tests for PAMProjectKCMCleanupCommand."""

    def test_missing_args_raises(self):
        """Should require --name or --config."""
        cmd = PAMProjectKCMCleanupCommand()
        params = MagicMock()
        with self.assertRaises(CommandError) as ctx:
            cmd.execute(params)
        self.assertIn('--name or --config', str(ctx.exception))

    @patch('keepercommander.api.sync_down')
    def test_config_not_found_raises(self, mock_sync):
        """Should raise if config UID doesn't exist in vault."""
        cmd = PAMProjectKCMCleanupCommand()
        params = MagicMock()
        with patch('keepercommander.commands.pam_import.kcm_import.vault.KeeperRecord.load',
                   return_value=None):
            with self.assertRaises(CommandError) as ctx:
                cmd.execute(params, config_uid='nonexistent')
        self.assertIn('not found', str(ctx.exception))
    @patch('keepercommander.api.sync_down')
    def test_project_name_not_found_raises(self, mock_sync):
        """Should raise if project name doesn't match any config."""
        cmd = PAMProjectKCMCleanupCommand()
        params = MagicMock()
        params.shared_folder_cache = {}
        params.folder_cache = {}
        params.subfolder_record_cache = {}

        # vault_extensions is imported locally inside execute()
        mock_ve = MagicMock()
        mock_ve.find_records.return_value = []
        with patch.dict('sys.modules',
                        {'keepercommander.vault_extensions': mock_ve}):
            with self.assertRaises(CommandError) as ctx:
                cmd.execute(params, project_name='DoesNotExist')
        self.assertIn('not found', str(ctx.exception))

    @patch('keepercommander.api.sync_down')
    @patch('keepercommander.api.communicate')
    @patch('keepercommander.api.delete_record')
    def test_dry_run_no_deletions(self, mock_del, mock_comm, mock_sync):
        """Dry run should not delete anything."""
        cmd = PAMProjectKCMCleanupCommand()
        params = MagicMock()
        params.shared_folder_cache = {}
        params.folder_cache = {}
        params.subfolder_record_cache = {}

        mock_config = MagicMock()
        mock_config.title = 'TestProject Configuration'
        mock_config.record_uid = 'cfg_uid_123'

        mock_config_helper = MagicMock()
        mock_config_helper.configuration_controller_get.side_effect = Exception('skip')
        mock_gw_helper = MagicMock()

        with patch('keepercommander.commands.pam_import.kcm_import.vault.KeeperRecord.load',
                   return_value=mock_config):
            with patch.dict('sys.modules', {
                    'keepercommander.commands.pam.config_helper': mock_config_helper,
                    'keepercommander.commands.pam.gateway_helper': mock_gw_helper,
            }):
                cmd.execute(params, config_uid='cfg_uid_123', dry_run=True)

        # Neither the API nor record deletion may be touched on --dry-run.
        mock_comm.assert_not_called()
        mock_del.assert_not_called()


class TestDetectDbType(unittest.TestCase):
    """Tests for _detect_db_type_from_docker."""

    @patch('subprocess.run')
    def test_postgresql_detected(self, mock_run):
        mock_run.return_value = MagicMock(
            returncode=0,
            stdout='POSTGRES_USER=guac\nPOSTGRES_PASSWORD=secret\n')
        result = PAMProjectKCMImportCommand._detect_db_type_from_docker('db-1')
        self.assertEqual(result, 'postgresql')

    @patch('subprocess.run')
    def test_mysql_detected(self, mock_run):
        mock_run.return_value = MagicMock(
            returncode=0,
            stdout='MYSQL_USER=guac\nMYSQL_PASSWORD=secret\n')
        result = PAMProjectKCMImportCommand._detect_db_type_from_docker('db-1')
        self.assertEqual(result, 'mysql')

    @patch('subprocess.run')
    def test_both_prefers_postgresql(self, mock_run):
        mock_run.return_value = MagicMock(
            returncode=0,
            stdout='POSTGRES_PASSWORD=x\nMYSQL_PASSWORD=y\n')
        result = PAMProjectKCMImportCommand._detect_db_type_from_docker('db-1')
        self.assertEqual(result, 'postgresql')

    @patch('subprocess.run')
    def test_fallback_mysql(self, mock_run):
        # No DB-specific env vars at all: defaults to mysql.
        mock_run.return_value = MagicMock(
            returncode=0, stdout='PATH=/usr/bin\nHOME=/root\n')
        result = PAMProjectKCMImportCommand._detect_db_type_from_docker('db-1')
        self.assertEqual(result, 'mysql')

    @patch('subprocess.run', side_effect=FileNotFoundError)
    def test_docker_not_found(self, mock_run):
        result = PAMProjectKCMImportCommand._detect_db_type_from_docker('db-1')
        self.assertEqual(result, 'mysql')


class TestDiscoverDockerContainer(unittest.TestCase):
    """Tests for _discover_docker_container."""

    @patch('subprocess.run')
    def test_single_candidate(self, mock_run):
        def side_effect(cmd, **kw):
            if 'ps' in cmd:
                return MagicMock(returncode=0, stdout='web-1\ndb-1\nredis-1\n')
            # docker inspect for each container
            name = cmd[-1]
            if name == 'db-1':
                return MagicMock(returncode=0,
                                 stdout='POSTGRES_PASSWORD=secret\n')
            return MagicMock(returncode=0, stdout='PATH=/usr/bin\n')
        mock_run.side_effect = side_effect
        result = PAMProjectKCMImportCommand._discover_docker_container()
        self.assertEqual(result, 'db-1')

    @patch('subprocess.run')
    def test_kcm_db_preferred(self, mock_run):
        """Prefer container with 'kcm' + 'db' in name over others."""
        def side_effect(cmd, **kw):
            if 'ps' in cmd:
                return MagicMock(returncode=0,
                                 stdout='app-db-1\nkcm-setup-db-1\nother-db-1\n')
            return MagicMock(returncode=0,
                             stdout='POSTGRES_PASSWORD=secret\n')
        mock_run.side_effect = side_effect
        result = PAMProjectKCMImportCommand._discover_docker_container()
        self.assertEqual(result, 'kcm-setup-db-1')

    @patch('subprocess.run')
    def test_guacamole_db_preferred(self, mock_run):
        """Prefer container with 'guacamole' + 'db' over just 'guacamole'."""
        def side_effect(cmd, **kw):
            if 'ps' in cmd:
                return MagicMock(returncode=0,
                                 stdout='app-db-1\nguacamole-db-1\nguacamole-app-1\n')
            return MagicMock(returncode=0,
                             stdout='POSTGRES_PASSWORD=secret\n')
        mock_run.side_effect = side_effect
        result = PAMProjectKCMImportCommand._discover_docker_container()
        self.assertEqual(result, 'guacamole-db-1')

    @patch('subprocess.run')
    def test_no_candidates_raises(self, mock_run):
        def side_effect(cmd, **kw):
            if 'ps' in cmd:
                return MagicMock(returncode=0, stdout='web-1\nredis-1\n')
            return MagicMock(returncode=0, stdout='PATH=/usr/bin\n')
        mock_run.side_effect = side_effect
        with self.assertRaises(CommandError):
            PAMProjectKCMImportCommand._discover_docker_container()


class TestBuildRedactedCommand(unittest.TestCase):
    """Tests for _build_redacted_command."""

    def test_redacts_password_record(self):
        kwargs = {
            'docker_detect': True,
            'db_type': 'postgresql',
            'db_host': '192.168.64.5',
            'db_password_record': 'secret-uid-123',
            'auto_confirm': True,
        }
        result = PAMProjectKCMImportCommand._build_redacted_command(kwargs)
        self.assertIn('--docker-detect', result)
        self.assertIn('--db-type "postgresql"', result)
        self.assertIn('--db-host "192.168.64.5"', result)
        self.assertIn('[REDACTED]', result)
        self.assertNotIn('secret-uid-123', result)
        self.assertIn('--yes', result)

    def test_empty_kwargs(self):
        result =
PAMProjectKCMImportCommand._build_redacted_command({}) + self.assertEqual(result, 'pam project kcm-import') + + +class TestBuildImportReport(unittest.TestCase): + """Tests for _build_import_report.""" + + def test_report_contains_sections(self): + report = PAMProjectKCMImportCommand._build_import_report( + project_name='Test Project', + config_uid='cfg-123', + is_new_project=True, + assets={'gateway_name': 'Test GW', 'gateway_uid': 'gw-1', + 'app_uid': 'app-1', + 'res_sf_name': 'Test - Resources', 'res_sf_uid': 'sf-1', + 'usr_sf_name': 'Test - Users', 'usr_sf_uid': 'sf-2'}, + num_resources=100, + num_users=50, + created=145, + expected=150, + total_time=3600.0, + throttler_summary={ + 'throttle_count': 2, 'total_batches': 50, + 'final_res_batch': 1, 'final_res_delay': 12.0, + 'final_usr_batch': 4, 'final_usr_delay': 19.0, + 'base_rtt': 1.5}, + warnings=['5 records missing password'], + kwargs={'docker_detect': True, 'db_type': 'postgresql'}, + ) + self.assertIn('Test Project', report) + self.assertIn('PROJECT ASSETS', report) + self.assertIn('Test GW', report) + self.assertIn('IMPORT RESULTS', report) + self.assertIn('145', report) + self.assertIn('THROTTLE STATISTICS', report) + self.assertIn('WARNINGS', report) + self.assertIn('GATEWAY DEPLOYMENT', report) + self.assertIn('WHAT TO DO NEXT', report) + self.assertIn('COMMAND USED (redacted)', report) + + def test_no_gateway_deploy_for_extend(self): + report = PAMProjectKCMImportCommand._build_import_report( + project_name='Existing', + config_uid='cfg-456', + is_new_project=False, + assets={}, + num_resources=10, + num_users=5, + created=15, + expected=15, + total_time=60.0, + throttler_summary={'throttle_count': 0, 'total_batches': 5, + 'final_res_batch': 2, 'final_res_delay': 12.0, + 'final_usr_batch': 4, 'final_usr_delay': 19.0}, + warnings=[], + kwargs={}, + ) + self.assertNotIn('GATEWAY DEPLOYMENT', report) + self.assertIn('EXISTING', report) + + def test_report_includes_gateway_token(self): + """Gateway token 
should appear in report when captured.""" + report = PAMProjectKCMImportCommand._build_import_report( + project_name='Token Project', + config_uid='cfg-789', + is_new_project=True, + assets={'gateway_name': 'GW', 'gateway_uid': 'gw-1', + 'gateway_token': 'MY_SECRET_TOKEN_123'}, + num_resources=5, num_users=2, created=7, expected=7, + total_time=30.0, throttler_summary=None, warnings=[], + kwargs={}, + ) + self.assertIn('GATEWAY DEPLOYMENT', report) + self.assertIn('Access Token: MY_SECRET_TOKEN_123', report) + self.assertIn('GATEWAY_CONFIG="MY_SECRET_TOKEN_123"', report) + + def test_report_missing_token_placeholder(self): + """When no token captured, report shows placeholder.""" + report = PAMProjectKCMImportCommand._build_import_report( + project_name='No Token', + config_uid='cfg-000', + is_new_project=True, + assets={}, + num_resources=5, num_users=2, created=7, expected=7, + total_time=30.0, throttler_summary=None, warnings=[], + kwargs={}, + ) + self.assertIn('Token not captured', report) + self.assertIn('', report) + + def test_report_per_record_breakdown(self): + """Per-record tracking should render in the report.""" + results = [ + {'name': 'Server1', 'type': 'pamMachine', 'phase': 'resource', + 'status': 'ok', 'reason': ''}, + {'name': 'Server2', 'type': 'pamMachine', 'phase': 'resource', + 'status': 'ok', 'reason': ''}, + {'name': 'DB1', 'type': 'pamDatabase', 'phase': 'resource', + 'status': 'skipped', 'reason': 'missing field X'}, + {'name': 'admin', 'type': 'pamUser', 'phase': 'user', + 'status': 'ok', 'reason': ''}, + {'name': 'broke_user', 'type': 'login', 'phase': 'user', + 'status': 'error', 'reason': 'API timeout'}, + ] + report = PAMProjectKCMImportCommand._build_import_report( + project_name='Detail Test', + config_uid='cfg-det', + is_new_project=False, + assets={}, + num_resources=3, num_users=2, created=3, expected=5, + total_time=120.0, throttler_summary=None, warnings=[], + kwargs={}, import_results=results, + ) + self.assertIn('FAILED / 
SKIPPED RECORDS', report) + self.assertIn('RECORD BREAKDOWN', report) + self.assertIn('SKIP', report) + self.assertIn('ERR', report) + self.assertIn('pamMachine', report) + self.assertIn('pamDatabase', report) + self.assertIn('DB1', report) + self.assertIn('broke_user', report) + # Check the breakdown table has TOTAL row + self.assertIn('TOTAL', report) + + def test_report_no_failures_no_detail_section(self): + """When all records succeed, no FAILED section shown.""" + results = [ + {'name': 'Server1', 'type': 'pamMachine', 'phase': 'resource', + 'status': 'ok', 'reason': ''}, + ] + report = PAMProjectKCMImportCommand._build_import_report( + project_name='All OK', + config_uid='cfg-ok', + is_new_project=False, + assets={}, + num_resources=1, num_users=0, created=1, expected=1, + total_time=10.0, throttler_summary=None, warnings=[], + kwargs={}, import_results=results, + ) + self.assertNotIn('FAILED / SKIPPED RECORDS', report) + self.assertIn('RECORD BREAKDOWN', report) + + +class TestGatewayTokenParsing(unittest.TestCase): + """Test gateway token extraction from captured stdout.""" + + def test_parses_token_from_json_output(self): + """Should extract access_token from edit.py's JSON output.""" + import re + captured = json.dumps({ + 'access_token': 'CAPTURED_TOKEN_XYZ', + 'device_uid': 'dev-1', + }, indent=2) + # Replicate the parsing logic from _create_project_skeleton + gateway_token = '' + for line in captured.splitlines(): + stripped = line.strip() + if stripped.startswith('{'): + try: + parsed = json.loads(stripped) + if 'access_token' in parsed: + gateway_token = parsed['access_token'] + break + except json.JSONDecodeError: + continue + if not gateway_token: + match = re.search( + r'\{[^{}]*"access_token"\s*:\s*"([^"]*)"[^{}]*\}', + captured, re.DOTALL) + if match: + gateway_token = match.group(1) + self.assertEqual(gateway_token, 'CAPTURED_TOKEN_XYZ') + + def test_returns_empty_for_no_token(self): + """Should return empty string when no access_token in 
output.""" + import re + captured = 'some random output\nno json here' + gateway_token = '' + for line in captured.splitlines(): + stripped = line.strip() + if stripped.startswith('{'): + try: + parsed = json.loads(stripped) + if 'access_token' in parsed: + gateway_token = parsed['access_token'] + break + except json.JSONDecodeError: + continue + if not gateway_token: + match = re.search( + r'\{[^{}]*"access_token"\s*:\s*"([^"]*)"[^{}]*\}', + captured, re.DOTALL) + if match: + gateway_token = match.group(1) + self.assertEqual(gateway_token, '') + + def test_parses_multiline_json(self): + """Should handle multi-line pretty-printed JSON.""" + import re + captured = 'Starting import...\n' + json.dumps({ + 'access_token': 'MULTI_LINE_TOKEN', + 'device_uid': 'dev-2', + 'shared_folder_resources_uid': 'sf-1', + }, indent=2) + '\nDone.' + gateway_token = '' + for line in captured.splitlines(): + stripped = line.strip() + if stripped.startswith('{'): + try: + parsed = json.loads(stripped) + if 'access_token' in parsed: + gateway_token = parsed['access_token'] + break + except json.JSONDecodeError: + continue + if not gateway_token: + match = re.search( + r'\{[^{}]*"access_token"\s*:\s*"([^"]*)"[^{}]*\}', + captured, re.DOTALL) + if match: + gateway_token = match.group(1) + self.assertEqual(gateway_token, 'MULTI_LINE_TOKEN') + + +class TestFilterByGroups(unittest.TestCase): + """Tests for _filter_by_groups.""" + + def _make_groups(self): + return [ + {'connection_group_id': 1, 'connection_group_name': 'Production', + 'parent_id': None, 'ksm_config': None}, + {'connection_group_id': 2, 'connection_group_name': 'Staging', + 'parent_id': None, 'ksm_config': None}, + {'connection_group_id': 3, 'connection_group_name': 'SSH Connections', + 'parent_id': 1, 'ksm_config': None}, + {'connection_group_id': 4, 'connection_group_name': 'Incomplete Stuff', + 'parent_id': None, 'ksm_config': None}, + {'connection_group_id': 5, 'connection_group_name': 'Test Lab', + 'parent_id': 2, 
'ksm_config': None}, + ] + + def _make_items(self): + resources = [ + {'title': 'Prod SSH 1', '_group_id': 3}, + {'title': 'Prod SSH 2', '_group_id': 3}, + {'title': 'Staging DB', '_group_id': 2}, + {'title': 'Root Item', '_group_id': None}, + {'title': 'Incomplete Box', '_group_id': 4}, + {'title': 'Test VM', '_group_id': 5}, + ] + users = [ + {'title': 'admin', '_group_id': 1}, + {'title': 'tester', '_group_id': 5}, + ] + return resources, users + + def test_include_filter_wildcard(self): + groups = self._make_groups() + resolver = KCMGroupResolver(groups, mode='exact') + resources, users = self._make_items() + filtered_res, filtered_usr = PAMProjectKCMImportCommand._filter_by_groups( + resources, users, groups, resolver, + include_pattern='Production*', exclude_pattern='') + titles = [r['title'] for r in filtered_res] + self.assertIn('Prod SSH 1', titles) + self.assertIn('Prod SSH 2', titles) + self.assertNotIn('Staging DB', titles) + self.assertNotIn('Root Item', titles) + # admin is in group 1 (Production) — should match via path segment + usr_titles = [u['title'] for u in filtered_usr] + self.assertIn('admin', usr_titles) + + def test_exclude_filter_wildcard(self): + groups = self._make_groups() + resolver = KCMGroupResolver(groups, mode='exact') + resources, users = self._make_items() + filtered_res, filtered_usr = PAMProjectKCMImportCommand._filter_by_groups( + resources, users, groups, resolver, + include_pattern='', exclude_pattern='Incomplete*,Test*') + titles = [r['title'] for r in filtered_res] + self.assertIn('Prod SSH 1', titles) + self.assertIn('Staging DB', titles) + self.assertNotIn('Incomplete Box', titles) + self.assertNotIn('Test VM', titles) + # Root items kept when only --exclude-groups is active (no --groups) + self.assertIn('Root Item', titles) + + def test_include_excludes_root_items(self): + groups = self._make_groups() + resolver = KCMGroupResolver(groups, mode='exact') + resources, users = self._make_items() + filtered_res, _ = 
PAMProjectKCMImportCommand._filter_by_groups( + resources, users, groups, resolver, + include_pattern='Staging*', exclude_pattern='') + titles = [r['title'] for r in filtered_res] + self.assertNotIn('Root Item', titles) + self.assertIn('Staging DB', titles) + self.assertIn('Test VM', titles) # child of Staging + + def test_combined_include_and_exclude(self): + groups = self._make_groups() + resolver = KCMGroupResolver(groups, mode='exact') + resources, users = self._make_items() + filtered_res, _ = PAMProjectKCMImportCommand._filter_by_groups( + resources, users, groups, resolver, + include_pattern='Staging*', + exclude_pattern='Test*') + titles = [r['title'] for r in filtered_res] + self.assertIn('Staging DB', titles) + self.assertNotIn('Test VM', titles) # excluded by Test* + + def test_no_filters_returns_all(self): + groups = self._make_groups() + resolver = KCMGroupResolver(groups, mode='exact') + resources, users = self._make_items() + filtered_res, filtered_usr = PAMProjectKCMImportCommand._filter_by_groups( + resources, users, groups, resolver, + include_pattern='', exclude_pattern='') + self.assertEqual(len(filtered_res), len(resources)) + self.assertEqual(len(filtered_usr), len(users)) + + def test_no_match_returns_empty(self): + groups = self._make_groups() + resolver = KCMGroupResolver(groups, mode='exact') + resources, users = self._make_items() + filtered_res, filtered_usr = PAMProjectKCMImportCommand._filter_by_groups( + resources, users, groups, resolver, + include_pattern='NonExistent*', exclude_pattern='') + self.assertEqual(len(filtered_res), 0) + self.assertEqual(len(filtered_usr), 0) + + +class TestGetContainerIp(unittest.TestCase): + """Tests for _get_container_ip.""" + + @patch('subprocess.run') + def test_returns_first_ip(self, mock_run): + mock_run.return_value = MagicMock( + returncode=0, stdout='192.168.64.5 172.17.0.3 ') + ip = PAMProjectKCMImportCommand._get_container_ip('mydb') + self.assertEqual(ip, '192.168.64.5') + + 
@patch('subprocess.run') + def test_returns_empty_on_failure(self, mock_run): + mock_run.return_value = MagicMock(returncode=1, stdout='') + ip = PAMProjectKCMImportCommand._get_container_ip('missing') + self.assertEqual(ip, '') + + @patch('subprocess.run') + def test_handles_timeout(self, mock_run): + import subprocess + mock_run.side_effect = subprocess.TimeoutExpired('docker', 10) + ip = PAMProjectKCMImportCommand._get_container_ip('slow') + self.assertEqual(ip, '') + + +if __name__ == '__main__': + unittest.main() diff --git a/unit-tests/pam/test_pam_import_dedup.py b/unit-tests/pam/test_pam_import_dedup.py new file mode 100644 index 000000000..a11c64b84 --- /dev/null +++ b/unit-tests/pam/test_pam_import_dedup.py @@ -0,0 +1,88 @@ +"""Test that pam project import rejects duplicate UIDs.""" +import logging +import sys +import unittest + +if sys.version_info >= (3, 8): + from keepercommander.commands.pam_import.edit import PAMProjectImportCommand + + def _minimal_project(resources, users=None): + """Build a minimal project dict matching the structure process_data expects.""" + return { + "data": { + "pam_data": { + "resources": resources, + "users": users or [], + "rotation_profiles": {}, + } + }, + "pam_config": {"pam_config_uid": "test-config-uid"}, + "folders": { + "resources_folder_uid": "sfr-test", + "users_folder_uid": "sfu-test", + }, + } + + class TestPAMImportDuplicateUid(unittest.TestCase): + """process_data must abort when the import JSON contains duplicate uid values.""" + + def test_duplicate_uid_logs_error_and_returns(self): + """process_data aborts with logging.error when two resources share a uid.""" + from unittest.mock import MagicMock + project = _minimal_project([ + {'type': 'pamMachine', 'title': 'Machine A', 'uid': 'duplicate-uid-1'}, + {'type': 'pamMachine', 'title': 'Machine B', 'uid': 'duplicate-uid-1'}, + ]) + cmd = PAMProjectImportCommand() + params = MagicMock() + params.record_cache = {} + params.shared_folder_cache = {} + 
params.folder_cache = {} + + # assertLogs with no logger name captures from root logger (where logging.error writes) + with self.assertLogs(level='ERROR') as log_ctx: + try: + cmd.process_data(params, project) + except Exception: + pass # early return path may surface as exception in some code paths + + self.assertTrue( + any('duplicate uid' in msg.lower() or 'duplicate-uid-1' in msg + for msg in log_ctx.output), + f'Expected duplicate UID error in logs, got: {log_ctx.output}' + ) + + def test_unique_uids_pass_dedup_check(self): + """process_data does NOT emit a duplicate-uid error when all UIDs are unique.""" + from unittest.mock import MagicMock + import io + + project = _minimal_project([ + {'type': 'pamMachine', 'title': 'Machine A', 'uid': 'uid-alpha'}, + {'type': 'pamMachine', 'title': 'Machine B', 'uid': 'uid-beta'}, + ]) + cmd = PAMProjectImportCommand() + params = MagicMock() + params.record_cache = {} + params.shared_folder_cache = {} + params.folder_cache = {} + + stream = io.StringIO() + handler = logging.StreamHandler(stream) + handler.setLevel(logging.ERROR) + root_logger = logging.getLogger() + root_logger.addHandler(handler) + try: + try: + cmd.process_data(params, project) + except Exception: + pass + output = stream.getvalue() + self.assertNotIn('duplicate uid', output.lower(), + f'Unexpected duplicate UID error for unique UIDs: {output}') + finally: + root_logger.removeHandler(handler) + + +if __name__ == '__main__': + unittest.main() diff --git a/unit-tests/pam/test_pam_project_export.py b/unit-tests/pam/test_pam_project_export.py new file mode 100644 index 000000000..d10ecac60 --- /dev/null +++ b/unit-tests/pam/test_pam_project_export.py @@ -0,0 +1,397 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' = (3, 8): + + from unittest.mock import MagicMock + + class TestPAMProjectExportCommand(unittest.TestCase): + + def setUp(self): + from keepercommander.commands.pam_import.export import PAMProjectExportCommand + self.cmd = 
PAMProjectExportCommand() + self.params = MagicMock() + self.params.record_cache = {uid: {} for uid in _RECORDS} + + def _execute(self, project_uid=CONFIG_UID, output=None): + """Run execute() with vault.KeeperRecord.load mocked.""" + with patch("keepercommander.vault.KeeperRecord.load", side_effect=_fake_load): + with patch.object(self.cmd, "_get_allowed_settings", + return_value=dict(_DEFAULT_ALLOWED)): + kwargs = {"project_uid": project_uid} + if output: + kwargs["output"] = output + return self.cmd.execute(self.params, **kwargs) + + # ── basic output ────────────────────────────────────────────── + + def test_returns_string(self): + result = self._execute() + self.assertIsInstance(result, str, + "execute() should return a JSON string when --output is not set") + + def test_valid_json(self): + parsed = json.loads(self._execute()) + self.assertIsInstance(parsed, dict) + + # ── required top-level keys ─────────────────────────────────── + + def test_has_project_key(self): + parsed = json.loads(self._execute()) + self.assertIn("project", parsed) + self.assertEqual(parsed["project"], "Test Project") + + def test_has_pam_configuration_key(self): + parsed = json.loads(self._execute()) + self.assertIn("pam_configuration", parsed) + + def test_has_pam_data_key(self): + parsed = json.loads(self._execute()) + self.assertIn("pam_data", parsed) + self.assertIn("resources", parsed["pam_data"]) + self.assertIn("users", parsed["pam_data"]) + + def test_has_tool_version(self): + parsed = json.loads(self._execute()) + self.assertIn("tool_version", parsed) + self.assertEqual(parsed["tool_version"], "commander-export-1.0") + + # ── pam_configuration fields ────────────────────────────────── + + def test_pam_configuration_environment(self): + parsed = json.loads(self._execute()) + self.assertEqual(parsed["pam_configuration"]["environment"], "local") + + def test_pam_configuration_on_off_values(self): + parsed = json.loads(self._execute()) + cfg = parsed["pam_configuration"] + for 
key in ("connections", "rotation", "tunneling", "remote_browser_isolation"): + self.assertIn(cfg[key], ("on", "off"), f"{key} must be 'on' or 'off'") + + # ── resources ──────────────────────────────────────────────── + + def test_resources_count(self): + parsed = json.loads(self._execute()) + self.assertEqual(len(parsed["pam_data"]["resources"]), 2) + + def test_resource_has_required_keys(self): + parsed = json.loads(self._execute()) + for res in parsed["pam_data"]["resources"]: + for key in ("uid", "type", "title", "users"): + self.assertIn(key, res, f"resource missing key: {key}") + + def test_resource_uids_are_unique(self): + parsed = json.loads(self._execute()) + uids = [r["uid"] for r in parsed["pam_data"]["resources"]] + self.assertEqual(len(uids), len(set(uids)), "resource UIDs must be unique") + + def test_resource_types(self): + parsed = json.loads(self._execute()) + types = {r["type"] for r in parsed["pam_data"]["resources"]} + self.assertIn("pamMachine", types) + self.assertIn("pamDatabase", types) + + # ── users ──────────────────────────────────────────────────── + + def test_top_level_users_deduplication(self): + # USER1 appears in both machine and database resources; + # must only appear once in pam_data.users + parsed = json.loads(self._execute()) + top_uids = [u["uid"] for u in parsed["pam_data"]["users"]] + self.assertEqual(len(top_uids), len(set(top_uids)), + "top-level user UIDs must be unique (de-duplicated)") + + def test_top_level_users_count(self): + # USER1 shared across both resources, USER2 only in DB → 2 unique users + parsed = json.loads(self._execute()) + self.assertEqual(len(parsed["pam_data"]["users"]), 2) + + def test_user_has_required_keys(self): + parsed = json.loads(self._execute()) + for usr in parsed["pam_data"]["users"]: + for key in ("uid", "type", "title", "login"): + self.assertIn(key, usr, f"user missing key: {key}") + + # ── --output flag ──────────────────────────────────────────── + + def 
test_output_flag_writes_file(self): + with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as tmp: + tmp_path = tmp.name + try: + result = self._execute(output=tmp_path) + # When --output is set, execute() should return None + self.assertIsNone(result) + self.assertTrue(os.path.exists(tmp_path)) + with open(tmp_path, encoding="utf-8") as fh: + content = fh.read() + parsed = json.loads(content) + self.assertIn("project", parsed) + self.assertIn("tool_version", parsed) + finally: + if os.path.exists(tmp_path): + os.unlink(tmp_path) + + # ── error handling ─────────────────────────────────────────── + + def test_missing_project_uid_returns_none(self): + with patch("keepercommander.vault.KeeperRecord.load", side_effect=_fake_load): + result = self.cmd.execute(self.params, project_uid="", output=None) + self.assertIsNone(result) + + def test_unknown_uid_returns_none(self): + with patch("keepercommander.vault.KeeperRecord.load", return_value=None): + result = self.cmd.execute(self.params, project_uid="unknown-uid", output=None) + self.assertIsNone(result) + + def test_non_v6_record_returns_none(self): + v3_rec = vault.TypedRecord(version=3) + v3_rec.type_name = "pamMachine" + v3_rec.title = "some" + v3_rec.record_uid = "some-uid" + with patch("keepercommander.vault.KeeperRecord.load", return_value=v3_rec): + result = self.cmd.execute(self.params, project_uid="some-uid", output=None) + self.assertIsNone(result) + + # ── round-trip / determinism ───────────────────────────────── + + def test_sort_keys_determinism(self): + result1 = self._execute() + result2 = self._execute() + self.assertEqual(result1, result2, "Output must be deterministic across calls") + + def test_output_is_sorted(self): + result = self._execute() + parsed = json.loads(result) + keys = list(parsed.keys()) + self.assertEqual(keys, sorted(keys), + "Top-level keys should be sorted (sort_keys=True)") + + + # ──────────────────────────────────────────────────────────────────── + # 
KCM-import compatibility (PR #1942) + # ──────────────────────────────────────────────────────────────────── + + class TestKCMImportRoundTrip(unittest.TestCase): + """KCM-imported records (PR #1942) reference users by *title* in + ``pam_settings.connection.launch_credentials`` rather than by UID + in ``userRecords[]``. Export must resolve these title references + so the exported JSON re-imports with the user link intact. + """ + + KCM_CFG = "kcm-cfg-1" + KCM_RES = "kcm-res-prod-db" + KCM_USR = "kcm-usr-prod-db" + + def _make_kcm_records(self): + """Build the KCM-shaped vault state (PR #1942 import output).""" + cfg = vault.TypedRecord(version=6) + cfg.type_name = "pamNetworkConfiguration" + cfg.title = "KCM Migration" + cfg.record_uid = self.KCM_CFG + cfg.fields.append(_make_typed_field("pamResources", [{ + "controllerUid": "gw-uid", + "folderUid": "sf-uid", + "resourceRef": [self.KCM_RES], + }])) + + res = vault.TypedRecord(version=3) + res.type_name = "pamMachine" + res.title = "KCM Resource - prod-db" + res.record_uid = self.KCM_RES + res.fields.append(_make_typed_field("pamSettings", [{ + "connection": { + "protocol": "ssh", + "port": "22", + "launch_credentials": "KCM User - prod-db", + }, + "options": {"connections": "on", "rotation": "off"}, + }])) + + usr = vault.TypedRecord(version=3) + usr.type_name = "pamUser" + usr.title = "KCM User - prod-db" + usr.record_uid = self.KCM_USR + usr.fields.append(_make_typed_field("login", ["root"])) + + return {self.KCM_CFG: cfg, self.KCM_RES: res, self.KCM_USR: usr} + + def setUp(self): + from keepercommander.commands.pam_import.export import PAMProjectExportCommand + from unittest.mock import MagicMock + self.cmd = PAMProjectExportCommand() + self.records = self._make_kcm_records() + self.params = MagicMock() + self.params.record_cache = {uid: {} for uid in self.records} + + def _execute(self): + def _load(_p, uid): + return self.records.get(uid) + with patch("keepercommander.vault.KeeperRecord.load", 
side_effect=_load): + with patch.object(self.cmd, "_get_allowed_settings", + return_value=dict(_DEFAULT_ALLOWED)): + return self.cmd.execute(self.params, project_uid=self.KCM_CFG) + + def test_title_based_user_link_resolved(self): + """KCM resource → export must include the user via title resolution.""" + parsed = json.loads(self._execute()) + resources = parsed["pam_data"]["resources"] + self.assertEqual(len(resources), 1, "expected one KCM resource") + res = resources[0] + self.assertEqual(len(res["users"]), 1, + "KCM resource must export 1 user (resolved by title)") + self.assertEqual(res["users"][0]["uid"], self.KCM_USR) + self.assertEqual(res["users"][0]["title"], "KCM User - prod-db") + + def test_top_level_users_includes_resolved_user(self): + parsed = json.loads(self._execute()) + top_users = parsed["pam_data"]["users"] + self.assertEqual(len(top_users), 1) + self.assertEqual(top_users[0]["uid"], self.KCM_USR) + + def test_pam_settings_preserved_for_round_trip(self): + """Round-trip safety: KCM-specific pam_settings keys preserved verbatim.""" + parsed = json.loads(self._execute()) + res = parsed["pam_data"]["resources"][0] + conn = res["pam_settings"]["connection"] + self.assertEqual(conn["protocol"], "ssh") + self.assertEqual(conn["port"], "22") + self.assertEqual(conn["launch_credentials"], "KCM User - prod-db") + + def test_uid_in_launch_credentials_accepted(self): + """If launch_credentials already holds a 22-char UID (non-KCM path), keep it as-is.""" + uid_22 = "AAAAAAAAAAAAAAAAAAAAAA" # 22 chars, no slash, no space + usr = vault.TypedRecord(version=3) + usr.type_name = "pamUser" + usr.title = "Direct UID User" + usr.record_uid = uid_22 + usr.fields.append(_make_typed_field("login", ["alice"])) + self.records[uid_22] = usr + self.params.record_cache[uid_22] = {} + + res = self.records[self.KCM_RES] + ps = res.get_typed_field("pamSettings").value[0] + ps["connection"]["launch_credentials"] = uid_22 + parsed = json.loads(self._execute()) + users = 
parsed["pam_data"]["resources"][0]["users"] + self.assertEqual(len(users), 1) + self.assertEqual(users[0]["uid"], uid_22) + + +else: + class TestPAMProjectExportCommand(unittest.TestCase): + def test_skip(self): + self.skipTest("Requires Python 3.8+") + + +if __name__ == "__main__": + unittest.main() diff --git a/unit-tests/pam/test_pam_rotation.py b/unit-tests/pam/test_pam_rotation.py index 01ce68208..87e4cc157 100644 --- a/unit-tests/pam/test_pam_rotation.py +++ b/unit-tests/pam/test_pam_rotation.py @@ -58,7 +58,7 @@ def create_mock_params(): from keepercommander import crypto, utils from keepercommander.commands.discoveryrotation import (PAMCreateRecordRotationCommand, PAMListRecordRotationCommand, - PAMGatewayListCommand) + PAMGatewayListCommand, PAMRouterGetRotationInfo) class TestPAMCreateRecordRotationCommand(unittest.TestCase): @@ -506,4 +506,99 @@ def test_execute_no_gateways(self, mock_get_all_gateways, self.assertTrue(mock_router_get_connected_gateways.called) self.assertTrue(mock_get_all_gateways.called) - self.assertTrue(mock_get_router_url.called) \ No newline at end of file + self.assertTrue(mock_get_router_url.called) + + class TestPAMRouterGetRotationInfo(unittest.TestCase): + + def _make_rri(self, status_name='RRS_ONLINE'): + """Build a minimal RouterRotationInfo mock.""" + from keepercommander.proto import router_pb2 + rri = MagicMock() + rri.status = router_pb2.RouterRotationStatus.Value(status_name) + rri.configurationUid = utils.base64_url_decode('config_uid_____') + rri.nodeId = 42 + rri.controllerName = 'gw-test' + rri.controllerUid = utils.base64_url_decode('gw_uid_________') + rri.resourceUid = b'' + rri.pwdComplexity = '' + rri.disabled = False + rri.scriptName = '' + return rri + + def _make_schedule(self, record_uid_bytes, no_schedule=False, schedule_data='daily.0.12.1'): + s = MagicMock() + s.recordUid = record_uid_bytes + s.noSchedule = no_schedule + s.scheduleData = schedule_data + return s + + 
@patch('keepercommander.commands.discoveryrotation.router_get_rotation_schedules') + @patch('keepercommander.commands.discoveryrotation.record_rotation_get') + def test_json_online_status(self, mock_rrg, mock_schedules): + """Online status + --format json returns valid JSON with expected keys.""" + from keeper_secrets_manager_core.utils import url_safe_str_to_bytes + record_uid = 'test_record_uid_' + record_uid_bytes = url_safe_str_to_bytes(record_uid) + + mock_rrg.return_value = self._make_rri('RRS_ONLINE') + + sched_mock = MagicMock() + sched_mock.schedules = [self._make_schedule(record_uid_bytes, no_schedule=False, + schedule_data='daily.0.12.1')] + mock_schedules.return_value = sched_mock + + mock_params = create_mock_params() + mock_params.record_cache = {} + + cmd = PAMRouterGetRotationInfo() + result = cmd.execute(mock_params, record_uid=record_uid, format='json') + + self.assertIsNotNone(result, "Expected JSON string, got None") + data = json.loads(result) + self.assertIn('status', data) + self.assertTrue(data['ready_to_rotate']) + self.assertIn('pam_config_uid', data) + self.assertIn('gateway_name', data) + self.assertEqual(data['gateway_name'], 'gw-test') + self.assertIn('schedule_type', data) + self.assertEqual(data['schedule_type'], 'scheduled') + + @patch('keepercommander.commands.discoveryrotation.router_get_rotation_schedules') + @patch('keepercommander.commands.discoveryrotation.record_rotation_get') + def test_json_non_online_status(self, mock_rrg, mock_schedules): + """Non-online status + --format json returns minimal JSON with ready_to_rotate=false.""" + record_uid = 'test_record_uid_' + + mock_rrg.return_value = self._make_rri('RRS_NO_ROTATION') + + mock_params = create_mock_params() + + cmd = PAMRouterGetRotationInfo() + result = cmd.execute(mock_params, record_uid=record_uid, format='json') + + self.assertIsNotNone(result, "Expected JSON string, got None") + data = json.loads(result) + self.assertIn('status', data) + 
self.assertFalse(data['ready_to_rotate']) + + @patch('keepercommander.commands.discoveryrotation.router_get_rotation_schedules') + @patch('keepercommander.commands.discoveryrotation.record_rotation_get') + def test_table_mode_returns_none(self, mock_rrg, mock_schedules): + """Table mode (default) prints to stdout and returns None.""" + from keeper_secrets_manager_core.utils import url_safe_str_to_bytes + record_uid = 'test_record_uid_' + record_uid_bytes = url_safe_str_to_bytes(record_uid) + + mock_rrg.return_value = self._make_rri('RRS_ONLINE') + + sched_mock = MagicMock() + sched_mock.schedules = [self._make_schedule(record_uid_bytes)] + mock_schedules.return_value = sched_mock + + mock_params = create_mock_params() + mock_params.record_cache = {} + + cmd = PAMRouterGetRotationInfo() + result = cmd.execute(mock_params, record_uid=record_uid, format='table') + self.assertIsNone(result) + diff --git a/unit-tests/test_command_ksm.py b/unit-tests/test_command_ksm.py new file mode 100644 index 000000000..0cf618225 --- /dev/null +++ b/unit-tests/test_command_ksm.py @@ -0,0 +1,54 @@ +"""Unit tests for secrets-manager (KSM) CLI commands.""" +import unittest +from unittest.mock import MagicMock, patch + +from keepercommander.commands.ksm import KSMCommand + + +class TestKSMTokenAdd(unittest.TestCase): + """secrets-manager token add → calls add_client.""" + + def _make_params(self, record_uid='test-app-uid'): + params = MagicMock() + params.record_cache = {} + return params + + @patch('keepercommander.commands.ksm.KSMCommand.add_client') + def test_token_add_calls_add_client(self, mock_add_client): + mock_add_client.return_value = [{'oneTimeToken': 'US:abc123', 'deviceToken': 'dt1'}] + params = self._make_params() + cmd = KSMCommand() + result = cmd.execute(params, command=['token', 'add', 'MyApp'], + count=1, unlockIp=False, firstAccessExpiresIn=None, + accessExpireInMin=None, name=None, config_init=None, + returnTokens=False, format='table') + 
mock_add_client.assert_called_once() + call_args = mock_add_client.call_args + assert call_args[0][1] == 'MyApp', f"Expected 'MyApp', got {call_args[0][1]}" + + @patch('keepercommander.commands.ksm.KSMCommand.add_client') + def test_token_add_return_tokens(self, mock_add_client): + mock_add_client.return_value = [{'oneTimeToken': 'US:tok1'}, {'oneTimeToken': 'US:tok2'}] + params = self._make_params() + cmd = KSMCommand() + result = cmd.execute(params, command=['token', 'add', 'MyApp'], + count=2, unlockIp=False, firstAccessExpiresIn=None, + accessExpireInMin=None, name=None, config_init=None, + returnTokens=True, format='table') + assert result == 'US:tok1, US:tok2', f"Expected 'US:tok1, US:tok2', got {result!r}" + + def test_token_add_missing_app_prints_help(self): + params = self._make_params() + cmd = KSMCommand() + # Should print help and return None without calling add_client + with patch('keepercommander.commands.ksm.KSMCommand.add_client') as mock_ac: + result = cmd.execute(params, command=['token', 'add'], + count=1, unlockIp=False, firstAccessExpiresIn=None, + accessExpireInMin=None, name=None, config_init=None, + returnTokens=False, format='table') + mock_ac.assert_not_called() + assert result is None, f"Expected None, got {result!r}" + + +if __name__ == '__main__': + unittest.main() diff --git a/unit-tests/test_keeper_drive.py b/unit-tests/test_keeper_drive.py new file mode 100644 index 000000000..ac3f478a6 --- /dev/null +++ b/unit-tests/test_keeper_drive.py @@ -0,0 +1,613 @@ +""" +Unit tests for KeeperDrive commands and key helpers. 
+ +Follows the same patterns as test_command_folder.py and test_command_record.py: + - Command execute() happy paths and error cases + - Key utility/parsing functions +""" + +import os +import time +from unittest import TestCase, mock +from unittest.mock import Mock, MagicMock, patch + +from keepercommander import utils, crypto +from keepercommander.error import CommandError + + +_DATA_KEY = utils.generate_aes_key() +_ACCOUNT_UID = utils.generate_uid() + + +def _make_params(**overrides): + p = Mock() + p.data_key = _DATA_KEY + p.account_uid_bytes = utils.base64_url_decode(_ACCOUNT_UID) + p.rsa_key2 = None + p.ecc_key = None + p.user_cache = {} + p.record_cache = {} + p.meta_data_cache = {} + p.folder_cache = {} + p.subfolder_cache = {} + p.subfolder_record_cache = {} + p.record_owner_cache = {} + p.keeper_drive_folders = {} + p.keeper_drive_folder_keys = {} + p.keeper_drive_folder_accesses = {} + p.keeper_drive_records = {} + p.keeper_drive_record_data = {} + p.keeper_drive_record_keys = {} + p.keeper_drive_record_accesses = {} + p.keeper_drive_folder_records = {} + p.keeper_drive_record_sharing_states = {} + p.keeper_drive_record_links = {} + p.keeper_drive_raw_dag_data = [] + p.sync_data = False + p.enterprise = None + for k, v in overrides.items(): + setattr(p, k, v) + return p + + +def _make_folder(folder_uid=None, name='Test Folder', parent_uid=None): + fuid = folder_uid or utils.generate_uid() + key = utils.generate_aes_key() + return fuid, { + 'folder_uid': fuid, 'name': name, + 'parent_uid': parent_uid, 'folder_key_unencrypted': key, + } + + +def _make_record(record_uid=None, title='Test Record'): + ruid = record_uid or utils.generate_uid() + key = utils.generate_aes_key() + return ruid, { + 'record_uid': ruid, 'revision': 1, 'version': 3, + 'shared': False, 'record_key_unencrypted': key, 'title': title, + } + + +class TestCommandHelpers(TestCase): + + def test_parse_expiration_none(self): + from keepercommander.commands.keeper_drive.helpers import 
parse_expiration + self.assertIsNone(parse_expiration(None, None, 'test')) + + def test_parse_expiration_never(self): + from keepercommander.commands.keeper_drive.helpers import parse_expiration + self.assertEqual(parse_expiration('never', None, 'test'), -1) + + def test_parse_expiration_iso_date(self): + from keepercommander.commands.keeper_drive.helpers import parse_expiration + result = parse_expiration('2027-01-01T00:00:00Z', None, 'test') + self.assertIsInstance(result, int) + self.assertGreater(result, 0) + + def test_parse_expiration_relative(self): + from keepercommander.commands.keeper_drive.helpers import parse_expiration + for unit in ('30d', '24h', '30mi', '6mo', '1y'): + result = parse_expiration(None, unit, 'test') + self.assertIsInstance(result, int) + self.assertGreater(result, int(time.time() * 1000)) + + def test_parse_expiration_invalid(self): + from keepercommander.commands.keeper_drive.helpers import parse_expiration + with self.assertRaises(CommandError): + parse_expiration('not-a-date', None, 'test') + with self.assertRaises(CommandError): + parse_expiration(None, 'invalid', 'test') + + def test_infer_role(self): + from keepercommander.commands.keeper_drive.helpers import infer_role + self.assertEqual(infer_role({'can_change_ownership': True}), 'full-manager') + self.assertEqual(infer_role({'can_update_access': True, 'can_approve_access': True}), 'content-share-manager') + self.assertEqual(infer_role({'can_update_access': True}), 'shared-manager') + self.assertEqual(infer_role({'can_edit': True}), 'content-manager') + self.assertEqual(infer_role({'can_view': True, 'can_list_access': True}), 'viewer') + self.assertEqual(infer_role({'can_view_title': True}), 'requestor') + self.assertEqual(infer_role({}), 'navigator') + + def test_normalize_parent_uid(self): + from keepercommander.commands.keeper_drive.helpers import normalize_parent_uid, ROOT_FOLDER_UID + self.assertEqual(normalize_parent_uid(ROOT_FOLDER_UID), 'root') + 
self.assertEqual(normalize_parent_uid('root'), 'root') + self.assertEqual(normalize_parent_uid(None), '') + self.assertEqual(normalize_parent_uid('abc123'), 'abc123') + + def test_format_timestamp(self): + from keepercommander.commands.keeper_drive.helpers import format_timestamp + self.assertIn('2024', format_timestamp(1704067200000)) + self.assertEqual(format_timestamp(0), '') + self.assertEqual(format_timestamp(None), '') + + def test_command_error_handler(self): + from keepercommander.commands.keeper_drive.helpers import command_error_handler + with command_error_handler('kd-test'): + pass + with self.assertRaises(CommandError): + with command_error_handler('kd-test'): + raise CommandError('kd-test', 'specific') + with self.assertRaises(CommandError): + with command_error_handler('kd-test'): + raise RuntimeError('generic') + + def test_check_result(self): + from keepercommander.commands.keeper_drive.helpers import check_result + check_result({'success': True}, 'kd-test') + with self.assertRaises(CommandError): + check_result({'success': False, 'message': 'failed'}, 'kd-test') + + def test_find_folder_location(self): + from keepercommander.commands.keeper_drive.helpers import find_folder_location, ROOT_FOLDER_UID + ruid = utils.generate_uid() + fuid, fobj = _make_folder(name='Docs') + params = _make_params( + keeper_drive_folder_records={fuid: {ruid}}, + keeper_drive_folders={fuid: fobj}, + ) + self.assertEqual(find_folder_location(params, ruid), 'Docs') + params2 = _make_params(keeper_drive_folder_records={ROOT_FOLDER_UID: {ruid}}) + self.assertEqual(find_folder_location(params2, ruid), 'root') + self.assertEqual(find_folder_location(_make_params(), 'missing'), '') + + def test_load_record_metadata_from_cache(self): + from keepercommander.commands.keeper_drive.helpers import load_record_metadata + ruid = utils.generate_uid() + params = _make_params( + keeper_drive_record_data={ruid: { + 'data_json': {'title': 'Cached', 'type': 'login', 'fields': [], 'notes': 
'n'} + }}, + keeper_drive_records={ruid: {'revision': 5, 'version': 3}}, + ) + result = load_record_metadata(params, ruid) + self.assertEqual(result['title'], 'Cached') + self.assertEqual(result['revision'], 5) + + +class TestSync(TestCase): + + def test_accumulator_and_has_data(self): + from keepercommander.keeper_drive.sync import create_accumulator, has_data + acc = create_accumulator() + self.assertFalse(has_data(acc)) + acc['folders'].append('x') + self.assertTrue(has_data(acc)) + + def test_clear_caches(self): + from keepercommander.keeper_drive.sync import clear_caches + params = _make_params() + params.keeper_drive_folders['f1'] = {'name': 'x'} + params.keeper_drive_records['r1'] = {'title': 'y'} + clear_caches(params) + self.assertEqual(len(params.keeper_drive_folders), 0) + self.assertEqual(len(params.keeper_drive_records), 0) + + def test_process_empty(self): + from keepercommander.keeper_drive.sync import process, create_accumulator + process(_make_params(), create_accumulator()) + + +class TestKeeperDriveFolderCommands(TestCase): + + def setUp(self): + mock.patch('keepercommander.api.communicate_rest').start() + mock.patch('keepercommander.api.communicate').start() + + def tearDown(self): + mock.patch.stopall() + + @patch('keepercommander.keeper_drive.folder_api.create_folder_v3') + def test_mkdir(self, mock_create): + from keepercommander.commands.keeper_drive import KeeperDriveMkdirCommand + mock_create.return_value = { + 'folder_uid': utils.generate_uid(), 'status': 'SUCCESS', + 'message': '', 'success': True, + } + cmd = KeeperDriveMkdirCommand() + with mock.patch('builtins.print'): + cmd.execute(_make_params(), folder='NewFolder') + mock_create.assert_called_once() + + @patch('keepercommander.keeper_drive.folder_api.update_folder_v3') + def test_update_folder(self, mock_update): + from keepercommander.commands.keeper_drive import KeeperDriveUpdateFolderCommand + mock_update.return_value = { + 'folder_uid': 'fuid', 'status': 'SUCCESS', + 'message': 
'', 'success': True, + } + fuid, fobj = _make_folder(name='OldName') + cmd = KeeperDriveUpdateFolderCommand() + with mock.patch('builtins.print'): + cmd.execute(_make_params(keeper_drive_folders={fuid: fobj}), + folder=fuid, folder_name='NewName') + mock_update.assert_called_once() + + def test_list_empty(self): + from keepercommander.commands.keeper_drive import KeeperDriveListCommand + cmd = KeeperDriveListCommand() + with mock.patch('builtins.print'): + cmd.execute(_make_params()) + + def test_list_with_data(self): + from keepercommander.commands.keeper_drive import KeeperDriveListCommand + fuid, fobj = _make_folder(name='Documents') + ruid, robj = _make_record(title='Note') + params = _make_params( + keeper_drive_folders={fuid: fobj}, + keeper_drive_records={ruid: robj}, + keeper_drive_record_data={ruid: {'data_json': {'title': 'Note', 'type': 'general'}}}, + ) + cmd = KeeperDriveListCommand() + with mock.patch('builtins.print'): + cmd.execute(params, folders=True) + cmd.execute(params, records=True) + + +class TestKeeperDriveRecordCommands(TestCase): + + def setUp(self): + mock.patch('keepercommander.api.communicate_rest').start() + mock.patch('keepercommander.api.communicate').start() + + def tearDown(self): + mock.patch.stopall() + + @patch('keepercommander.keeper_drive.record_api.create_record_v3') + def test_add_record(self, mock_create): + from keepercommander.commands.keeper_drive import KeeperDriveAddRecordCommand + mock_create.return_value = { + 'record_uid': utils.generate_uid(), 'status': 'SUCCESS', + 'message': '', 'success': True, 'revision': 1, + } + fuid, fobj = _make_folder() + cmd = KeeperDriveAddRecordCommand() + with mock.patch('builtins.print'): + cmd.execute(_make_params(keeper_drive_folders={fuid: fobj}, record_type_cache={}), + title='New Record', folder_uid=fuid, force=True, + record_type='general', fields=[]) + + @patch('keepercommander.keeper_drive.folder_record_api.add_record_to_folder_v3') + def test_add_record_to_folder(self, 
mock_add): + pass + + @patch('keepercommander.keeper_drive.folder_record_api.remove_record_from_folder_v3') + def test_remove_record_from_folder(self, mock_remove): + pass + + +class TestCrossTypeGuards(TestCase): + """Legacy and KeeperDrive folders/records use different permission + structures. Commands must refuse cross-type operations.""" + + def setUp(self): + mock.patch('keepercommander.api.communicate_rest').start() + mock.patch('keepercommander.api.communicate').start() + + def tearDown(self): + mock.patch.stopall() + + def test_is_keeper_drive_record(self): + from keepercommander.commands.keeper_drive.helpers import is_keeper_drive_record + kd_uid, _ = _make_record() + legacy_uid = utils.generate_uid() + params = _make_params( + keeper_drive_records={kd_uid: {'revision': 1}}, + record_cache={legacy_uid: {'revision': 1}, kd_uid: {'revision': 1}}, + ) + self.assertTrue(is_keeper_drive_record(params, kd_uid)) + self.assertFalse(is_keeper_drive_record(params, legacy_uid)) + self.assertFalse(is_keeper_drive_record(params, None)) + + def test_is_keeper_drive_folder(self): + from keepercommander.commands.keeper_drive.helpers import ( + is_keeper_drive_folder, ROOT_FOLDER_UID, + ) + kd_fuid, kd_fobj = _make_folder() + legacy_fuid = utils.generate_uid() + params = _make_params( + keeper_drive_folders={kd_fuid: kd_fobj}, + folder_cache={legacy_fuid: object()}, + ) + self.assertTrue(is_keeper_drive_folder(params, kd_fuid)) + self.assertTrue(is_keeper_drive_folder(params, ROOT_FOLDER_UID)) + self.assertFalse(is_keeper_drive_folder(params, legacy_fuid)) + self.assertFalse(is_keeper_drive_folder(params, None)) + + @patch('keepercommander.keeper_drive.folder_record_api.add_record_to_folder_v3') + def test_kd_ln_rejects_legacy_record(self, mock_link): + """kd-ln must refuse a legacy record even when the dest folder is KD.""" + from keepercommander.commands.keeper_drive import KeeperDriveLnCommand + kd_fuid, kd_fobj = _make_folder() + legacy_ruid = utils.generate_uid() + 
params = _make_params( + keeper_drive_folders={kd_fuid: kd_fobj}, + record_cache={legacy_ruid: {'revision': 1}}, + ) + cmd = KeeperDriveLnCommand() + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, src=legacy_ruid, dst=kd_fuid) + self.assertIn('legacy', str(ctx.exception).lower()) + mock_link.assert_not_called() + + @patch('keepercommander.keeper_drive.folder_record_api.add_record_to_folder_v3') + def test_kd_ln_rejects_legacy_folder(self, mock_link): + """kd-ln must refuse a legacy folder even when the source is a KD record.""" + from keepercommander.commands.keeper_drive import KeeperDriveLnCommand + kd_ruid, kd_robj = _make_record() + legacy_fuid = utils.generate_uid() + + class _Folder: + uid = legacy_fuid + name = 'Legacy' + type = 'user_folder' + subfolders = [] + + params = _make_params( + keeper_drive_records={kd_ruid: kd_robj}, + folder_cache={legacy_fuid: _Folder()}, + ) + cmd = KeeperDriveLnCommand() + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, src=kd_ruid, dst=legacy_fuid) + self.assertIn('legacy', str(ctx.exception).lower()) + mock_link.assert_not_called() + + @patch('keepercommander.keeper_drive.record_api.create_record_v3') + def test_kd_record_add_rejects_legacy_folder(self, mock_create): + from keepercommander.commands.keeper_drive import KeeperDriveAddRecordCommand + legacy_fuid = utils.generate_uid() + + class _Folder: + uid = legacy_fuid + name = 'LegacyFolder' + type = 'user_folder' + subfolders = [] + + params = _make_params(folder_cache={legacy_fuid: _Folder()}) + cmd = KeeperDriveAddRecordCommand() + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, title='New', record_type='general', + folder_uid=legacy_fuid, fields=[], force=True) + self.assertIn('legacy', str(ctx.exception).lower()) + mock_create.assert_not_called() + + @patch('keepercommander.keeper_drive.record_api.update_record_v3') + def test_kd_record_update_rejects_legacy_record(self, mock_update): + from 
keepercommander.commands.keeper_drive import KeeperDriveUpdateRecordCommand + legacy_ruid = utils.generate_uid() + params = _make_params(record_cache={legacy_ruid: {'revision': 1}}) + cmd = KeeperDriveUpdateRecordCommand() + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, record_uids=[legacy_ruid], title='X', fields=[]) + self.assertIn('legacy', str(ctx.exception).lower()) + mock_update.assert_not_called() + + @patch('keepercommander.keeper_drive.removal_api.remove_record_v3') + def test_kd_rm_rejects_legacy_record(self, mock_rm): + from keepercommander.commands.keeper_drive import KeeperDriveRemoveRecordCommand + legacy_ruid = utils.generate_uid() + params = _make_params(record_cache={legacy_ruid: {'revision': 1}}) + cmd = KeeperDriveRemoveRecordCommand() + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, records=[legacy_ruid], operation='owner-trash') + self.assertIn('legacy', str(ctx.exception).lower()) + mock_rm.assert_not_called() + + @patch('keepercommander.keeper_drive.folder_api.update_folder_v3') + def test_kd_rndir_rejects_legacy_folder(self, mock_update): + from keepercommander.commands.keeper_drive import KeeperDriveUpdateFolderCommand + legacy_fuid = utils.generate_uid() + params = _make_params(folder_cache={legacy_fuid: object()}) + cmd = KeeperDriveUpdateFolderCommand() + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, folder=legacy_fuid, folder_name='New') + self.assertIn('legacy', str(ctx.exception).lower()) + mock_update.assert_not_called() + + @patch('keepercommander.keeper_drive.folder_api.grant_folder_access_v3') + def test_kd_share_folder_rejects_legacy_folder(self, mock_grant): + from keepercommander.commands.keeper_drive import KeeperDriveShareFolderCommand + legacy_fuid = utils.generate_uid() + params = _make_params(folder_cache={legacy_fuid: object()}) + cmd = KeeperDriveShareFolderCommand() + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, folder=[legacy_fuid], 
user=['user@x.com'], + action='grant', role='viewer') + self.assertIn('legacy', str(ctx.exception).lower()) + mock_grant.assert_not_called() + + @patch('keepercommander.keeper_drive.removal_api.remove_folder_v3') + def test_kd_rmdir_rejects_legacy_folder(self, mock_rmdir): + from keepercommander.commands.keeper_drive import KeeperDriveRemoveFolderCommand + legacy_fuid = utils.generate_uid() + params = _make_params(folder_cache={legacy_fuid: object()}) + cmd = KeeperDriveRemoveFolderCommand() + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, folders=[legacy_fuid], operation='folder-trash') + self.assertIn('legacy', str(ctx.exception).lower()) + mock_rmdir.assert_not_called() + + @patch('keepercommander.keeper_drive.record_api.share_record_v3') + def test_kd_share_record_rejects_legacy_record(self, mock_share): + from keepercommander.commands.keeper_drive import KeeperDriveShareRecordCommand + legacy_ruid = utils.generate_uid() + params = _make_params(record_cache={legacy_ruid: {'revision': 1}}) + cmd = KeeperDriveShareRecordCommand() + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, record=legacy_ruid, email=['x@y.com'], + action='grant', role='viewer') + self.assertIn('legacy', str(ctx.exception).lower()) + mock_share.assert_not_called() + + @patch('keepercommander.keeper_drive.record_api.transfer_record_ownership_v3') + def test_kd_transfer_record_rejects_legacy_record(self, mock_transfer): + from keepercommander.commands.keeper_drive import KeeperDriveTransferRecordCommand + legacy_ruid = utils.generate_uid() + params = _make_params(record_cache={legacy_ruid: {'revision': 1}}) + cmd = KeeperDriveTransferRecordCommand() + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, record_uids=[legacy_ruid], + new_owner_email='owner@example.com') + self.assertIn('legacy', str(ctx.exception).lower()) + mock_transfer.assert_not_called() + + @patch('keepercommander.keeper_drive.record_api.get_record_details_v3') + def 
test_kd_record_details_rejects_legacy_record(self, mock_details): + from keepercommander.commands.keeper_drive import KeeperDriveGetRecordDetailsCommand + legacy_ruid = utils.generate_uid() + params = _make_params(record_cache={legacy_ruid: {'revision': 1}}) + cmd = KeeperDriveGetRecordDetailsCommand() + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, record_uids=[legacy_ruid]) + self.assertIn('legacy', str(ctx.exception).lower()) + mock_details.assert_not_called() + + +class TestLegacyToKeeperDriveGuards(TestCase): + """Legacy mv/ln must refuse to bridge legacy records into KD folders + (and vice-versa) because their permission structures differ.""" + + def setUp(self): + mock.patch('keepercommander.api.communicate_rest').start() + + def tearDown(self): + mock.patch.stopall() + + def _make_legacy_params(self, kd_folder_uid, legacy_record_uid): + from keepercommander.subfolder import ( + UserFolderNode, KeeperDriveFolderNode, RootFolderNode, + ) + params = _make_params() + legacy_folder_uid = utils.generate_uid() + params.root_folder = RootFolderNode() + params.current_folder = '' + + legacy_folder = UserFolderNode() + legacy_folder.uid = legacy_folder_uid + legacy_folder.name = 'Legacy' + + kd_folder = KeeperDriveFolderNode() + kd_folder.uid = kd_folder_uid + kd_folder.name = 'Drive' + + params.folder_cache = { + legacy_folder_uid: legacy_folder, + kd_folder_uid: kd_folder, + } + params.record_cache = {legacy_record_uid: {'data_unencrypted': b'{"title":"x"}'}} + params.subfolder_record_cache = {legacy_folder_uid: {legacy_record_uid}} + params.keeper_drive_folders = {kd_folder_uid: {'name': 'Drive'}} + params.keeper_drive_records = {} + return params, legacy_folder_uid + + @patch('keepercommander.api.communicate') + def test_legacy_ln_rejects_record_into_kd_folder(self, mock_communicate): + from keepercommander.commands.folder import FolderLinkCommand + kd_fuid, _ = _make_folder() + legacy_ruid = utils.generate_uid() + params, _ = 
self._make_legacy_params(kd_fuid, legacy_ruid) + cmd = FolderLinkCommand() + with self.assertRaises(CommandError) as ctx: + cmd.execute(params, src=legacy_ruid, dst=kd_fuid) + self.assertIn('keeperdrive', str(ctx.exception).lower()) + mock_communicate.assert_not_called() + + @patch('keepercommander.api.communicate') + def test_legacy_mv_rejects_kd_record_into_legacy_folder(self, mock_communicate): + """Symmetric guard: KD record cannot be moved into a legacy folder.""" + from keepercommander.commands.folder import FolderMoveCommand + kd_fuid, _ = _make_folder() + kd_ruid = utils.generate_uid() + params, legacy_fuid = self._make_legacy_params(kd_fuid, kd_ruid) + params.keeper_drive_records[kd_ruid] = {'revision': 1} + # Place the KD record only in the KD folder (not in the legacy folder). + params.subfolder_record_cache = {kd_fuid: {kd_ruid}} + cmd = FolderMoveCommand() + with self.assertRaises(CommandError): + cmd.execute(params, src=kd_ruid, dst=legacy_fuid) + mock_communicate.assert_not_called() + + +class TestKeeperDriveSharingCommands(TestCase): + + def setUp(self): + mock.patch('keepercommander.api.communicate_rest').start() + mock.patch('keepercommander.api.communicate').start() + + def tearDown(self): + mock.patch.stopall() + + @patch('keepercommander.keeper_drive.record_api.share_record_v3') + def test_share_record(self, mock_share): + from keepercommander.commands.keeper_drive import KeeperDriveShareRecordCommand + ruid, robj = _make_record() + mock_share.return_value = { + 'success': True, 'message': '', + 'results': [{'record_uid': ruid, 'success': True, 'message': '', 'pending': False}], + } + cmd = KeeperDriveShareRecordCommand() + with mock.patch('builtins.print'): + cmd.execute(_make_params(keeper_drive_records={ruid: robj}), + record=ruid, email='user@example.com', + action='grant', role='viewer') + + @patch('keepercommander.keeper_drive.record_api.unshare_record_v3') + def test_share_record_revoke(self, mock_unshare): + from 
keepercommander.commands.keeper_drive import KeeperDriveShareRecordCommand + ruid, robj = _make_record() + mock_unshare.return_value = { + 'success': True, 'message': '', + 'results': [{'record_uid': ruid, 'success': True, 'message': ''}], + } + cmd = KeeperDriveShareRecordCommand() + with mock.patch('builtins.print'): + cmd.execute(_make_params(keeper_drive_records={ruid: robj}), + record=ruid, email='user@example.com', + action='revoke') + + +class TestKeeperDriveDisplayCommands(TestCase): + + def setUp(self): + mock.patch('keepercommander.api.communicate_rest').start() + mock.patch('keepercommander.api.communicate').start() + + def tearDown(self): + mock.patch.stopall() + + @patch('keepercommander.keeper_drive.record_api.get_record_details_v3') + def test_get_record_details(self, mock_details): + from keepercommander.commands.keeper_drive import KeeperDriveGetRecordDetailsCommand + mock_details.return_value = {'data': [], 'errors': []} + ruid, robj = _make_record() + cmd = KeeperDriveGetRecordDetailsCommand() + with mock.patch('builtins.print'): + cmd.execute(_make_params(keeper_drive_records={ruid: robj}), + record_uids=[ruid]) + + @patch('keepercommander.keeper_drive.record_api.get_record_accesses_v3') + def test_get_record_access(self, mock_accesses): + pass + + +class TestCommandRegistration(TestCase): + + def test_all_commands_registered(self): + from keepercommander.commands.keeper_drive import register_commands + commands = {} + register_commands(commands) + expected = [ + 'kd-mkdir', 'kd-record-add', 'kd-record-update', 'kd-rndir', + 'kd-list', 'kd-share-folder', 'kd-record-details', + 'kd-share-record', 'kd-record-permission', 'kd-transfer-record', + 'kd-ln', 'kd-rm', 'kd-rmdir', 'kd-shortcut', 'kd-get', + ] + for name in expected: + self.assertIn(name, commands) + removed = ['kd-grant-access', 'kd-update-access', 'kd-revoke-access', + 'kd-update-record-share', 'kd-unshare-record', + 'kd-add-record-to-folder', 'kd-remove-record-from-folder', + 
'kd-record-access', 'kd-folder-access'] + for name in removed: + self.assertNotIn(name, commands) diff --git a/unit-tests/test_tunnel_registry.py b/unit-tests/test_tunnel_registry.py new file mode 100644 index 000000000..931260e8f --- /dev/null +++ b/unit-tests/test_tunnel_registry.py @@ -0,0 +1,253 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# Unit tests for tunnel_registry and pam tunnel start parser / batch-mode guard. +# + +import json +import os +import shutil +import sys +import tempfile +import unittest +from pathlib import Path +from unittest import mock + +import keepercommander.commands.tunnel_registry as tunnel_registry_mod +from keepercommander.commands.tunnel_registry import ( + PARENT_GRACE_SECONDS, + is_pid_alive, + list_registered_tunnels, + normalize_bind_host, + register_tunnel, + stop_tunnel_process, + unregister_tunnel, +) +from keepercommander.error import CommandError + +if sys.version_info < (3, 8): + raise unittest.SkipTest('pam tunnel tests require Python 3.8+') + + +def _patch_registry_dir(testcase, tmp: Path): + """Point tunnel_registry_dir at tmp for the duration of a test.""" + patcher = mock.patch.object( + tunnel_registry_mod, + 'tunnel_registry_dir', + return_value=tmp, + ) + patcher.start() + testcase.addCleanup(patcher.stop) + tunnel_registry_mod._registry_dir_initialized = False + + +class TestNormalizeBindHost(unittest.TestCase): + def test_localhost_maps(self): + self.assertEqual(normalize_bind_host('localhost'), '127.0.0.1') + self.assertEqual(normalize_bind_host('LOCALHOST'), '127.0.0.1') + + def test_other_preserved_lower(self): + self.assertEqual(normalize_bind_host('10.0.0.5'), '10.0.0.5') + + +class TestTunnelRegistryDir(unittest.TestCase): + def test_creates_with_permissions(self): + tmp = Path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, tmp, ignore_errors=True) + _patch_registry_dir(self, tmp) + d = tunnel_registry_mod.tunnel_registry_dir() + self.assertTrue(d.exists()) + self.assertEqual(d, tmp) + + +class 
TestRegisterUnregister(unittest.TestCase): + def setUp(self): + self.tmp = Path(tempfile.mkdtemp()) + _patch_registry_dir(self, self.tmp) + + def tearDown(self): + shutil.rmtree(self.tmp, ignore_errors=True) + + def test_register_writes_json_unregister_removes(self): + register_tunnel( + 12345, 'rec1', 'tube1', '127.0.0.1', 5432, + target_host='host', target_port=22, mode='foreground', + record_title='t', + ) + self.assertTrue((self.tmp / '12345.json').exists()) + with open(self.tmp / '12345.json', encoding='utf-8') as f: + data = json.load(f) + self.assertEqual(data['record_uid'], 'rec1') + self.assertEqual(data['tube_id'], 'tube1') + self.assertEqual(data['port'], 5432) + unregister_tunnel(12345) + self.assertFalse((self.tmp / '12345.json').exists()) + + +class TestListRegisteredTunnels(unittest.TestCase): + def setUp(self): + self.tmp = Path(tempfile.mkdtemp()) + _patch_registry_dir(self, self.tmp) + + def tearDown(self): + shutil.rmtree(self.tmp, ignore_errors=True) + + def test_list_removes_stale_entries_when_clean(self): + dead_pid = 999999991 + p = self.tmp / f'{dead_pid}.json' + with open(p, 'w', encoding='utf-8') as f: + json.dump({'pid': dead_pid, 'record_uid': 'x', 'host': '127.0.0.1', 'port': 1}, f) + out = list_registered_tunnels(clean_stale=True) + self.assertFalse(p.exists()) + self.assertEqual(out, []) + + +class TestIsPidAlive(unittest.TestCase): + def test_current_process_alive(self): + self.assertTrue(is_pid_alive(os.getpid())) + + def test_nonexistent_pid(self): + self.assertFalse(is_pid_alive(999999997)) + + +class TestDuplicatePortDetection(unittest.TestCase): + def setUp(self): + self.tmp = Path(tempfile.mkdtemp()) + _patch_registry_dir(self, self.tmp) + + def tearDown(self): + shutil.rmtree(self.tmp, ignore_errors=True) + + @mock.patch('keepercommander.commands.tunnel_registry.is_pid_alive', return_value=True) + def test_duplicate_host_port_raises(self, _mock_alive): + register_tunnel( + 111, 'a', 't1', '127.0.0.1', 5432, mode='foreground', 
+ ) + with self.assertRaises(CommandError) as ctx: + register_tunnel( + 222, 'b', 't2', 'localhost', 5432, mode='foreground', + ) + msg = str(ctx.exception) + self.assertIn('pam tunnel start', msg) + self.assertIn('5432', msg) + self.assertIn('111', msg) + self.assertIn('pam tunnel stop', msg) + + +class TestStopTunnelProcess(unittest.TestCase): + def test_dead_pid_returns_false(self): + self.assertFalse(stop_tunnel_process(999999996)) + + +class TestPamTunnelStartParser(unittest.TestCase): + def test_defaults(self): + from keepercommander.commands.tunnel_and_connections import PAMTunnelStartCommand + p = PAMTunnelStartCommand.pam_cmd_parser + ns = p.parse_args(['recuid']) + self.assertFalse(ns.foreground) + self.assertFalse(ns.background) + self.assertIsNone(ns.run_command) + self.assertEqual(ns.connect_timeout, 30) + self.assertIsNone(ns.pid_file) + + def test_flags_parse(self): + from keepercommander.commands.tunnel_and_connections import PAMTunnelStartCommand + p = PAMTunnelStartCommand.pam_cmd_parser + ns = p.parse_args([ + 'recuid', '-fg', '--pid-file', '/tmp/p.pid', '--timeout', '60', + '-R', 'echo hi', + ]) + self.assertTrue(ns.foreground) + self.assertEqual(ns.pid_file, '/tmp/p.pid') + self.assertEqual(ns.connect_timeout, 60) + self.assertEqual(ns.run_command, 'echo hi') + + def test_mutual_exclusive_flags_parse_together(self): + from keepercommander.commands.tunnel_and_connections import PAMTunnelStartCommand + p = PAMTunnelStartCommand.pam_cmd_parser + ns = p.parse_args(['recuid', '--foreground', '--background']) + self.assertTrue(ns.foreground) + self.assertTrue(ns.background) + + +class _DummyTypedRecord: + """Stand-in for vault.TypedRecord when patching isinstance checks.""" + + +class TestBatchModeTargetHostPort(unittest.TestCase): + @mock.patch('keepercommander.commands.workflow.check_workflow_and_prompt_2fa', + return_value=(True, None), create=True) + @mock.patch('keepercommander.commands.tunnel_and_connections.vault.TypedRecord', 
_DummyTypedRecord) + @mock.patch('keepercommander.commands.tunnel_and_connections.find_open_port') + @mock.patch('keepercommander.commands.tunnel_and_connections.get_or_create_tube_registry') + @mock.patch('keepercommander.commands.tunnel_and_connections.api.sync_down') + @mock.patch('keepercommander.commands.tunnel_and_connections.vault.KeeperRecord.load') + def test_input_not_called_batch_missing_target_host( + self, mock_load, mock_sync, mock_tr, mock_fop, mock_wf, + ): + mock_tr.return_value = mock.MagicMock() + mock_fop.return_value = 5432 + + pam = mock.MagicMock() + pam.get_default_value.return_value = {'allowSupplyHost': True, 'portForward': {'port': 22}} + rec = _DummyTypedRecord() + rec.record_uid = 'rec1' + rec.title = 'rec1-title' + rec.get_typed_field = lambda name, *a, **kw: pam if name == 'pamSettings' else None + + mock_load.return_value = rec + + from keepercommander.commands.tunnel_and_connections import PAMTunnelStartCommand + p = mock.MagicMock() + p.batch_mode = True + p.config_filename = None + p.server = None + + with mock.patch('builtins.input', side_effect=AssertionError('input must not be called')): + cmd = PAMTunnelStartCommand() + with self.assertRaises(CommandError) as ctx: + cmd.execute( + p, + uid='rec1', + host='127.0.0.1', + port=5432, + target_host=None, + target_port=None, + no_trickle_ice=False, + foreground=False, + background=False, + run_command=None, + connect_timeout=30, + pid_file=None, + ) + msg = str(ctx.exception) + self.assertIn('--target-host', msg) + self.assertIn('--target-port', msg) + + +class TestParentGraceConstant(unittest.TestCase): + def test_parent_grace_seconds(self): + self.assertEqual(PARENT_GRACE_SECONDS, 10) + + +class TestListCleanStaleFalse(unittest.TestCase): + """Stale files are listed but not deleted when clean_stale=False.""" + + def setUp(self): + self.tmp = Path(tempfile.mkdtemp()) + _patch_registry_dir(self, self.tmp) + + def tearDown(self): + shutil.rmtree(self.tmp, ignore_errors=True) + + def 
test_clean_stale_false_keeps_dead_file(self): + dead_pid = 999999992 + p = self.tmp / f'{dead_pid}.json' + with open(p, 'w', encoding='utf-8') as f: + json.dump({'pid': dead_pid, 'record_uid': 'x', 'host': '127.0.0.1', 'port': 1}, f) + out = list_registered_tunnels(clean_stale=False) + self.assertTrue(p.exists()) + self.assertEqual(out, []) + + +if __name__ == '__main__': + unittest.main()