diff --git a/README.md b/README.md index dd3d22d..d12205b 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ jobs: - uses: actions/checkout@v4 - name: Run Socket Basics - uses: SocketDev/socket-basics@v1.0.2 + uses: SocketDev/socket-basics@v1.0.3 with: github_token: ${{ secrets.GITHUB_TOKEN }} socket_security_api_key: ${{ secrets.SOCKET_SECURITY_API_KEY }} @@ -106,7 +106,7 @@ Configure scanning policies, notification channels, and rule sets for your entir **Dashboard-Configured (Enterprise):** ```yaml -- uses: SocketDev/socket-basics@v1.0.2 +- uses: SocketDev/socket-basics@v1.0.3 with: github_token: ${{ secrets.GITHUB_TOKEN }} socket_security_api_key: ${{ secrets.SOCKET_SECURITY_API_KEY }} @@ -115,7 +115,7 @@ Configure scanning policies, notification channels, and rule sets for your entir **CLI-Configured:** ```yaml -- uses: SocketDev/socket-basics@v1.0.2 +- uses: SocketDev/socket-basics@v1.0.3 with: github_token: ${{ secrets.GITHUB_TOKEN }} python_sast_enabled: 'true' @@ -129,7 +129,7 @@ Configure scanning policies, notification channels, and rule sets for your entir ```bash # Build with version tag -docker build -t socketdev/socket-basics:1.0.2 . +docker build -t socketdev/socket-basics:1.0.3 . # Run scan docker run --rm -v "$PWD:/workspace" socketdev/socket-basics:1.0.3 \ diff --git a/pyproject.toml b/pyproject.toml index 7258fa0..3ad5b2a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "socket_basics" -version = "1.0.2" +version = "1.0.3" description = "Socket Basics with integrated SAST, secret scanning, and container analysis" readme = "README.md" requires-python = ">=3.10" diff --git a/socket_basics/connectors.yaml b/socket_basics/connectors.yaml index 2fea787..ae6f741 100644 --- a/socket_basics/connectors.yaml +++ b/socket_basics/connectors.yaml @@ -20,7 +20,6 @@ connectors: enables: - python_sast_enabled - javascript_sast_enabled - - typescript_sast_enabled - go_sast_enabled - golang_sast_enabled - java_sast_enabled @@ -56,12 +55,6 @@ connectors: type: bool default: false group: "SAST Javascript" - - name: typescript_sast_enabled - option: --typescript - description: "Enable TypeScript SAST scanning" - env_variable: INPUT_TYPESCRIPT_SAST_ENABLED - type: bool - default: false - name: go_sast_enabled option: --go description: "Enable Go SAST scanning" diff --git a/socket_basics/core/connector/opengrep/github_pr.py b/socket_basics/core/connector/opengrep/github_pr.py index 2bb0e72..d523fab 100644 --- a/socket_basics/core/connector/opengrep/github_pr.py +++ b/socket_basics/core/connector/opengrep/github_pr.py @@ -15,7 +15,7 @@ def _get_github_pr_result_limit() -> int: """Get the result limit for GitHub PR notifications.""" try: - notifications_yaml = Path(__file__).parent.parent.parent / 'notifications.yaml' + notifications_yaml = Path(__file__).parent.parent.parent.parent / 'notifications.yaml' with open(notifications_yaml, 'r') as f: config = yaml.safe_load(f) return config.get('settings', {}).get('result_limits', {}).get('github_pr', 100) @@ -30,124 +30,156 @@ def format_notifications(groups: Dict[str, List[Dict[str, Any]]], config=None) - # Map subtypes to friendly display names subtype_names = { - 'sast-python': 'SAST Python', - 'sast-javascript': 'SAST JavaScript', - 'sast-golang': 'SAST Go', - 'sast-java': 'SAST Java', - 'sast-php': 'SAST PHP', - 'sast-ruby': 'SAST Ruby', - 'sast-csharp': 'SAST C#', - 'sast-dotnet': 'SAST .NET', - 'sast-c': 'SAST C', - 'sast-cpp': 'SAST C++', - 'sast-kotlin': 'SAST Kotlin', - 'sast-scala': 'SAST Scala', - 
'sast-swift': 'SAST Swift', - 'sast-rust': 'SAST Rust', + 'sast-python': 'Socket SAST Python', + 'sast-javascript': 'Socket SAST JavaScript', + 'sast-golang': 'Socket SAST Go', + 'sast-java': 'Socket SAST Java', + 'sast-php': 'Socket SAST PHP', + 'sast-ruby': 'Socket SAST Ruby', + 'sast-csharp': 'Socket SAST C#', + 'sast-dotnet': 'Socket SAST .NET', + 'sast-c': 'Socket SAST C', + 'sast-cpp': 'Socket SAST C++', + 'sast-kotlin': 'Socket SAST Kotlin', + 'sast-scala': 'Socket SAST Scala', + 'sast-swift': 'Socket SAST Swift', + 'sast-rust': 'Socket SAST Rust', } severity_order = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3} + severity_emoji = { + 'critical': 'šŸ”“', + 'high': '🟠', + 'medium': '🟔', + 'low': '⚪' + } for subtype, items in groups.items(): - rows = [] + # Group findings by file path, then by rule within each file + file_groups = {} # {file_path: {rule_id: [(severity, start, end, code_snippet), ...]}} + severity_counts = {'critical': 0, 'high': 0, 'medium': 0, 'low': 0} + for item in items: c = item['component'] a = item['alert'] props = a.get('props', {}) or {} full_path = props.get('filePath', a.get('location', {}).get('path')) or '-' + rule_id = props.get('ruleId', a.get('title', '')) + severity = a.get('severity', '').lower() + start_line = props.get('startLine', '') + end_line = props.get('endLine', '') + code_snippet = props.get('codeSnippet', '') or '' - try: - file_name = Path(full_path).name - except Exception: - file_name = full_path + # Count by severity + if severity in severity_counts: + severity_counts[severity] += 1 - # Format code snippets with
<pre> tags and <br> for line breaks
-            code_snippet = props.get('codeSnippet', '') or ''
-            if code_snippet:
-                # Use <pre> tags for better code formatting as requested
-                code_formatted = code_snippet.replace('\n', '<br>')
-                if len(code_formatted) > 200:
-                    code_formatted = code_formatted[:200] + '...'
-                code_snippet = f"<pre>{code_formatted}</pre>
" - else: - code_snippet = '-' + # Group by file path + if full_path not in file_groups: + file_groups[full_path] = {} - severity = a.get('severity', '').lower() - rows.append(( - severity_order.get(severity, 4), - [ - f"**{props.get('ruleId', a.get('title', ''))}**", - f"*{a.get('severity', '')}*", - f"`{file_name}`", - f"`{full_path}`", - f"Lines {props.get('startLine','')}-{props.get('endLine','')}", - code_snippet - ] - )) - - # Sort by severity and extract rows - rows.sort(key=lambda x: x[0]) - rows = [row[1] for row in rows] + # Group by rule within file + if rule_id not in file_groups[full_path]: + file_groups[full_path][rule_id] = [] + + file_groups[full_path][rule_id].append({ + 'severity': severity, + 'start_line': start_line, + 'end_line': end_line, + 'code_snippet': code_snippet + }) - # Apply truncation - # result_limit = _get_github_pr_result_limit() - total_results = len(rows) - was_truncated = False - # - # if total_results > result_limit: - # logger.info(f"Truncating GitHub PR OpenGrep results from {total_results} to {result_limit} (prioritized by severity)") - # rows = rows[:result_limit] - # was_truncated = True + # Build content in requested format + display_name = subtype_names.get(subtype, f"Socket {subtype.upper()}") - # Create markdown table for this subtype - display_name = subtype_names.get(subtype, subtype.upper()) - if not rows: - content = f"No {display_name} issues found." + if not file_groups: + content = f"āœ… No issues found." else: - headers = ['Rule', 'Severity', 'File', 'Path', 'Lines', 'Code'] - header_row = '| ' + ' | '.join(headers) + ' |' - separator_row = '| ' + ' | '.join(['---'] * len(headers)) + ' |' - content_rows = [] - for row in rows: - content_rows.append('| ' + ' | '.join(str(cell) for cell in row) + ' |') + content_lines = [] - content = '\n'.join([header_row, separator_row] + content_rows) + # Add summary + content_lines.append("### Summary") + content_lines.append(f"{severity_emoji.get('critical', 'šŸ”“')} Critical: {severity_counts['critical']} | " + f"{severity_emoji.get('high', '🟠')} High: {severity_counts['high']} | " + f"{severity_emoji.get('medium', '🟔')} Medium: {severity_counts['medium']} | " + f"{severity_emoji.get('low', '⚪')} Low: {severity_counts['low']}") + content_lines.append("") + content_lines.append("### Details") + content_lines.append("") - # Add truncation notice if needed - # if was_truncated: - # content += f"\n\nāš ļø **Results truncated to {result_limit} highest severity findings** (total: {total_results}). See full scan URL for complete results." 
+ # Sort files by highest severity finding in each file + file_severity_list = [] + for file_path in file_groups.keys(): + # Find the highest severity (lowest number) in this file + min_severity = 999 + for rule_id, locations in file_groups[file_path].items(): + for loc in locations: + sev = severity_order.get(loc['severity'], 4) + if sev < min_severity: + min_severity = sev + file_severity_list.append((min_severity, file_path)) + + # Sort by severity first, then by file path + file_severity_list.sort(key=lambda x: (x[0], x[1])) + + for _, file_path in file_severity_list: + try: + file_name = Path(file_path).name + except Exception: + file_name = file_path + + # File header + content_lines.append(f"#### `{file_path}`") + content_lines.append("") + + # Sort rules by severity within file + rules_in_file = [] + for rule_id, locations in file_groups[file_path].items(): + # Get highest severity for this rule + min_severity = min(severity_order.get(loc['severity'], 4) for loc in locations) + rules_in_file.append((min_severity, rule_id, locations)) + + rules_in_file.sort(key=lambda x: x[0]) + + # Output each rule with its locations + for _, rule_id, locations in rules_in_file: + # Get severity from first location (they should all be same rule) + rule_severity = locations[0]['severity'] + emoji = severity_emoji.get(rule_severity, '⚪') + + content_lines.append(f"**{rule_id}** ") + content_lines.append(f"{emoji} *{rule_severity.upper()}*") + content_lines.append("") + + # Output each location with code snippet + for loc in locations: + content_lines.append(f"**Lines {loc['start_line']}:{loc['end_line']}**") + if loc['code_snippet']: + # Format code snippet in code block + content_lines.append("```") + content_lines.append(loc['code_snippet']) + content_lines.append("```") + content_lines.append("") + + content = '\n'.join(content_lines) - # Build title with repo/branch/commit info from config - title_parts = ["Socket Security Results"] + # Build title + title_parts = [display_name] if config: if config.repo: title_parts.append(config.repo) if config.branch: title_parts.append(config.branch) if config.commit_hash: - title_parts.append(config.commit_hash) + title_parts.append(config.commit_hash[:8]) # Short hash title = " - ".join(title_parts) - # Count total findings for summary - total_findings = total_results if not was_truncated else total_results - - # Add summary section with scanner findings - summary_content = f"""## Summary - -| Scanner | Findings | -|---------|----------| -| {display_name} | {total_findings} | - -## Details - -{content}""" - # Wrap content with HTML comment markers for section updates wrapped_content = f""" # {title} -{summary_content} +{content} """ tables.append({ diff --git a/socket_basics/core/connector/opengrep/ms_sentinel.py b/socket_basics/core/connector/opengrep/ms_sentinel.py index 6608b5c..6750bb4 100644 --- a/socket_basics/core/connector/opengrep/ms_sentinel.py +++ b/socket_basics/core/connector/opengrep/ms_sentinel.py @@ -15,10 +15,10 @@ def _get_ms_sentinel_result_limit() -> int: """Get the result limit for MS Sentinel notifications.""" try: - notifications_yaml = Path(__file__).parent.parent.parent / 'notifications.yaml' + notifications_yaml = Path(__file__).parent.parent.parent.parent / 'notifications.yaml' with open(notifications_yaml, 'r') as f: config = yaml.safe_load(f) - return config.get('settings', {}).get('result_limits', {}).get('ms_sentinel', 500) + return config.get('settings', {}).get('result_limits', {}).get('ms_sentinel', 100) except Exception as 
e: logger.warning(f"Could not load MS Sentinel result limit from notifications.yaml: {e}, using default 500") return 500 diff --git a/socket_basics/core/connector/opengrep/ms_teams.py b/socket_basics/core/connector/opengrep/ms_teams.py index 3af3db4..5369f53 100644 --- a/socket_basics/core/connector/opengrep/ms_teams.py +++ b/socket_basics/core/connector/opengrep/ms_teams.py @@ -15,7 +15,7 @@ def _get_ms_teams_result_limit() -> int: """Get the result limit for MS Teams notifications.""" try: - notifications_yaml = Path(__file__).parent.parent.parent / 'notifications.yaml' + notifications_yaml = Path(__file__).parent.parent.parent.parent / 'notifications.yaml' with open(notifications_yaml, 'r') as f: config = yaml.safe_load(f) return config.get('settings', {}).get('result_limits', {}).get('ms_teams', 50) @@ -25,27 +25,27 @@ def _get_ms_teams_result_limit() -> int: def format_notifications(groups: Dict[str, List[Dict[str, Any]]]) -> List[Dict[str, Any]]: - """Format for Microsoft Teams - return multiple tables grouped by subtype.""" + """Format for Microsoft Teams - return formatted sections grouped by subtype.""" tables = [] # Map subtypes to friendly display names subtype_names = { - 'sast-python': 'SAST Python', - 'sast-javascript': 'SAST JavaScript', - 'sast-golang': 'SAST Go', - 'sast-java': 'SAST Java', - 'sast-php': 'SAST PHP', - 'sast-ruby': 'SAST Ruby', - 'sast-csharp': 'SAST C#', - 'sast-dotnet': 'SAST .NET', - 'sast-c': 'SAST C', - 'sast-cpp': 'SAST C++', - 'sast-kotlin': 'SAST Kotlin', - 'sast-scala': 'SAST Scala', - 'sast-swift': 'SAST Swift', - 'sast-rust': 'SAST Rust', - 'sast-elixir': 'SAST Elixir', - 'sast-generic': 'SAST Generic' + 'sast-python': 'Socket SAST Python', + 'sast-javascript': 'Socket SAST JavaScript', + 'sast-golang': 'Socket SAST Go', + 'sast-java': 'Socket SAST Java', + 'sast-php': 'Socket SAST PHP', + 'sast-ruby': 'Socket SAST Ruby', + 'sast-csharp': 'Socket SAST C#', + 'sast-dotnet': 'Socket SAST .NET', + 'sast-c': 'Socket SAST C', + 'sast-cpp': 'Socket SAST C++', + 'sast-kotlin': 'Socket SAST Kotlin', + 'sast-scala': 'Socket SAST Scala', + 'sast-swift': 'Socket SAST Swift', + 'sast-rust': 'Socket SAST Rust', + 'sast-elixir': 'Socket SAST Elixir', + 'sast-generic': 'Socket SAST Generic' } severity_order = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3} @@ -54,7 +54,9 @@ def format_notifications(groups: Dict[str, List[Dict[str, Any]]]) -> List[Dict[s if not items: # Skip empty groups continue - rows = [] + findings = [] + severity_counts = {'critical': 0, 'high': 0, 'medium': 0, 'low': 0} + for item in items: c = item['component'] a = item['alert'] @@ -67,46 +69,81 @@ def format_notifications(groups: Dict[str, List[Dict[str, Any]]]) -> List[Dict[s file_name = full_path severity = a.get('severity', '').lower() - rows.append(( + + # Count by severity + if severity in severity_counts: + severity_counts[severity] += 1 + + # Add severity emojis + severity_emoji = { + 'critical': 'šŸ”“', + 'high': '🟠', + 'medium': '🟔', + 'low': '⚪' + }.get(severity, '⚪') + + code_snippet = (props.get('codeSnippet', '') or '')[:150] + if len(props.get('codeSnippet', '') or '') > 150: + code_snippet += '...' 
+ + findings.append(( severity_order.get(severity, 4), - [ - props.get('ruleId', a.get('title', '')), - a.get('severity', ''), - file_name, - full_path, - f"{props.get('startLine','')}-{props.get('endLine','')}", - (props.get('codeSnippet', '') or '')[:150] # Truncate for Teams - ] + { + 'rule': props.get('ruleId', a.get('title', '')), + 'severity': a.get('severity', ''), + 'severity_emoji': severity_emoji, + 'file_name': file_name, + 'full_path': full_path, + 'lines': f"{props.get('startLine','')}-{props.get('endLine','')}", + 'code': code_snippet + } )) - # Sort by severity and extract rows - rows.sort(key=lambda x: x[0]) - rows = [row[1] for row in rows] + # Sort by severity and extract findings + findings.sort(key=lambda x: x[0]) + findings = [f[1] for f in findings] # Apply truncation result_limit = _get_ms_teams_result_limit() - total_results = len(rows) + total_results = len(findings) was_truncated = False if total_results > result_limit: logger.info(f"Truncating MS Teams OpenGrep results from {total_results} to {result_limit} (prioritized by severity)") - rows = rows[:result_limit] + findings = findings[:result_limit] was_truncated = True - # Create a separate table for each subtype/language group - display_name = subtype_names.get(subtype, subtype.upper()) - headers = ['Rule', 'Severity', 'File', 'Path', 'Lines', 'Code'] - header_row = ' | '.join(headers) - separator_row = ' | '.join(['---'] * len(headers)) - content_rows = [] - for row in rows: - content_rows.append(' | '.join(str(cell) for cell in row)) + # Create MS Teams-formatted content + display_name = subtype_names.get(subtype, f"Socket {subtype.upper()}") - content = '\n'.join([header_row, separator_row] + content_rows) if rows else f"No {display_name} issues found." - - # Add truncation notice if needed - if was_truncated: - content += f"\n\nāš ļø Results truncated to {result_limit} highest severity findings (total: {total_results}). See full scan URL for complete results." + if not findings: + content = f"āœ… No issues found." + else: + # Create summary table + content_lines = [ + "**Summary**\n\n", + f"šŸ”“ Critical: {severity_counts['critical']} | 🟠 High: {severity_counts['high']} | 🟔 Medium: {severity_counts['medium']} | ⚪ Low: {severity_counts['low']}\n\n", + "---\n\n", + "**Details**\n\n" + ] + + # Format findings list + for idx, f in enumerate(findings, 1): + content_lines.append( + f"{f['severity_emoji']} **{f['rule']}** ({f['severity'].upper()})\n\n" + f"**File:** `{f['file_name']}`\n\n" + f"**Path:** {f['full_path']}\n\n" + f"**Lines:** {f['lines']}" + ) + if f['code'].strip(): + content_lines.append(f"\n\n**Code:** `{f['code']}`") + content_lines.append("\n\n---\n") + + content = "".join(content_lines) + + # Add truncation notice if needed + if was_truncated: + content += f"\nāš ļø **Results truncated to {result_limit} highest severity findings (total: {total_results}). 
View more in full scan.**" tables.append({ 'title': display_name, diff --git a/socket_basics/core/connector/opengrep/slack.py b/socket_basics/core/connector/opengrep/slack.py index 9409bfe..273a78d 100644 --- a/socket_basics/core/connector/opengrep/slack.py +++ b/socket_basics/core/connector/opengrep/slack.py @@ -15,7 +15,7 @@ def _get_slack_result_limit() -> int: """Get the result limit for Slack notifications.""" try: - notifications_yaml = Path(__file__).parent.parent.parent / 'notifications.yaml' + notifications_yaml = Path(__file__).parent.parent.parent.parent / 'notifications.yaml' with open(notifications_yaml, 'r') as f: config = yaml.safe_load(f) return config.get('settings', {}).get('result_limits', {}).get('slack', 50) @@ -25,27 +25,27 @@ def _get_slack_result_limit() -> int: def format_notifications(groups: Dict[str, List[Dict[str, Any]]]) -> List[Dict[str, Any]]: - """Format for Slack notifications - return multiple tables grouped by subtype.""" + """Format for Slack notifications - return formatted sections grouped by subtype.""" tables = [] # Map subtypes to friendly display names subtype_names = { - 'sast-python': 'SAST Python', - 'sast-javascript': 'SAST JavaScript', - 'sast-golang': 'SAST Go', - 'sast-java': 'SAST Java', - 'sast-php': 'SAST PHP', - 'sast-ruby': 'SAST Ruby', - 'sast-csharp': 'SAST C#', - 'sast-dotnet': 'SAST .NET', - 'sast-c': 'SAST C', - 'sast-cpp': 'SAST C++', - 'sast-kotlin': 'SAST Kotlin', - 'sast-scala': 'SAST Scala', - 'sast-swift': 'SAST Swift', - 'sast-rust': 'SAST Rust', - 'sast-elixir': 'SAST Elixir', - 'sast-generic': 'SAST Generic' + 'sast-python': 'Socket SAST Python', + 'sast-javascript': 'Socket SAST JavaScript', + 'sast-golang': 'Socket SAST Go', + 'sast-java': 'Socket SAST Java', + 'sast-php': 'Socket SAST PHP', + 'sast-ruby': 'Socket SAST Ruby', + 'sast-csharp': 'Socket SAST C#', + 'sast-dotnet': 'Socket SAST .NET', + 'sast-c': 'Socket SAST C', + 'sast-cpp': 'Socket SAST C++', + 'sast-kotlin': 'Socket SAST Kotlin', + 'sast-scala': 'Socket SAST Scala', + 'sast-swift': 'Socket SAST Swift', + 'sast-rust': 'Socket SAST Rust', + 'sast-elixir': 'Socket SAST Elixir', + 'sast-generic': 'Socket SAST Generic' } severity_order = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3} @@ -54,7 +54,9 @@ def format_notifications(groups: Dict[str, List[Dict[str, Any]]]) -> List[Dict[s if not items: # Skip empty groups continue - rows = [] + findings = [] + severity_counts = {'critical': 0, 'high': 0, 'medium': 0, 'low': 0} + for item in items: c = item['component'] a = item['alert'] @@ -72,49 +74,82 @@ def format_notifications(groups: Dict[str, List[Dict[str, Any]]]) -> List[Dict[s 'critical': 'šŸ”“', 'high': '🟠', 'medium': '🟔', - 'low': '🟢' + 'low': '⚪' }.get(severity, '⚪') - rows.append(( + # Count by severity + if severity in severity_counts: + severity_counts[severity] += 1 + + # Get full code snippet (will be formatted in code block) + code_snippet = (props.get('codeSnippet', '') or '') + # Truncate long snippets to avoid message bloat + if len(code_snippet) > 200: + code_snippet = code_snippet[:200] + '\n...' + + findings.append(( severity_order.get(severity, 4), - [ - props.get('ruleId', a.get('title', '')), - f"{severity_emoji} {a.get('severity', '')}", - file_name, - full_path, - f"{props.get('startLine','')}-{props.get('endLine','')}", - (props.get('codeSnippet', '') or '')[:100] + ('...' 
if len(props.get('codeSnippet', '') or '') > 100 else '') - ] + { + 'rule': props.get('ruleId', a.get('title', '')), + 'severity': a.get('severity', ''), + 'severity_emoji': severity_emoji, + 'file_name': file_name, + 'full_path': full_path, + 'lines': f"{props.get('startLine','')}-{props.get('endLine','')}", + 'code': code_snippet + } )) - # Sort by severity and extract rows - rows.sort(key=lambda x: x[0]) - rows = [row[1] for row in rows] + # Sort by severity and extract findings + findings.sort(key=lambda x: x[0]) + findings = [f[1] for f in findings] # Apply truncation result_limit = _get_slack_result_limit() - total_results = len(rows) + total_results = len(findings) was_truncated = False if total_results > result_limit: logger.info(f"Truncating Slack OpenGrep results from {total_results} to {result_limit} (prioritized by severity)") - rows = rows[:result_limit] + findings = findings[:result_limit] was_truncated = True - # Create a separate table for each subtype/language group - from tabulate import tabulate - - display_name = subtype_names.get(subtype, subtype.upper()) - headers = ['Rule', 'Severity', 'File', 'Path', 'Lines', 'Code'] - table_content = tabulate(rows, headers=headers, tablefmt='pipe') if rows else f"No {display_name} issues found." + # Create Slack-formatted content + display_name = subtype_names.get(subtype, f"Socket {subtype.upper()}") - # Add truncation notice if needed - if was_truncated: - table_content += f"\n\nāš ļø *Results truncated to {result_limit} highest severity findings (total: {total_results}). See full scan URL for complete results.*" + if not findings: + content = f"āœ… No issues found." + else: + # Create summary table + content_lines = [ + "*Summary*", + f"šŸ”“ Critical: {severity_counts['critical']} | 🟠 High: {severity_counts['high']} | 🟔 Medium: {severity_counts['medium']} | ⚪ Low: {severity_counts['low']}", + "", + "*Details*", + "" + ] + + # Format findings list + for idx, f in enumerate(findings, 1): + content_lines.append( + f"{f['severity_emoji']} *{f['rule']}* ({f['severity'].upper()})\n" + f"File: `{f['file_name']}` (lines {f['lines']})\n" + f"Path: {f['full_path']}" + ) + if f['code'].strip(): + # Use code blocks for code snippets + content_lines.append(f"```\n{f['code']}\n```") + content_lines.append("") # Empty line between findings + + content = "\n".join(content_lines) + + # Add truncation notice if needed + if was_truncated: + content += f"\nāš ļø *Showing {result_limit} of {total_results} findings (highest severity first).*" tables.append({ 'title': display_name, - 'content': table_content + 'content': content }) # Return list of tables - one per language group diff --git a/socket_basics/core/connector/opengrep/sumologic.py b/socket_basics/core/connector/opengrep/sumologic.py index 2935acf..16ea440 100644 --- a/socket_basics/core/connector/opengrep/sumologic.py +++ b/socket_basics/core/connector/opengrep/sumologic.py @@ -13,12 +13,12 @@ def _get_sumologic_result_limit() -> int: - """Get the result limit for SumoLogic notifications.""" + """Get the result limit for Sumo Logic notifications.""" try: - notifications_yaml = Path(__file__).parent.parent.parent / 'notifications.yaml' + notifications_yaml = Path(__file__).parent.parent.parent.parent / 'notifications.yaml' with open(notifications_yaml, 'r') as f: config = yaml.safe_load(f) - return config.get('settings', {}).get('result_limits', {}).get('sumologic', 500) + return config.get('settings', {}).get('result_limits', {}).get('sumologic', 100) except Exception as e: 
logger.warning(f"Could not load SumoLogic result limit from notifications.yaml: {e}, using default 500") return 500 diff --git a/socket_basics/core/connector/socket_tier1/github_pr.py b/socket_basics/core/connector/socket_tier1/github_pr.py index 769919c..e10f967 100644 --- a/socket_basics/core/connector/socket_tier1/github_pr.py +++ b/socket_basics/core/connector/socket_tier1/github_pr.py @@ -24,8 +24,21 @@ def _make_purl(comp: Dict[str, Any]) -> str: def format_notifications(components_list: List[Dict[str, Any]], config=None) -> List[Dict[str, Any]]: - """Format for GitHub PR comments - detailed with markdown formatting.""" - rows = [] + """Format for GitHub PR comments - grouped by PURL and reachability.""" + from collections import defaultdict + + severity_order = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3} + severity_emoji = { + 'critical': 'šŸ”“', + 'high': '🟠', + 'medium': '🟔', + 'low': '⚪' + } + + # Group by PURL -> Reachability -> Findings + purl_groups = defaultdict(lambda: {'reachable': [], 'unknown': [], 'error': [], 'unreachable': []}) + severity_counts = {'critical': 0, 'high': 0, 'medium': 0, 'low': 0} + for comp in components_list: comp_name = str(comp.get('name') or comp.get('id') or '-') @@ -33,10 +46,14 @@ def format_notifications(components_list: List[Dict[str, Any]], config=None) -> props = a.get('props', {}) or {} purl = str(props.get('purl') or _make_purl(comp) or comp_name) cve_id = str(props.get('ghsaId') or props.get('cveId') or a.get('title') or '') - severity = str(a.get('severity') or props.get('severity') or '') - reachability = str(props.get('reachability') or '').lower() + severity = str(a.get('severity') or props.get('severity') or '').lower() + reachability = str(props.get('reachability') or 'unknown').lower() - # Format with markdown for better GitHub display + # Count by severity + if severity in severity_counts: + severity_counts[severity] += 1 + + # Get trace data trace_raw = props.get('trace') or '' trace_str = '' if isinstance(trace_raw, list): @@ -44,36 +61,99 @@ def format_notifications(components_list: List[Dict[str, Any]], config=None) -> elif isinstance(trace_raw, str): trace_str = trace_raw - if reachability == 'reachable' and trace_str: - # Convert newlines to
<br> tags for GitHub markdown tables
-                trace_formatted = trace_str.replace('\n', '<br>')
-                # Use <pre> tags for better code formatting as requested
-                if len(trace_formatted) > 300:
-                    trace_formatted = trace_formatted[:300] + '...'
-                trace_formatted = f"<pre>{trace_formatted}</pre>
" - else: - trace_formatted = f"`{purl}`" + # Truncate long traces + if trace_str and len(trace_str) > 500: + trace_str = trace_str[:500] + '\n...' + + finding = { + 'cve_id': cve_id, + 'severity': severity, + 'severity_order': severity_order.get(severity, 4), + 'trace': trace_str + } - rows.append([ - f"**{cve_id}**", - f"*{severity}*", - f"**{reachability.upper()}**" if reachability == 'reachable' else reachability, - f"`{purl}`", - trace_formatted - ]) + # Group by reachability + if reachability in purl_groups[purl]: + purl_groups[purl][reachability].append(finding) - # Create markdown table - if not rows: - content = "No reachability issues found." + # Sort findings within each group by severity (Critical -> High -> Medium -> Low) + for purl in purl_groups: + for reach_type in ['reachable', 'unknown', 'error', 'unreachable']: + purl_groups[purl][reach_type].sort(key=lambda x: x['severity_order']) + + # Build content + if not purl_groups: + content = "āœ… No reachability issues found." else: - headers = ['CVE/GHSA', 'Severity', 'Reachability', 'PURL', 'Trace'] - header_row = '| ' + ' | '.join(headers) + ' |' - separator_row = '| ' + ' | '.join(['---'] * len(headers)) + ' |' - content_rows = [] - for row in rows: - content_rows.append('| ' + ' | '.join(str(cell) for cell in row) + ' |') + content_lines = [] + + # Add summary + content_lines.append("### Summary") + content_lines.append(f"{severity_emoji.get('critical', 'šŸ”“')} Critical: {severity_counts['critical']} | " + f"{severity_emoji.get('high', '🟠')} High: {severity_counts['high']} | " + f"{severity_emoji.get('medium', '🟔')} Medium: {severity_counts['medium']} | " + f"{severity_emoji.get('low', '⚪')} Low: {severity_counts['low']}") + content_lines.append("") + content_lines.append("### Details") + content_lines.append("") + + # Sort PURLs by highest severity finding (critical first) + purl_severity_list = [] + for purl in purl_groups: + min_sev = 999 + for reach_type in ['reachable', 'unknown', 'error', 'unreachable']: + for finding in purl_groups[purl][reach_type]: + if finding['severity_order'] < min_sev: + min_sev = finding['severity_order'] + purl_severity_list.append((min_sev, purl)) + + purl_severity_list.sort(key=lambda x: x[0]) - content = '\n'.join([header_row, separator_row] + content_rows) + for _, purl in purl_severity_list: + content_lines.append(f"#### `{purl}`") + content_lines.append("") + + # Reachable findings (highest priority) + if purl_groups[purl]['reachable']: + content_lines.append("**Reachable**") + content_lines.append("") + for finding in purl_groups[purl]['reachable']: + emoji = severity_emoji.get(finding['severity'], '⚪') + content_lines.append(f"{emoji} **{finding['cve_id']}**: *{finding['severity'].upper()}*") + if finding['trace']: + content_lines.append("```") + content_lines.append(finding['trace']) + content_lines.append("```") + content_lines.append("") + + # Unknown reachability findings + if purl_groups[purl]['unknown']: + content_lines.append("**Unknown**") + content_lines.append("") + for finding in purl_groups[purl]['unknown']: + emoji = severity_emoji.get(finding['severity'], '⚪') + content_lines.append(f"{emoji} **{finding['cve_id']}**: *{finding['severity'].upper()}*") + content_lines.append("") + + # Error reachability findings + if purl_groups[purl]['error']: + content_lines.append("**Error**") + content_lines.append("") + for finding in purl_groups[purl]['error']: + emoji = severity_emoji.get(finding['severity'], '⚪') + content_lines.append(f"{emoji} **{finding['cve_id']}**: 
*{finding['severity'].upper()}*") + content_lines.append("") + + # Unreachable findings (lowest priority) + if purl_groups[purl]['unreachable']: + content_lines.append("**Unreachable**") + content_lines.append("") + for finding in purl_groups[purl]['unreachable']: + emoji = severity_emoji.get(finding['severity'], '⚪') + content_lines.append(f"{emoji} **{finding['cve_id']}**: *{finding['severity'].upper()}*") + content_lines.append("") + + content = '\n'.join(content_lines) # Build title with repo/branch/commit info from config title_parts = ["Socket Security Tier 1 Results"] @@ -87,19 +167,11 @@ def format_notifications(components_list: List[Dict[str, Any]], config=None) -> title = " - ".join(title_parts) - # Count total findings for summary - total_findings = len(rows) + # Count total findings + total_findings = sum(severity_counts.values()) - # Add summary section with scanner findings - summary_content = f"""## Summary - -| Scanner | Findings | -|---------|----------| -| Socket Tier1 | {total_findings} | - -## Details - -{content}""" + # Content already includes summary and details sections + summary_content = content # Wrap content with HTML comment markers for section updates wrapped_content = f""" diff --git a/socket_basics/core/connector/socket_tier1/jira.py b/socket_basics/core/connector/socket_tier1/jira.py index 5fc5972..a7dd31b 100644 --- a/socket_basics/core/connector/socket_tier1/jira.py +++ b/socket_basics/core/connector/socket_tier1/jira.py @@ -1,6 +1,23 @@ """Jira notifier formatting for Socket Tier1 reachability analysis.""" from typing import Dict, Any, List +from pathlib import Path +import logging +import yaml + +logger = logging.getLogger(__name__) + + +def _get_jira_result_limit() -> int: + """Get the result limit for Jira notifications.""" + try: + notifications_yaml = Path(__file__).parent.parent.parent.parent / 'notifications.yaml' + with open(notifications_yaml, 'r') as f: + config = yaml.safe_load(f) + return config.get('settings', {}).get('result_limits', {}).get('jira', 30) + except Exception as e: + logger.warning(f"Could not load Jira result limit from notifications.yaml: {e}, using default 30") + return 30 def _detect_language_from_purl(purl: str) -> str: @@ -76,7 +93,8 @@ def _make_purl(comp: Dict[str, Any]) -> str: def format_notifications(components_list: List[Dict[str, Any]], config=None) -> List[Dict[str, Any]]: - """Format for Jira tickets - using panels for better layout control.""" + """Format for Jira tickets - grouped by PURL and reachability.""" + from collections import defaultdict # Define severity ranking for sorting severity_rank = { @@ -86,8 +104,9 @@ def format_notifications(components_list: List[Dict[str, Any]], config=None) -> 'low': 3 } - # Collect all alerts with component info - all_alerts = [] + # Group by PURL -> Reachability -> Findings + purl_groups = defaultdict(lambda: {'reachable': [], 'unknown': [], 'error': [], 'unreachable': []}) + for comp in components_list: comp_name = str(comp.get('name') or comp.get('id') or '-') @@ -96,7 +115,7 @@ def format_notifications(components_list: List[Dict[str, Any]], config=None) -> purl = str(props.get('purl') or _make_purl(comp) or comp_name) cve_id = str(props.get('ghsaId') or props.get('cveId') or a.get('title') or '') severity = str(a.get('severity') or props.get('severity') or '').lower() - reachability = str(props.get('reachability') or '').lower() + reachability = str(props.get('reachability') or 'unknown').lower() # Format trace data trace_raw = props.get('trace') or '' @@ -106,15 
+125,27 @@ def format_notifications(components_list: List[Dict[str, Any]], config=None) -> elif isinstance(trace_raw, str): trace_str = trace_raw - all_alerts.append({ + # Truncate long traces + if trace_str and len(trace_str) > 2000: + trace_str = trace_str[:2000] + '\n...' + + finding = { 'cve_id': cve_id, 'severity': severity, - 'reachability': reachability, - 'purl': purl, + 'severity_rank': severity_rank.get(severity, 999), 'trace_str': trace_str - }) + } + + # Group by reachability + if reachability in purl_groups[purl]: + purl_groups[purl][reachability].append(finding) + + # Sort findings within each group by severity (Critical -> High -> Medium -> Low) + for purl in purl_groups: + for reach_type in ['reachable', 'unknown', 'error', 'unreachable']: + purl_groups[purl][reach_type].sort(key=lambda x: x['severity_rank']) - if not all_alerts: + if not purl_groups: content = { "type": "doc", "version": 1, @@ -126,96 +157,218 @@ def format_notifications(components_list: List[Dict[str, Any]], config=None) -> ] } else: - # Sort alerts by severity (Critical -> High -> Medium -> Low) - sorted_alerts = sorted( - all_alerts, - key=lambda x: severity_rank.get(x['severity'], 999) + # Apply truncation at finding level + result_limit = _get_jira_result_limit() + total_results = sum( + len(purl_groups[purl]['reachable']) + + len(purl_groups[purl]['unreachable']) + + len(purl_groups[purl]['unknown']) + for purl in purl_groups ) panels = [] + findings_shown = 0 + was_truncated = False + + # Sort PURLs by highest severity finding (critical first) + purl_severity_list = [] + for purl in purl_groups: + min_sev = 999 + for reach_type in ['reachable', 'unknown', 'error', 'unreachable']: + for finding in purl_groups[purl][reach_type]: + if finding['severity_rank'] < min_sev: + min_sev = finding['severity_rank'] + purl_severity_list.append((min_sev, purl)) + + purl_severity_list.sort(key=lambda x: x[0]) - for alert in sorted_alerts: - # Map severity to Jira priority - jira_priority = { - 'critical': 'Highest', - 'high': 'High', - 'medium': 'Medium', - 'low': 'Low' - }.get(alert['severity'], 'Medium') + # Detect language once per PURL + for _, purl in purl_severity_list: + if findings_shown >= result_limit: + was_truncated = True + break - # Determine panel color based on priority - panel_type = { - 'Highest': 'error', - 'High': 'warning', - 'Medium': 'note', - 'Low': 'info' - }.get(jira_priority, 'note') + language = _detect_language_from_purl(purl) - # Build panel content - panel_content = [ - { + # Add PURL header + panels.append({ + "type": "heading", + "attrs": {"level": 2}, + "content": [ + {"type": "text", "text": "šŸ“¦ Package: ", "marks": [{"type": "strong"}]}, + {"type": "text", "text": purl, "marks": [{"type": "code"}]} + ] + }) + + # Reachable findings (highest priority) + if purl_groups[purl]['reachable']: + panels.append({ "type": "heading", "attrs": {"level": 3}, - "content": [{"type": "text", "text": f"šŸ”’ {alert['cve_id']}", "marks": [{"type": "strong"}]}] - }, - { - "type": "paragraph", - "content": [ - {"type": "text", "text": "Severity: ", "marks": [{"type": "strong"}]}, - {"type": "text", "text": jira_priority} - ] - }, - { - "type": "paragraph", - "content": [ - {"type": "text", "text": "Reachability: ", "marks": [{"type": "strong"}]}, - {"type": "text", "text": alert['reachability'].upper() if alert['reachability'] == 'reachable' else alert['reachability']} - ] - }, - { - "type": "paragraph", - "content": [ - {"type": "text", "text": "Package: ", "marks": [{"type": "strong"}]}, - 
{"type": "text", "text": alert['purl'], "marks": [{"type": "code"}]} + "content": [{"type": "text", "text": "Reachable", "marks": [{"type": "strong"}]}] + }) + + for finding in purl_groups[purl]['reachable']: + if findings_shown >= result_limit: + was_truncated = True + break + + jira_priority = { + 'critical': 'Highest', + 'high': 'High', + 'medium': 'Medium', + 'low': 'Low' + }.get(finding['severity'], 'Medium') + + panel_type = { + 'Highest': 'error', + 'High': 'warning', + 'Medium': 'note', + 'Low': 'info' + }.get(jira_priority, 'note') + + panel_content = [ + { + "type": "paragraph", + "content": [ + {"type": "text", "text": f"šŸ”’ {finding['cve_id']}: ", "marks": [{"type": "strong"}]}, + {"type": "text", "text": jira_priority} + ] + } ] - } - ] + + if finding['trace_str']: + panel_content.append({ + "type": "codeBlock", + "attrs": {"language": language.lower()}, + "content": [{"type": "text", "text": finding['trace_str']}] + }) + + panels.append({ + "type": "panel", + "attrs": {"panelType": panel_type}, + "content": panel_content + }) + + findings_shown += 1 - # Add trace if reachable and trace exists - if alert['reachability'] == 'reachable' and alert['trace_str']: - # Dynamically determine language from PURL - language = _detect_language_from_purl(alert['purl']) + # Unknown reachability findings + if purl_groups[purl]['unknown'] and findings_shown < result_limit: + panels.append({ + "type": "heading", + "attrs": {"level": 3}, + "content": [{"type": "text", "text": "Unknown", "marks": [{"type": "strong"}]}] + }) - panel_content.extend([ - { + for finding in purl_groups[purl]['unknown']: + if findings_shown >= result_limit: + was_truncated = True + break + + jira_priority = { + 'critical': 'Highest', + 'high': 'High', + 'medium': 'Medium', + 'low': 'Low' + }.get(finding['severity'], 'Medium') + + panels.append({ "type": "paragraph", "content": [ - {"type": "text", "text": "Call Trace:", "marks": [{"type": "strong"}]} + {"type": "text", "text": f"šŸ”’ {finding['cve_id']}: ", "marks": [{"type": "strong"}]}, + {"type": "text", "text": jira_priority} ] - }, - { - "type": "codeBlock", - "attrs": {"language": language.lower()}, - "content": [{"type": "text", "text": alert['trace_str']}] - } - ]) + }) + + findings_shown += 1 - # Create the panel - panels.append({ - "type": "panel", - "attrs": {"panelType": panel_type}, - "content": panel_content - }) + # Error reachability findings + if purl_groups[purl]['error'] and findings_shown < result_limit: + panels.append({ + "type": "heading", + "attrs": {"level": 3}, + "content": [{"type": "text", "text": "Error", "marks": [{"type": "strong"}]}] + }) + + for finding in purl_groups[purl]['error']: + if findings_shown >= result_limit: + was_truncated = True + break + + jira_priority = { + 'critical': 'Highest', + 'high': 'High', + 'medium': 'Medium', + 'low': 'Low' + }.get(finding['severity'], 'Medium') + + panels.append({ + "type": "paragraph", + "content": [ + {"type": "text", "text": f"šŸ”’ {finding['cve_id']}: ", "marks": [{"type": "strong"}]}, + {"type": "text", "text": jira_priority} + ] + }) + + findings_shown += 1 - # Add a rule/divider between issues - panels.append({ - "type": "rule" - }) + # Unreachable findings (lowest priority) + if purl_groups[purl]['unreachable'] and findings_shown < result_limit: + panels.append({ + "type": "heading", + "attrs": {"level": 3}, + "content": [{"type": "text", "text": "Unreachable", "marks": [{"type": "strong"}]}] + }) + + for finding in purl_groups[purl]['unreachable']: + if findings_shown >= 
result_limit: + was_truncated = True + break + + jira_priority = { + 'critical': 'Highest', + 'high': 'High', + 'medium': 'Medium', + 'low': 'Low' + }.get(finding['severity'], 'Medium') + + panels.append({ + "type": "paragraph", + "content": [ + {"type": "text", "text": f"šŸ”’ {finding['cve_id']}: ", "marks": [{"type": "strong"}]}, + {"type": "text", "text": jira_priority} + ] + }) + + findings_shown += 1 + + # Add divider between packages + panels.append({"type": "rule"}) # Remove the last rule if panels and panels[-1]["type"] == "rule": panels.pop() + # Add truncation notice if needed + if was_truncated: + panels.extend([ + { + "type": "rule" + }, + { + "type": "panel", + "attrs": {"panelType": "warning"}, + "content": [ + { + "type": "paragraph", + "content": [ + {"type": "text", "text": f"āš ļø Results truncated to {result_limit} highest severity findings (total: {total_results}). View more in full scan.", "marks": [{"type": "strong"}]} + ] + } + ] + } + ]) + content = { "type": "doc", "version": 1, diff --git a/socket_basics/core/connector/socket_tier1/ms_teams.py b/socket_basics/core/connector/socket_tier1/ms_teams.py index a112e30..136b446 100644 --- a/socket_basics/core/connector/socket_tier1/ms_teams.py +++ b/socket_basics/core/connector/socket_tier1/ms_teams.py @@ -1,6 +1,23 @@ """Microsoft Teams notifier formatting for Socket Tier1 reachability analysis.""" from typing import Dict, Any, List +from pathlib import Path +import logging +import yaml + +logger = logging.getLogger(__name__) + + +def _get_ms_teams_result_limit() -> int: + """Get the result limit for MS Teams notifications.""" + try: + notifications_yaml = Path(__file__).parent.parent.parent.parent / 'notifications.yaml' + with open(notifications_yaml, 'r') as f: + config = yaml.safe_load(f) + return config.get('settings', {}).get('result_limits', {}).get('ms_teams', 50) + except Exception as e: + logger.warning(f"Could not load MS Teams result limit from notifications.yaml: {e}, using default 50") + return 50 def _make_purl(comp: Dict[str, Any]) -> str: @@ -24,8 +41,21 @@ def _make_purl(comp: Dict[str, Any]) -> str: def format_notifications(components_list: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - """Format for Microsoft Teams - clean tabular format.""" - rows = [] + """Format for Microsoft Teams - grouped by PURL and reachability.""" + from collections import defaultdict + + severity_order = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3} + severity_emoji = { + 'critical': 'šŸ”“', + 'high': '🟠', + 'medium': '🟔', + 'low': '⚪' + } + + # Group by PURL -> Reachability -> Findings + purl_groups = defaultdict(lambda: {'reachable': [], 'unknown': [], 'error': [], 'unreachable': []}) + severity_counts = {'critical': 0, 'high': 0, 'medium': 0, 'low': 0} + for comp in components_list: comp_name = str(comp.get('name') or comp.get('id') or '-') @@ -33,32 +63,131 @@ def format_notifications(components_list: List[Dict[str, Any]]) -> List[Dict[str props = a.get('props', {}) or {} purl = str(props.get('purl') or _make_purl(comp) or comp_name) cve_id = str(props.get('ghsaId') or props.get('cveId') or a.get('title') or '') - severity = str(a.get('severity') or props.get('severity') or '') - reachability = str(props.get('reachability') or '').lower() + severity = str(a.get('severity') or props.get('severity') or '').lower() + reachability = str(props.get('reachability') or 'unknown').lower() - # Clean format for Teams - rows.append([ - cve_id, - severity, - reachability.upper(), - purl[:60] + '...' 
if len(purl) > 60 else purl, # Truncate for Teams - 'Reachable' if reachability == 'reachable' else 'Not Reachable' - ]) + # Count by severity + if severity in severity_counts: + severity_counts[severity] += 1 + + # Get trace data + trace_raw = props.get('trace') or '' + trace_str = '' + if isinstance(trace_raw, list): + trace_str = '\n'.join(str(x) for x in trace_raw) + elif isinstance(trace_raw, str): + trace_str = trace_raw + + # Truncate long traces + if trace_str and len(trace_str) > 500: + trace_str = trace_str[:500] + '\n...' + + finding = { + 'cve_id': cve_id, + 'severity': severity, + 'severity_order': severity_order.get(severity, 4), + 'severity_emoji': severity_emoji.get(severity, '⚪'), + 'trace': trace_str + } + + # Group by reachability + if reachability in purl_groups[purl]: + purl_groups[purl][reachability].append(finding) + + # Sort findings within each group by severity (Critical -> High -> Medium -> Low) + for purl in purl_groups: + for reach_type in ['reachable', 'unknown', 'error', 'unreachable']: + purl_groups[purl][reach_type].sort(key=lambda x: x['severity_order']) - # Format as markdown table for MS Teams - if not rows: - content = "No Socket Tier1 vulnerabilities found." + # Apply truncation at finding level + result_limit = _get_ms_teams_result_limit() + total_results = sum(severity_counts.values()) + + # Format for MS Teams + if not purl_groups: + content = "āœ… No vulnerabilities found." else: - headers = ['CVE/GHSA', 'Severity', 'Reachability', 'Package', 'Status'] - header_row = ' | '.join(headers) - separator_row = ' | '.join(['---'] * len(headers)) - content_rows = [] - for row in rows: - content_rows.append(' | '.join(str(cell) for cell in row)) + # Add summary table + content_lines = [ + "**Summary**\n\n", + f"šŸ”“ Critical: {severity_counts['critical']} | 🟠 High: {severity_counts['high']} | 🟔 Medium: {severity_counts['medium']} | ⚪ Low: {severity_counts['low']}\n\n", + "---\n\n", + "**Details**\n\n" + ] + + findings_shown = 0 + was_truncated = False + + # Sort PURLs by highest severity finding + purl_severity_list = [] + for purl in purl_groups: + min_sev = 999 + for reach_type in ['reachable', 'unknown', 'error', 'unreachable']: + for finding in purl_groups[purl][reach_type]: + if finding['severity_order'] < min_sev: + min_sev = finding['severity_order'] + purl_severity_list.append((min_sev, purl)) + + purl_severity_list.sort(key=lambda x: x[0]) + + for _, purl in purl_severity_list: + if findings_shown >= result_limit: + was_truncated = True + break + + content_lines.append(f"**Package:** `{purl}`\n\n") + + # Reachable findings (highest priority) + if purl_groups[purl]['reachable']: + content_lines.append("**Reachable**\n\n") + for finding in purl_groups[purl]['reachable']: + if findings_shown >= result_limit: + was_truncated = True + break + content_lines.append(f"{finding['severity_emoji']} **{finding['cve_id']}**: *{finding['severity'].upper()}*\n\n") + if finding['trace']: + content_lines.append(f"```\n{finding['trace']}\n```\n\n") + findings_shown += 1 + + # Unknown reachability findings + if purl_groups[purl]['unknown'] and findings_shown < result_limit: + content_lines.append("**Unknown**\n\n") + for finding in purl_groups[purl]['unknown']: + if findings_shown >= result_limit: + was_truncated = True + break + content_lines.append(f"{finding['severity_emoji']} **{finding['cve_id']}**: *{finding['severity'].upper()}*\n\n") + findings_shown += 1 + + # Error reachability findings + if purl_groups[purl]['error'] and findings_shown < result_limit: + 
content_lines.append("**Error**\n\n") + for finding in purl_groups[purl]['error']: + if findings_shown >= result_limit: + was_truncated = True + break + content_lines.append(f"{finding['severity_emoji']} **{finding['cve_id']}**: *{finding['severity'].upper()}*\n\n") + findings_shown += 1 + + # Unreachable findings (lowest priority) + if purl_groups[purl]['unreachable'] and findings_shown < result_limit: + content_lines.append("**Unreachable**\n\n") + for finding in purl_groups[purl]['unreachable']: + if findings_shown >= result_limit: + was_truncated = True + break + content_lines.append(f"{finding['severity_emoji']} **{finding['cve_id']}**: *{finding['severity'].upper()}*\n\n") + findings_shown += 1 + + content_lines.append("---\n\n") + + content = "".join(content_lines) - content = '\n'.join([header_row, separator_row] + content_rows) + # Add truncation notice if needed + if was_truncated: + content += f"\nāš ļø **Showing {findings_shown} of {total_results} findings (highest severity first).**" return [{ - 'title': 'Socket Tier1 Reachability Analysis', + 'title': 'Socket Tier1 Reachability', 'content': content }] \ No newline at end of file diff --git a/socket_basics/core/connector/socket_tier1/slack.py b/socket_basics/core/connector/socket_tier1/slack.py index 35ca933..bb8a287 100644 --- a/socket_basics/core/connector/socket_tier1/slack.py +++ b/socket_basics/core/connector/socket_tier1/slack.py @@ -1,6 +1,23 @@ """Slack notifier formatting for Socket Tier1 reachability analysis.""" from typing import Dict, Any, List +from pathlib import Path +import logging +import yaml + +logger = logging.getLogger(__name__) + + +def _get_slack_result_limit() -> int: + """Get the result limit for Slack notifications.""" + try: + notifications_yaml = Path(__file__).parent.parent.parent.parent / 'notifications.yaml' + with open(notifications_yaml, 'r') as f: + config = yaml.safe_load(f) + return config.get('settings', {}).get('result_limits', {}).get('slack', 50) + except Exception as e: + logger.warning(f"Could not load Slack result limit from notifications.yaml: {e}, using default 50") + return 50 def _make_purl(comp: Dict[str, Any]) -> str: @@ -24,8 +41,21 @@ def _make_purl(comp: Dict[str, Any]) -> str: def format_notifications(components_list: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - """Format for Slack notifications - concise with emojis.""" - rows = [] + """Format for Slack notifications - grouped by PURL and reachability.""" + from collections import defaultdict + + severity_order = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3} + severity_emoji = { + 'critical': 'šŸ”“', + 'high': '🟠', + 'medium': '🟔', + 'low': '⚪' + } + + # Group by PURL -> Reachability -> Findings + purl_groups = defaultdict(lambda: {'reachable': [], 'unknown': [], 'error': [], 'unreachable': []}) + severity_counts = {'critical': 0, 'high': 0, 'medium': 0, 'low': 0} + for comp in components_list: comp_name = str(comp.get('name') or comp.get('id') or '-') @@ -33,49 +63,136 @@ def format_notifications(components_list: List[Dict[str, Any]]) -> List[Dict[str props = a.get('props', {}) or {} purl = str(props.get('purl') or _make_purl(comp) or comp_name) cve_id = str(props.get('ghsaId') or props.get('cveId') or a.get('title') or '') - severity = str(a.get('severity') or props.get('severity') or '') - reachability = str(props.get('reachability') or '').lower() + severity = str(a.get('severity') or props.get('severity') or '').lower() + reachability = str(props.get('reachability') or 'unknown').lower() + + # Count by 
severity + if severity in severity_counts: + severity_counts[severity] += 1 - # Add severity emojis and reachability emojis for Slack - severity_lower = severity.lower() - severity_emoji = { - 'critical': 'šŸ”“', - 'high': '🟠', - 'medium': '🟔', - 'low': '🟢' - }.get(severity_lower, '⚪') + # Get trace data + trace_raw = props.get('trace') or '' + trace_str = '' + if isinstance(trace_raw, list): + trace_str = '\n'.join(str(x) for x in trace_raw) + elif isinstance(trace_raw, str): + trace_str = trace_raw - reach_emoji = { - 'reachable': 'šŸ”„', - 'unreachable': 'āœ…', - 'unknown': 'ā“' - }.get(reachability, '⚪') + # Truncate long traces + if trace_str and len(trace_str) > 500: + trace_str = trace_str[:500] + '\n...' - # Truncate PURL for Slack readability - short_purl = purl[:50] + '...' if len(purl) > 50 else purl + finding = { + 'cve_id': cve_id, + 'severity': severity, + 'severity_order': severity_order.get(severity, 4), + 'severity_emoji': severity_emoji.get(severity, '⚪'), + 'trace': trace_str + } - rows.append([ - cve_id, - f"{severity_emoji} {severity}", - f"{reach_emoji} {reachability}", - short_purl, - 'Yes' if reachability == 'reachable' else 'No' - ]) + # Group by reachability + if reachability in purl_groups[purl]: + purl_groups[purl][reachability].append(finding) + + # Sort findings within each group by severity (Critical -> High -> Medium -> Low) + for purl in purl_groups: + for reach_type in ['reachable', 'unknown', 'error', 'unreachable']: + purl_groups[purl][reach_type].sort(key=lambda x: x['severity_order']) + + # Apply truncation at PURL level - keep top packages by highest severity + result_limit = _get_slack_result_limit() + total_results = sum(severity_counts.values()) - # Format as markdown table for Slack - if not rows: - content = "No Socket Tier1 vulnerabilities found." + # Format for Slack + if not purl_groups: + content = "āœ… No vulnerabilities found." 
else: - headers = ['CVE/GHSA', 'Severity', 'Reachability', 'Package', 'Has Trace'] - header_row = ' | '.join(headers) - separator_row = ' | '.join(['---'] * len(headers)) - content_rows = [] - for row in rows: - content_rows.append(' | '.join(str(cell) for cell in row)) + # Add summary table + content_lines = [ + "*Summary*", + f"šŸ”“ Critical: {severity_counts['critical']} | 🟠 High: {severity_counts['high']} | 🟔 Medium: {severity_counts['medium']} | ⚪ Low: {severity_counts['low']}", + "", + "*Details*", + "" + ] + + findings_shown = 0 + was_truncated = False + + # Sort PURLs by highest severity finding (critical first) + purl_severity_list = [] + for purl in purl_groups: + min_sev = 999 + for reach_type in ['reachable', 'unknown', 'error', 'unreachable']: + for finding in purl_groups[purl][reach_type]: + if finding['severity_order'] < min_sev: + min_sev = finding['severity_order'] + purl_severity_list.append((min_sev, purl)) + + # Sort ascending so critical (0) comes first + purl_severity_list.sort(key=lambda x: x[0]) + + for _, purl in purl_severity_list: + if findings_shown >= result_limit: + was_truncated = True + break + + content_lines.append(f"*Package:* `{purl}`") + content_lines.append("") + + # Reachable findings (highest priority) + if purl_groups[purl]['reachable']: + content_lines.append("*Reachable*") + for finding in purl_groups[purl]['reachable']: + if findings_shown >= result_limit: + was_truncated = True + break + content_lines.append(f"{finding['severity_emoji']} *{finding['cve_id']}*: {finding['severity'].upper()}") + if finding['trace']: + content_lines.append(f"```\n{finding['trace']}\n```") + findings_shown += 1 + content_lines.append("") + + # Unknown reachability findings + if purl_groups[purl]['unknown'] and findings_shown < result_limit: + content_lines.append("*Unknown*") + for finding in purl_groups[purl]['unknown']: + if findings_shown >= result_limit: + was_truncated = True + break + content_lines.append(f"{finding['severity_emoji']} *{finding['cve_id']}*: {finding['severity'].upper()}") + findings_shown += 1 + content_lines.append("") + + # Error reachability findings + if purl_groups[purl]['error'] and findings_shown < result_limit: + content_lines.append("*Error*") + for finding in purl_groups[purl]['error']: + if findings_shown >= result_limit: + was_truncated = True + break + content_lines.append(f"{finding['severity_emoji']} *{finding['cve_id']}*: {finding['severity'].upper()}") + findings_shown += 1 + content_lines.append("") + + # Unreachable findings (lowest priority) + if purl_groups[purl]['unreachable'] and findings_shown < result_limit: + content_lines.append("*Unreachable*") + for finding in purl_groups[purl]['unreachable']: + if findings_shown >= result_limit: + was_truncated = True + break + content_lines.append(f"{finding['severity_emoji']} *{finding['cve_id']}*: {finding['severity'].upper()}") + findings_shown += 1 + content_lines.append("") + + content = "\n".join(content_lines) - content = '\n'.join([header_row, separator_row] + content_rows) + # Add truncation notice if needed + if was_truncated: + content += f"\nāš ļø *Showing {findings_shown} of {total_results} findings (highest severity first).*" return [{ - 'title': 'Socket Tier1 Reachability Analysis', + 'title': 'Socket Tier1 Reachability', 'content': content }] \ No newline at end of file diff --git a/socket_basics/core/connector/trivy/ms_teams.py b/socket_basics/core/connector/trivy/ms_teams.py index bff59f9..12dbf4b 100644 --- a/socket_basics/core/connector/trivy/ms_teams.py +++ 
diff --git a/socket_basics/core/connector/trivy/ms_teams.py b/socket_basics/core/connector/trivy/ms_teams.py
index bff59f9..12dbf4b 100644
--- a/socket_basics/core/connector/trivy/ms_teams.py
+++ b/socket_basics/core/connector/trivy/ms_teams.py
@@ -19,6 +19,7 @@ def format_notifications(mapping: Dict[str, Any], item_name: str = "Unknown", sc
     """
     # Group vulnerabilities by package and severity
     package_groups = defaultdict(lambda: defaultdict(set))  # Use set to avoid duplicates
+    severity_counts = {'critical': 0, 'high': 0, 'medium': 0, 'low': 0}
 
     if scan_type == 'dockerfile':
         # Process dockerfile components
@@ -26,10 +27,14 @@ def format_notifications(mapping: Dict[str, Any], item_name: str = "Unknown", sc
             for alert in comp.get('alerts', []):
                 props = alert.get('props', {}) or {}
                 rule_id = str(props.get('ruleId', '') or alert.get('title', ''))
-                severity = str(alert.get('severity', ''))
+                severity = str(alert.get('severity', '')).lower()
                 message = str(alert.get('description', ''))
                 resolution = str(props.get('resolution', ''))
 
+                # Count by severity
+                if severity in severity_counts:
+                    severity_counts[severity] += 1
+
                 rule_info = f"{rule_id}|{message}|{resolution}"
                 package_groups[rule_id][severity].add(rule_info)
 
@@ -48,7 +53,12 @@ def format_notifications(mapping: Dict[str, Any], item_name: str = "Unknown", sc
             for alert in comp.get('alerts', []):
                 props = alert.get('props', {}) or {}
                 cve_id = str(props.get('vulnerabilityId', '') or alert.get('title', ''))
-                severity = str(alert.get('severity', ''))
+                severity = str(alert.get('severity', '')).lower()
+
+                # Count by severity
+                if severity in severity_counts:
+                    severity_counts[severity] += 1
+
                 package_groups[package_key][severity].add(cve_id)
 
     # Create rows with proper formatting
@@ -64,16 +74,24 @@ def format_notifications(mapping: Dict[str, Any], item_name: str = "Unknown", sc
                 parts = rule_info.split('|', 2)
                 if len(parts) >= 3:
                     _, message, resolution = parts
+
+                    # Add severity emojis
+                    severity_emoji = {
+                        'critical': 'šŸ”“',
+                        'high': '🟠',
+                        'medium': '🟔',
+                        'low': '⚪'
+                    }.get(severity, '⚪')
+
                     rows.append((
                         severity_order.get(severity, 4),
-                        [rule_id, severity, message, resolution[:150] + '...' if len(resolution) > 150 else resolution]
+                        [rule_id, severity_emoji, severity, message, resolution[:150] + '...' if len(resolution) > 150 else resolution]
                     ))
 
         # Sort by severity and extract rows
         rows.sort(key=lambda x: x[0])
         rows = [row[1] for row in rows]
-        headers = ['Rule ID', 'Severity', 'Message', 'Resolution']
     else:
         # Image format: Package | CVEs | Severity
         for package_name, severity_dict in package_groups.items():
@@ -89,19 +107,25 @@ def format_notifications(mapping: Dict[str, Any], item_name: str = "Unknown", sc
                 else:
                     cve_list = ', '.join(cves)
 
+                # Add severity emojis
+                severity_emoji = {
+                    'critical': 'šŸ”“',
+                    'high': '🟠',
+                    'medium': '🟔',
+                    'low': '⚪'
+                }.get(severity, '⚪')
+
                 # Truncate package name for readability if needed
                 display_package = package_name[:40] + '...' if len(package_name) > 40 else package_name
 
                 rows.append((
                     severity_order.get(severity, 4),
-                    [display_package, cve_list, severity]
+                    [display_package, cve_list, severity_emoji, severity]
                 ))
 
         # Sort by severity and extract rows
         rows.sort(key=lambda x: x[0])
         rows = [row[1] for row in rows]
-
-        headers = ['Package', 'CVEs', 'Severity']
 
     # Apply truncation for MS Teams
     max_rows = get_notifier_result_limit('msteams')
@@ -112,29 +136,49 @@ def format_notifications(mapping: Dict[str, Any], item_name: str = "Unknown", sc
         truncated = True
         logger.info(f"Truncated MS Teams results from {original_count} to {max_rows}")
 
-    # Format as simple table for MS Teams
+    # Format for MS Teams
    if not rows:
-        content = "No vulnerabilities found."
+        content = "āœ… No vulnerabilities found."
     else:
-        content_lines = [' | '.join(headers)]
-        content_lines.append(' | '.join(['---'] * len(headers)))
-        for row in rows:
-            content_lines.append(' | '.join(str(cell) for cell in row))
+        # Add summary table
+        content_lines = [
+            "**Summary**\n\n",
+            f"šŸ”“ Critical: {severity_counts['critical']} | 🟠 High: {severity_counts['high']} | 🟔 Medium: {severity_counts['medium']} | ⚪ Low: {severity_counts['low']}\n\n",
+            "---\n\n",
+            "**Details**\n\n"
+        ]
+
+        if scan_type == 'dockerfile':
+            # Dockerfile format
+            for idx, row in enumerate(rows, 1):
+                rule_id, severity_emoji, severity, message, resolution = row
+                content_lines.append(
+                    f"{severity_emoji} **{rule_id}** ({severity.upper()})\n\n"
+                    f"**Message:** {message}\n\n"
+                    f"**Resolution:** {resolution}\n\n---\n"
+                )
+        else:
+            # Image/CVE format
+            for idx, row in enumerate(rows, 1):
+                package, cves, severity_emoji, severity = row
+                content_lines.append(
+                    f"{severity_emoji} **{package}** ({severity.upper()})\n\n"
+                    f"**CVEs:** {cves}\n\n---\n"
+                )
+
+        content = "".join(content_lines)
 
     # Add truncation notice if needed
     if truncated:
-        content_lines.append('')
-        content_lines.append(f"āš ļø **Showing top {max_rows} results (by severity).** {original_count - max_rows} additional results truncated. View full results at the scan URL below.")
-
-    content = '\n'.join(content_lines)
+        content += f"\nāš ļø **Showing top {max_rows} results (by severity).** {original_count - max_rows} additional results truncated. View more in full scan."
 
     # Create title based on scan type
     if scan_type == 'vuln':
-        title = f'Socket CVE Scanning Results: {item_name}'
+        title = f'Socket Trivy CVE: {item_name}'
     elif scan_type == 'dockerfile':
-        title = f'Socket Dockerfile Results: {item_name}'
+        title = f'Socket Trivy Dockerfile: {item_name}'
     else:  # image
-        title = f'Socket Image Scanning Results: {item_name}'
+        title = f'Socket Trivy Image: {item_name}'
 
     return [{
         'title': title,
diff --git a/socket_basics/core/connector/trivy/slack.py b/socket_basics/core/connector/trivy/slack.py
index ee217a4..ca62eb3 100644
--- a/socket_basics/core/connector/trivy/slack.py
+++ b/socket_basics/core/connector/trivy/slack.py
@@ -19,6 +19,7 @@ def format_notifications(mapping: Dict[str, Any], item_name: str = "Unknown", sc
     """
     # Group vulnerabilities by package and severity
     package_groups = defaultdict(lambda: defaultdict(set))  # Use set to avoid duplicates
+    severity_counts = {'critical': 0, 'high': 0, 'medium': 0, 'low': 0}
 
     if scan_type == 'dockerfile':
         # Process dockerfile components
@@ -26,10 +27,14 @@ def format_notifications(mapping: Dict[str, Any], item_name: str = "Unknown", sc
             for alert in comp.get('alerts', []):
                 props = alert.get('props', {}) or {}
                 rule_id = str(props.get('ruleId', '') or alert.get('title', ''))
-                severity = str(alert.get('severity', ''))
+                severity = str(alert.get('severity', '')).lower()
                 message = str(alert.get('description', ''))
                 resolution = str(props.get('resolution', ''))
 
+                # Count by severity
+                if severity in severity_counts:
+                    severity_counts[severity] += 1
+
                 rule_info = f"{rule_id}|{message}|{resolution}"
                 package_groups[rule_id][severity].add(rule_info)
 
@@ -48,7 +53,12 @@ def format_notifications(mapping: Dict[str, Any], item_name: str = "Unknown", sc
             for alert in comp.get('alerts', []):
                 props = alert.get('props', {}) or {}
                 cve_id = str(props.get('vulnerabilityId', '') or alert.get('title', ''))
-                severity = str(alert.get('severity', ''))
+                severity = str(alert.get('severity', '')).lower()
+
+                # Count by severity
+                if severity in severity_counts:
+                    severity_counts[severity] += 1
+
                 package_groups[package_key][severity].add(cve_id)
 
     # Create rows with proper formatting
@@ -66,24 +76,22 @@ def format_notifications(mapping: Dict[str, Any], item_name: str = "Unknown", sc
                     _, message, resolution = parts
 
                     # Add severity emojis for Slack
-                    severity_lower = severity.lower()
                     severity_emoji = {
                         'critical': 'šŸ”“',
                         'high': '🟠',
                         'medium': '🟔',
-                        'low': '🟢'
-                    }.get(severity_lower, '⚪')
+                        'low': '⚪'
+                    }.get(severity, '⚪')
 
                     rows.append((
                         severity_order.get(severity, 4),
-                        [rule_id, f"{severity_emoji} {severity}", message, resolution[:100] + '...' if len(resolution) > 100 else resolution]
+                        [rule_id, severity_emoji, severity, message, resolution[:100] + '...' if len(resolution) > 100 else resolution]
                     ))
 
         # Sort by severity and extract rows
         rows.sort(key=lambda x: x[0])
         rows = [row[1] for row in rows]
-        headers = ['Rule ID', 'Severity', 'Message', 'Resolution']
     else:
         # Image format: Package | CVEs | Severity
         for package_name, severity_dict in package_groups.items():
@@ -100,27 +108,24 @@ def format_notifications(mapping: Dict[str, Any], item_name: str = "Unknown", sc
                 cve_bullets = '\n'.join([f"• {cve}" for cve in cves])
 
                 # Add severity emojis for Slack
-                severity_lower = severity.lower()
                 severity_emoji = {
                     'critical': 'šŸ”“',
                     'high': '🟠',
                     'medium': '🟔',
-                    'low': '🟢'
-                }.get(severity_lower, '⚪')
+                    'low': '⚪'
+                }.get(severity, '⚪')
 
                 # Truncate package name for readability if needed
                 display_package = package_name[:40] + '...' if len(package_name) > 40 else package_name
 
                 rows.append((
                     severity_order.get(severity, 4),
-                    [display_package, cve_bullets, f"{severity_emoji} {severity}"]
+                    [display_package, cve_bullets, severity_emoji, severity]
                 ))
 
         # Sort by severity and extract rows
         rows.sort(key=lambda x: x[0])
         rows = [row[1] for row in rows]
-
-        headers = ['Package', 'CVEs', 'Severity']
 
     # Apply truncation for Slack
     max_rows = get_notifier_result_limit('slack')
@@ -131,31 +136,50 @@ def format_notifications(mapping: Dict[str, Any], item_name: str = "Unknown", sc
         truncated = True
         logger.info(f"Truncated Slack results from {original_count} to {max_rows}")
 
-    # Format rows as markdown table for Slack
+    # Format for Slack
     if not rows:
-        content = "No vulnerabilities found."
+        content = "āœ… No vulnerabilities found."
     else:
-        # Create markdown table using the correct headers
-        header_row = ' | '.join(headers)
-        separator_row = ' | '.join(['---'] * len(headers))
-        content_rows = []
-        for row in rows:
-            content_rows.append(' | '.join(str(cell) for cell in row))
+        # Add summary table
+        content_lines = [
+            "*Summary*",
+            f"šŸ”“ Critical: {severity_counts['critical']} | 🟠 High: {severity_counts['high']} | 🟔 Medium: {severity_counts['medium']} | ⚪ Low: {severity_counts['low']}",
+            "",
+            "*Details*",
+            ""
+        ]
+
+        if scan_type == 'dockerfile':
+            # Dockerfile format
+            for idx, row in enumerate(rows, 1):
+                rule_id, severity_emoji, severity, message, resolution = row
+                content_lines.append(
+                    f"{severity_emoji} *{rule_id}* ({severity.upper()})\n"
+                    f"Message: {message}\n"
+                    f"Resolution: {resolution}\n"
+                )
+        else:
+            # Image/CVE format - using bullet points
+            for idx, row in enumerate(rows, 1):
+                package, cves, severity_emoji, severity = row
+                content_lines.append(
+                    f"{severity_emoji} *{package}* ({severity.upper()})\n"
+                    f"{cves}\n"
+                )
+
+        content = "\n".join(content_lines)
 
     # Add truncation notice if needed
     if truncated:
-        content_rows.append('')
-        content_rows.append(f"āš ļø *Showing top {max_rows} results (by severity).* {original_count - max_rows} additional results truncated. View full results at the scan URL below.")
-
-    content = '\n'.join([header_row, separator_row] + content_rows)
+        content += f"\nāš ļø *Showing top {max_rows} results (by severity).* {original_count - max_rows} additional results truncated. View more in full scan."
 
     # Create title based on scan type
     if scan_type == 'vuln':
-        title = f'Socket CVE Scanning Results: {item_name}'
+        title = f'Socket Trivy CVE: {item_name}'
     elif scan_type == 'dockerfile':
-        title = f'Socket Dockerfile Results: {item_name}'
+        title = f'Socket Trivy Dockerfile: {item_name}'
     else:  # image
-        title = f'Socket Image Scanning Results: {item_name}'
+        title = f'Socket Trivy Image: {item_name}'
 
     return [{
         'title': title,
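The lower-cased severity, the per-severity counters, and the emoji map now appear in near-identical form in the Trivy formatters above and again in the TruffleHog formatters further down. If that duplication ever becomes a maintenance burden, the pattern could be factored into a shared helper along these lines (a sketch only; `severity_style` and `summary_line` are assumed names, not part of this patch):

```python
# Hypothetical shared helper for the severity handling repeated across the
# notification formatters in this patch; names and placement are assumptions.
from collections import Counter

SEVERITY_ORDER = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3}
SEVERITY_EMOJI = {'critical': 'šŸ”“', 'high': '🟠', 'medium': '🟔', 'low': '⚪'}

def severity_style(raw: str) -> tuple[str, str, int]:
    """Normalize a severity string and return (severity, emoji, sort_key)."""
    severity = str(raw or '').lower()
    return severity, SEVERITY_EMOJI.get(severity, '⚪'), SEVERITY_ORDER.get(severity, 4)

def summary_line(severities) -> str:
    """Build the 'šŸ”“ Critical: N | 🟠 High: N | ...' line used in the Summary sections."""
    counts = Counter(s for s in severities if s in SEVERITY_ORDER)
    return " | ".join(f"{SEVERITY_EMOJI[s]} {s.capitalize()}: {counts.get(s, 0)}"
                      for s in ('critical', 'high', 'medium', 'low'))

print(severity_style('HIGH'))                 # ('high', '🟠', 1)
print(summary_line(['high', 'high', 'low']))  # šŸ”“ Critical: 0 | 🟠 High: 2 | 🟔 Medium: 0 | ⚪ Low: 1
```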
diff --git a/socket_basics/core/connector/trivy/trivy.py b/socket_basics/core/connector/trivy/trivy.py
index 26bf2b5..a80af01 100644
--- a/socket_basics/core/connector/trivy/trivy.py
+++ b/socket_basics/core/connector/trivy/trivy.py
@@ -510,7 +510,7 @@ def _process_image_results(self, trivy_output: Dict[str, Any], image: str) -> Di
                 "name": pkg_name,
                 "internal": True,
                 "version": installed_version,
-                "direct": True,
+                "direct": False,
                 "dev": False,
                 "dead": False,
                 "dependencies": [],
@@ -561,7 +561,7 @@ def _process_image_results(self, trivy_output: Dict[str, Any], image: str) -> Di
         # Combine all components (packages first, then image)
         components.update(package_components)
-        # components[image_id] = image_component
+        components[image_id] = image_component
 
         return components
diff --git a/socket_basics/core/connector/trufflehog/github_pr.py b/socket_basics/core/connector/trufflehog/github_pr.py
index 7ea99d3..959d807 100644
--- a/socket_basics/core/connector/trufflehog/github_pr.py
+++ b/socket_basics/core/connector/trufflehog/github_pr.py
@@ -9,12 +9,14 @@
 def format_notifications(mapping: Dict[str, Any], config=None) -> List[Dict[str, Any]]:
     """Format for GitHub PR comments - detailed with markdown formatting."""
+    severity_order = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3}
     rows = []
+
     for comp in mapping.values():
         for a in comp.get('alerts', []):
             props = a.get('props', {}) or {}
             detector = str(props.get('detectorName', '') or a.get('title') or '')
-            severity = str(a.get('severity', ''))
+            severity = str(a.get('severity', '')).lower()
             file_path = str(props.get('filePath', '-'))
             line = str(props.get('lineNumber', ''))
             redacted = str(props.get('redactedValue', ''))
@@ -26,13 +28,20 @@ def format_notifications(mapping: Dict[str, Any], config=None) -> List[Dict[str,
             if line:
                 file_display += f":{line}"
 
-            rows.append([
-                f"**{detector}**",
-                f"*{severity}*",
-                status,
-                file_display,
-                f"`{redacted}`" if redacted else '-'
-            ])
+            rows.append((
+                severity_order.get(severity, 4),
+                [
+                    f"**{detector}**",
+                    f"*{severity.upper()}*",
+                    status,
+                    file_display,
+                    f"`{redacted}`" if redacted else '-'
+                ]
+            ))
+
+    # Sort by severity (critical first)
+    rows.sort(key=lambda x: x[0])
+    rows = [row[1] for row in rows]
 
     # Create markdown table
     if not rows:
diff --git a/socket_basics/core/connector/trufflehog/jira.py b/socket_basics/core/connector/trufflehog/jira.py
index 4006192..7ea2582 100644
--- a/socket_basics/core/connector/trufflehog/jira.py
+++ b/socket_basics/core/connector/trufflehog/jira.py
@@ -5,28 +5,46 @@
 """
 
 from typing import Dict, Any, List
+from pathlib import Path
+import logging
+import yaml
+
+logger = logging.getLogger(__name__)
+
+
+def _get_jira_result_limit() -> int:
+    """Get the result limit for Jira notifications."""
+    try:
+        notifications_yaml = Path(__file__).parent.parent.parent.parent / 'notifications.yaml'
+        with open(notifications_yaml, 'r') as f:
+            config = yaml.safe_load(f)
+            return config.get('settings', {}).get('result_limits', {}).get('jira', 30)
+    except Exception as e:
+        logger.warning(f"Could not load Jira result limit from notifications.yaml: {e}, using default 30")
+        return 30
 
 
 def format_notifications(mapping: Dict[str, Any], config=None) -> List[Dict[str, Any]]:
     """Format for Jira tickets - generate ADF format directly for proper formatting."""
-    rows = []
+    findings = []
+    severity_order = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3}
+
     for comp in mapping.values():
         for a in comp.get('alerts', []):
             props = a.get('props', {}) or {}
             detector = str(props.get('detectorName', '') or a.get('title') or '')
-            severity = str(a.get('severity', ''))
+            severity = str(a.get('severity', '')).lower()
             file_path = str(props.get('filePath', ''))
             line = str(props.get('lineNumber', ''))
             verified = props.get('verified', False)
 
             # Map severity to Jira priority
-            severity_lower = severity.lower()
             jira_priority = {
                 'critical': 'Highest',
                 'high': 'High',
                 'medium': 'Medium',
                 'low': 'Low'
-            }.get(severity_lower, 'Medium')
+            }.get(severity, 'Medium')
 
             # Enhanced priority for verified secrets
             if verified and jira_priority != 'Highest':
@@ -40,15 +58,32 @@ def format_notifications(mapping: Dict[str, Any], config=None) -> List[Dict[str,
             location = f"{file_path}:{line}" if line else file_path
 
-            rows.append([
-                {"type": "paragraph", "content": [{"type": "text", "text": detector}]},
-                {"type": "paragraph", "content": [{"type": "text", "text": jira_priority}]},
-                {"type": "paragraph", "content": [{"type": "text", "text": 'Verified' if verified else 'Unverified'}]},
-                {"type": "paragraph", "content": [{"type": "text", "text": risk_level}]},
-                {"type": "paragraph", "content": [{"type": "text", "text": location}]},
-                {"type": "paragraph", "content": [{"type": "text", "text": action}]},
-                {"type": "paragraph", "content": [{"type": "text", "text": str(a.get('description', ''))}]}
-            ])
+            findings.append((
+                severity_order.get(severity, 4),
+                [
+                    {"type": "paragraph", "content": [{"type": "text", "text": detector}]},
+                    {"type": "paragraph", "content": [{"type": "text", "text": jira_priority}]},
+                    {"type": "paragraph", "content": [{"type": "text", "text": 'Verified' if verified else 'Unverified'}]},
+                    {"type": "paragraph", "content": [{"type": "text", "text": risk_level}]},
+                    {"type": "paragraph", "content": [{"type": "text", "text": location}]},
+                    {"type": "paragraph", "content": [{"type": "text", "text": action}]},
+                    {"type": "paragraph", "content": [{"type": "text", "text": str(a.get('description', ''))}]}
+                ]
+            ))
+
+    # Sort by severity
+    findings.sort(key=lambda x: x[0])
+    rows = [f[1] for f in findings]
+
+    # Apply truncation
+    result_limit = _get_jira_result_limit()
+    total_results = len(rows)
+    was_truncated = False
+
+    if total_results > result_limit:
+        logger.info(f"Truncating Jira TruffleHog results from {total_results} to {result_limit} (prioritized by severity)")
+        rows = rows[:result_limit]
+        was_truncated = True
 
     # Build simple title with repo/branch/commit info from config
     title_parts = ["Socket Security Issues found for"]
@@ -114,25 +149,45 @@ def format_notifications(mapping: Dict[str, Any], config=None) -> List[Dict[str,
             "content": data_cells
         })
 
+    # Build content
+    doc_content = [
+        {
+            "type": "heading",
+            "attrs": {"level": 2},
+            "content": [{"type": "text", "text": "TruffleHog Secret Detection"}]
+        },
+        {
+            "type": "table",
+            "attrs": {
+                "isNumberColumnEnabled": False,
+                "layout": "default"
+            },
+            "content": table_rows
+        }
+    ]
+
+    # Add truncation notice if needed
+    if was_truncated:
+        doc_content.extend([
+            {
+                "type": "panel",
+                "attrs": {"panelType": "warning"},
+                "content": [
+                    {
+                        "type": "paragraph",
+                        "content": [
+                            {"type": "text", "text": f"āš ļø Results truncated to {result_limit} highest severity findings (total: {total_results}). View more in full scan.", "marks": [{"type": "strong"}]}
+                        ]
+                    }
+                ]
+            }
+        ])
+
     # Create complete ADF document
     content = {
         "type": "doc",
         "version": 1,
-        "content": [
-            {
-                "type": "heading",
-                "attrs": {"level": 2},
-                "content": [{"type": "text", "text": "TruffleHog Secret Detection"}]
-            },
-            {
-                "type": "table",
-                "attrs": {
-                    "isNumberColumnEnabled": False,
-                    "layout": "default"
-                },
-                "content": table_rows
-            }
-        ]
+        "content": doc_content
     }
 
     return [{
diff --git a/socket_basics/core/connector/trufflehog/ms_teams.py b/socket_basics/core/connector/trufflehog/ms_teams.py
index b53a1cf..fe816a7 100644
--- a/socket_basics/core/connector/trufflehog/ms_teams.py
+++ b/socket_basics/core/connector/trufflehog/ms_teams.py
@@ -5,48 +5,112 @@
 """
 
 from typing import Dict, Any, List
+from pathlib import Path
+import logging
+import yaml
+
+logger = logging.getLogger(__name__)
+
+
+def _get_ms_teams_result_limit() -> int:
+    """Get the result limit for MS Teams notifications."""
+    try:
+        notifications_yaml = Path(__file__).parent.parent.parent.parent / 'notifications.yaml'
+        with open(notifications_yaml, 'r') as f:
+            config = yaml.safe_load(f)
+            return config.get('settings', {}).get('result_limits', {}).get('ms_teams', 50)
+    except Exception as e:
+        logger.warning(f"Could not load MS Teams result limit from notifications.yaml: {e}, using default 50")
+        return 50
 
 
 def format_notifications(mapping: Dict[str, Any]) -> List[Dict[str, Any]]:
-    """Format for Microsoft Teams - clean tabular format."""
-    rows = []
+    """Format for Microsoft Teams - clean list format."""
+    findings = []
+    severity_order = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3}
+    severity_counts = {'critical': 0, 'high': 0, 'medium': 0, 'low': 0}
+
     for comp in mapping.values():
         for a in comp.get('alerts', []):
             props = a.get('props', {}) or {}
             detector = str(props.get('detectorName', '') or a.get('title') or '')
-            severity = str(a.get('severity', ''))
+            severity = str(a.get('severity', '')).lower()
             file_path = str(props.get('filePath', '-'))
             line = str(props.get('lineNumber', ''))
             redacted = str(props.get('redactedValue', ''))
             verified = props.get('verified', False)
 
+            # Count by severity
+            if severity in severity_counts:
+                severity_counts[severity] += 1
+
+            # Add severity emojis
+            severity_emoji = {
+                'critical': 'šŸ”“',
+                'high': '🟠',
+                'medium': '🟔',
+                'low': '⚪'
+            }.get(severity, '⚪')
+
             location = f"{file_path}:{line}" if line else file_path
             # Truncate for Teams
             if len(location) > 60:
                 location = location[:57] + '...'
 
-            rows.append([
-                detector,
-                severity,
-                'Verified' if verified else 'Unverified',
-                location,
-                redacted[:30] + '...' if len(redacted) > 30 else redacted  # Truncate for Teams
-            ])
+            findings.append((
+                severity_order.get(severity, 4),
+                {
+                    'detector': detector,
+                    'severity': severity,
+                    'severity_emoji': severity_emoji,
+                    'verified': verified,
+                    'location': location,
+                    'redacted': redacted[:30] + '...' if len(redacted) > 30 else redacted
+                }
+            ))
 
-    # Format as structured data
-    if not rows:
-        content = "No secrets found."
+    # Sort by severity
+    findings.sort(key=lambda x: x[0])
+    findings = [f[1] for f in findings]
+
+    # Apply truncation
+    result_limit = _get_ms_teams_result_limit()
+    total_results = len(findings)
+    was_truncated = False
+
+    if total_results > result_limit:
+        logger.info(f"Truncating MS Teams TruffleHog results from {total_results} to {result_limit} (prioritized by severity)")
+        findings = findings[:result_limit]
+        was_truncated = True
+
+    # Format for MS Teams
+    if not findings:
+        content = "āœ… No secrets found."
     else:
-        headers = ['Detector', 'Severity', 'Status', 'Location', 'Secret']
-        header_row = ' | '.join(headers)
-        separator_row = ' | '.join(['---'] * len(headers))
-        content_rows = []
-        for row in rows:
-            content_rows.append(' | '.join(str(cell) for cell in row))
+        # Add summary table
+        content_lines = [
+            "**Summary**\n\n",
+            f"šŸ”“ Critical: {severity_counts['critical']} | 🟠 High: {severity_counts['high']} | 🟔 Medium: {severity_counts['medium']} | ⚪ Low: {severity_counts['low']}\n\n",
+            "---\n\n",
+            "**Details**\n\n"
+        ]
+
+        for idx, f in enumerate(findings, 1):
+            status = 'Verified' if f['verified'] else 'Unverified'
+            content_lines.append(
+                f"{f['severity_emoji']} **{f['detector']}** ({f['severity'].upper()})\n\n"
+                f"**Status:** {status}\n\n"
+                f"**Location:** `{f['location']}`\n\n"
+                f"**Secret:** `{f['redacted']}`\n\n---\n"
+            )
+
+        content = "".join(content_lines)
 
-        content = '\n'.join([header_row, separator_row] + content_rows)
+    # Add truncation notice if needed
+    if was_truncated:
+        content += f"\nāš ļø **Results truncated to {result_limit} highest severity findings (total: {total_results}). View more in full scan.**"
 
     return [{
-        'title': 'TruffleHog Secret Detection Results',
+        'title': 'Socket Secret Detection',
         'content': content
     }]
\ No newline at end of file
diff --git a/socket_basics/core/connector/trufflehog/slack.py b/socket_basics/core/connector/trufflehog/slack.py
index 7743df0..749efcc 100644
--- a/socket_basics/core/connector/trufflehog/slack.py
+++ b/socket_basics/core/connector/trufflehog/slack.py
@@ -5,58 +5,112 @@
 """
 
 from typing import Dict, Any, List
+from pathlib import Path
+import logging
+import yaml
+
+logger = logging.getLogger(__name__)
+
+
+def _get_slack_result_limit() -> int:
+    """Get the result limit for Slack notifications."""
+    try:
+        notifications_yaml = Path(__file__).parent.parent.parent.parent / 'notifications.yaml'
+        with open(notifications_yaml, 'r') as f:
+            config = yaml.safe_load(f)
+            return config.get('settings', {}).get('result_limits', {}).get('slack', 50)
+    except Exception as e:
+        logger.warning(f"Could not load Slack result limit from notifications.yaml: {e}, using default 50")
+        return 50
 
 
 def format_notifications(mapping: Dict[str, Any]) -> List[Dict[str, Any]]:
     """Format for Slack notifications - concise with emojis."""
-    rows = []
+    findings = []
+    severity_order = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3}
+    severity_counts = {'critical': 0, 'high': 0, 'medium': 0, 'low': 0}
+
     for comp in mapping.values():
         for a in comp.get('alerts', []):
             props = a.get('props', {}) or {}
             detector = str(props.get('detectorName', '') or a.get('title') or '')
-            severity = str(a.get('severity', ''))
+            severity = str(a.get('severity', '')).lower()
             file_path = str(props.get('filePath', '-'))
             line = str(props.get('lineNumber', ''))
             redacted = str(props.get('redactedValue', ''))
             verified = props.get('verified', False)
 
+            # Count by severity
+            if severity in severity_counts:
+                severity_counts[severity] += 1
+
             # Add emojis for Slack
-            severity_lower = severity.lower()
             severity_emoji = {
                 'critical': 'šŸ”“',
                 'high': '🟠',
                 'medium': '🟔',
-                'low': '🟢'
-            }.get(severity_lower, '⚪')
-
-            status_emoji = 'āœ…' if verified else 'āš ļø'
+                'low': '⚪'
+            }.get(severity, '⚪')
 
             # Truncate file path for Slack
             short_path = file_path[:40] + '...' if len(file_path) > 40 else file_path
             location = f"{short_path}:{line}" if line else short_path
 
-            rows.append([
-                detector,
-                f"{severity_emoji} {severity}",
-                f"{status_emoji} {'Verified' if verified else 'Unverified'}",
-                location,
-                redacted[:20] + '...' if len(redacted) > 20 else redacted
-            ])
+            findings.append((
+                severity_order.get(severity, 4),
+                {
+                    'detector': detector,
+                    'severity': severity,
+                    'severity_emoji': severity_emoji,
+                    'verified': verified,
+                    'location': location,
+                    'redacted': redacted[:20] + '...' if len(redacted) > 20 else redacted
+                }
+            ))
 
-    # Format as markdown table for Slack
-    if not rows:
-        content = "No secrets found."
+    # Sort by severity
+    findings.sort(key=lambda x: x[0])
+    findings = [f[1] for f in findings]
+
+    # Apply truncation
+    result_limit = _get_slack_result_limit()
+    total_results = len(findings)
+    was_truncated = False
+
+    if total_results > result_limit:
+        logger.info(f"Truncating Slack TruffleHog results from {total_results} to {result_limit} (prioritized by severity)")
+        findings = findings[:result_limit]
+        was_truncated = True
+
+    # Format for Slack
+    if not findings:
+        content = "āœ… No secrets found."
     else:
-        headers = ['Detector', 'Severity', 'Status', 'Location', 'Secret']
-        header_row = ' | '.join(headers)
-        separator_row = ' | '.join(['---'] * len(headers))
-        content_rows = []
-        for row in rows:
-            content_rows.append(' | '.join(str(cell) for cell in row))
+        # Add summary table
+        content_lines = [
+            "*Summary*",
+            f"šŸ”“ Critical: {severity_counts['critical']} | 🟠 High: {severity_counts['high']} | 🟔 Medium: {severity_counts['medium']} | ⚪ Low: {severity_counts['low']}",
+            "",
+            "*Details*",
+            ""
+        ]
+
+        for idx, f in enumerate(findings, 1):
+            status = 'Verified' if f['verified'] else 'Unverified'
+            content_lines.append(
+                f"{f['severity_emoji']} *{f['detector']}* ({f['severity'].upper()})\n"
+                f"Status: *{status}*\n"
+                f"Location: `{f['location']}`\n"
+                f"Secret: `{f['redacted']}`\n"
+            )
+
+        content = "\n".join(content_lines)
 
-        content = '\n'.join([header_row, separator_row] + content_rows)
+    # Add truncation notice if needed
+    if was_truncated:
+        content += f"\nāš ļø *Results truncated to {result_limit} highest severity findings (total: {total_results}). View more in full scan.*"
 
     return [{
-        'title': 'TruffleHog Secret Detection Results',
+        'title': 'Socket Secret Detection',
         'content': content
     }]
\ No newline at end of file
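The `_get_jira_result_limit`, `_get_ms_teams_result_limit`, and `_get_slack_result_limit` helpers above all resolve `settings.result_limits.<channel>` from `notifications.yaml`, falling back to a hard-coded default when the file or key is missing. That file is not part of this diff, so the shape below is inferred from the `.get(...)` chains and defaults rather than copied from the repository:

```python
# Sketch only: notifications.yaml is not shown in this patch, so the keys and
# values below are inferred from the lookups and defaults in the helpers above.
import yaml

ASSUMED_NOTIFICATIONS_YAML = """
settings:
  result_limits:
    github_pr: 100
    jira: 30
    ms_teams: 50
    slack: 50
"""

config = yaml.safe_load(ASSUMED_NOTIFICATIONS_YAML)

# Mirrors the lookup pattern in the helpers, including the fallback default.
slack_limit = config.get('settings', {}).get('result_limits', {}).get('slack', 50)
print(slack_limit)  # 50
```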
diff --git a/socket_basics/core/notification/github_pr_notifier.py b/socket_basics/core/notification/github_pr_notifier.py
index 2e91e58..94e0120 100644
--- a/socket_basics/core/notification/github_pr_notifier.py
+++ b/socket_basics/core/notification/github_pr_notifier.py
@@ -6,6 +6,9 @@
 logger = logging.getLogger(__name__)
 
+# GitHub API comment character limit
+GITHUB_COMMENT_MAX_LENGTH = 65536
+
 class GithubPRNotifier(BaseNotifier):
     """GitHub PR notifier: posts security findings as PR comments.
@@ -41,8 +44,8 @@ def notify(self, facts: Dict[str, Any]) -> None:
             logger.info('GithubPRNotifier: no notifications present; skipping')
             return
 
-        # Get full scan URL if available
-        full_scan_url = facts.get('full_scan_html_url')
+        # Get full scan URL if available and store it for use in truncation
+        self.full_scan_url = facts.get('full_scan_html_url')
 
         # Validate format
         valid_notifications = []
@@ -50,10 +53,9 @@ def notify(self, facts: Dict[str, Any]) -> None:
             if isinstance(item, dict) and 'title' in item and 'content' in item:
                 # Append full scan URL to content if available
                 content = item['content']
-                if full_scan_url:
-                    content += f"\n\n---\n\nšŸ”— [View complete scan results]({full_scan_url})\n"
-                    item = {'title': item['title'], 'content': content}
-                valid_notifications.append(item)
+                if self.full_scan_url:
+                    content += f"\n\n---\n\nšŸ”— [View Full Socket Scan]({self.full_scan_url})\n"
+                valid_notifications.append({'title': item['title'], 'content': content})
             else:
                 logger.warning('GithubPRNotifier: skipping invalid notification item: %s', type(item))
 
@@ -248,16 +250,58 @@ def _update_section_in_comment(self, comment_body: str, section_type: str, new_s
         pattern = f'.*?'
 
         # Replace the existing section with new content
-        updated_body = re.sub(pattern, new_section_content, comment_body, flags=re.DOTALL)
+        # Use a lambda to avoid regex replacement string interpretation issues
+        updated_body = re.sub(pattern, lambda m: new_section_content, comment_body, flags=re.DOTALL)
 
         return updated_body
 
+    def _truncate_comment_if_needed(self, comment_body: str, full_scan_url: Optional[str] = None) -> str:
+        """Truncate comment if it exceeds GitHub's character limit.
+
+        Args:
+            comment_body: The comment body to check
+            full_scan_url: Optional URL to the full scan results
+
+        Returns:
+            Potentially truncated comment body with a link to full results
+        """
+        if len(comment_body) <= GITHUB_COMMENT_MAX_LENGTH:
+            return comment_body
+
+        # Calculate space needed for truncation message
+        truncation_msg = "\n\n---\n\nāš ļø **Results truncated due to size limits.**"
+        if full_scan_url:
+            truncation_msg += f"\n\nšŸ”— [View complete scan results in Socket Report]({full_scan_url})"
+        else:
+            truncation_msg += "\n\nThe complete results exceed GitHub's comment size limit."
+
+        # Reserve space for the truncation message
+        max_content_length = GITHUB_COMMENT_MAX_LENGTH - len(truncation_msg) - 100  # Extra buffer
+
+        # Truncate at a reasonable boundary (try to break at newline)
+        truncated = comment_body[:max_content_length]
+
+        # Try to find the last complete line or section
+        last_newline = truncated.rfind('\n')
+        if last_newline > max_content_length * 0.8:  # If we find a newline in the last 20%
+            truncated = truncated[:last_newline]
+
+        logger.warning(
+            f'GithubPRNotifier: comment truncated from {len(comment_body)} to {len(truncated)} characters'
+        )
+
+        return truncated + truncation_msg
+
     def _update_comment(self, pr_number: int, comment_id: int, comment_body: str) -> bool:
         """Update an existing comment."""
         owner_repo = self.repository
         if not self.repository:
             return False
+
+        # Truncate if needed
+        full_scan_url = getattr(self, 'full_scan_url', None)
+        comment_body = self._truncate_comment_if_needed(comment_body, full_scan_url)
 
         try:
             import requests
@@ -285,6 +329,10 @@ def _post_comment(self, pr_number: int, comment_body: str) -> bool:
         if not self.repository:
             logger.warning('GithubPRNotifier: no repository configured')
             return False
+
+        # Truncate if needed
+        full_scan_url = getattr(self, 'full_scan_url', None)
+        comment_body = self._truncate_comment_if_needed(comment_body, full_scan_url)
 
         try:
             import requests
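For reviewers who want to sanity-check the new comment truncation without wiring up the notifier: the method only rewrites bodies longer than `GITHUB_COMMENT_MAX_LENGTH` (65,536 characters), reserves room for the warning footer plus a small buffer, and prefers to cut at a newline near the end. A standalone approximation of that flow (a sketch, not the class method itself):

```python
# Standalone approximation of the truncation added to GithubPRNotifier; it
# mirrors the constants and flow in the patch but is not the class method.
GITHUB_COMMENT_MAX_LENGTH = 65536

def truncate_comment(body: str, full_scan_url: str | None = None) -> str:
    if len(body) <= GITHUB_COMMENT_MAX_LENGTH:
        return body
    msg = "\n\n---\n\nāš ļø **Results truncated due to size limits.**"
    if full_scan_url:
        msg += f"\n\nšŸ”— [View complete scan results in Socket Report]({full_scan_url})"
    else:
        msg += "\n\nThe complete results exceed GitHub's comment size limit."
    max_len = GITHUB_COMMENT_MAX_LENGTH - len(msg) - 100  # extra buffer, as in the patch
    cut = body[:max_len]
    last_newline = cut.rfind('\n')
    if last_newline > max_len * 0.8:  # only back up if a newline is near the end
        cut = cut[:last_newline]
    return cut + msg

# A 70k-character body comes back under the limit, ending with the footer.
print(len(truncate_comment("x\n" * 35000)) <= GITHUB_COMMENT_MAX_LENGTH)  # True
```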
diff --git a/socket_basics/core/notification/jira_notifier.py b/socket_basics/core/notification/jira_notifier.py
index 92d8f10..0fea3eb 100644
--- a/socket_basics/core/notification/jira_notifier.py
+++ b/socket_basics/core/notification/jira_notifier.py
@@ -253,7 +253,7 @@ def _add_comment_to_issue(self, issue_key: str, title: str, content: Dict[str, A
                             {"type": "text", "text": "šŸ”— "},
                             {
                                 "type": "text",
-                                "text": "View complete scan results",
+                                "text": "View Full Socket Scan",
                                 "marks": [
                                     {
                                         "type": "link",
diff --git a/socket_basics/core/notification/manager.py b/socket_basics/core/notification/manager.py
index eaf3df1..27a1a44 100644
--- a/socket_basics/core/notification/manager.py
+++ b/socket_basics/core/notification/manager.py
@@ -275,10 +275,11 @@ def notify_all(self, facts: Dict[str, Any]) -> None:
         facts['repository'] = self.app_config.get('repo', 'Unknown')  # Note: uses 'repo' not 'repository'
         facts['branch'] = self.app_config.get('branch', 'Unknown')
         facts['commit_hash'] = self.app_config.get('commit_hash', 'Unknown')
-        # Add full scan URL if available
-        full_scan_url = self.app_config.get('full_scan_html_url')
-        if full_scan_url:
-            facts['full_scan_html_url'] = full_scan_url
+        # Add full scan URL if available (from app_config or already in facts)
+        if 'full_scan_html_url' not in facts:
+            full_scan_url = self.app_config.get('full_scan_html_url')
+            if full_scan_url:
+                facts['full_scan_html_url'] = full_scan_url
 
         # Determine allowed severities for notifications. Honor SOCKET_BASICS_SEVERITIES
         # environment variable (comma-separated), fall back to INPUT_FINDING_SEVERITIES,
diff --git a/socket_basics/core/notification/ms_teams_notifier.py b/socket_basics/core/notification/ms_teams_notifier.py
index 9896b30..1416b74 100644
--- a/socket_basics/core/notification/ms_teams_notifier.py
+++ b/socket_basics/core/notification/ms_teams_notifier.py
@@ -45,9 +45,8 @@ def notify(self, facts: Dict[str, Any]) -> None:
                 # Append full scan URL to content if available
                 content = item['content']
                 if full_scan_url:
-                    content += f"\n\nšŸ”— [View complete scan results]({full_scan_url})"
-                    item = {'title': item['title'], 'content': content}
-                valid_notifications.append(item)
+                    content += f"\n\nšŸ”— [View Full Socket Scan]({full_scan_url})"
+                valid_notifications.append({'title': item['title'], 'content': content})
             else:
                 logger.warning('MSTeamsNotifier: skipping invalid notification item: %s', type(item))
 
diff --git a/socket_basics/core/notification/slack_notifier.py b/socket_basics/core/notification/slack_notifier.py
index ded2a22..9e19a06 100644
--- a/socket_basics/core/notification/slack_notifier.py
+++ b/socket_basics/core/notification/slack_notifier.py
@@ -45,16 +45,16 @@ def notify(self, facts: Dict[str, Any]) -> None:
         # Get full scan URL if available
         full_scan_url = facts.get('full_scan_html_url')
 
-        # Validate format
+        # Validate format and store URL separately for block building
         valid_notifications = []
         for item in notifications:
             if isinstance(item, dict) and 'title' in item and 'content' in item:
-                # Append full scan URL to content if available
-                content = item['content']
-                if full_scan_url:
-                    content += f"\n\nšŸ”— <{full_scan_url}|View complete scan results>"
-                    item = {'title': item['title'], 'content': content}
-                valid_notifications.append(item)
+                # Store original content and URL separately so we can add URL as its own block
+                valid_notifications.append({
+                    'title': item['title'],
+                    'content': item['content'],
+                    'full_scan_url': full_scan_url
+                })
             else:
                 logger.warning('SlackNotifier: skipping invalid notification item: %s', type(item))
 
@@ -65,9 +65,10 @@ def notify(self, facts: Dict[str, Any]) -> None:
         for item in valid_notifications:
             title = item['title']
             content = item['content']
-            self._send_slack_message(facts, title, content)
+            full_scan_url = item.get('full_scan_url')
+            self._send_slack_message(facts, title, content, full_scan_url)
 
-    def _send_slack_message(self, facts: Dict[str, Any], title: str, content: str) -> None:
+    def _send_slack_message(self, facts: Dict[str, Any], title: str, content: str, full_scan_url: str | None = None) -> None:
         """Send a single Slack message with title and content."""
         if not self.webhook_url:
             logger.warning('SlackNotifier: no Slack webhook URL configured')
@@ -78,36 +79,49 @@ def _send_slack_message(self, facts: Dict[str, Any], title: str, content: str) -
         branch = self.config.get('branch', 'Unknown')
 
         try:
-            # Truncate content if too long for Slack (3000 char limit per text block)
-            max_content_length = 2500  # Leave room for title and formatting
+            # Truncate content if it's too long for a single Slack block (3000 char limit)
+            max_content_length = 2900  # Leave room for title and formatting
             if len(content) > max_content_length:
-                content = content[:max_content_length] + "...\n[Content truncated]"
+                content = content[:max_content_length] + "\n\n_(content truncated)_"
 
             # Create Slack payload with pre-formatted content
+            blocks = [
+                {
+                    "type": "section",
+                    "text": {
+                        "type": "mrkdwn",
+                        "text": f"šŸ” *Security Findings* - {repo} ({branch})"
+                    }
+                },
+                {
+                    "type": "section",
+                    "text": {
+                        "type": "mrkdwn",
+                        "text": f"*{title}*\n\n{content}"
+                    }
+                }
+            ]
+
+            # Add full scan URL as a separate context block if available
+            if full_scan_url:
+                blocks.append({
+                    "type": "section",
+                    "text": {
+                        "type": "mrkdwn",
+                        "text": f"šŸ”— <{full_scan_url}|View Full Socket Scan>"
+                    }
+                })
+
             payload = {
                 "username": self.username,
-                "blocks": [
-                    {
-                        "type": "section",
-                        "text": {
-                            "type": "mrkdwn",
-                            "text": f"šŸ” *Security Findings* - {repo} ({branch})"
-                        }
-                    },
-                    {
-                        "type": "section",
-                        "text": {
-                            "type": "mrkdwn",
-                            "text": f"*{title}*\n```\n{content}\n```"
-                        }
-                    }
-                ]
+                "blocks": blocks
             }
 
             import requests
             resp = requests.post(self.webhook_url, json=payload, timeout=10)
 
             if resp.status_code >= 400:
-                logger.warning('SlackNotifier: webhook error %s: %s', resp.status_code, resp.text[:200])
+                logger.warning('SlackNotifier: webhook error %s: %s', resp.status_code, resp.text[:500])
+                logger.debug(f'Failed Slack payload: {payload}')
             else:
                 logger.info('SlackNotifier: posted message for "%s"', title)
diff --git a/socket_basics/version.py b/socket_basics/version.py
index 7863915..976498a 100644
--- a/socket_basics/version.py
+++ b/socket_basics/version.py
@@ -1 +1 @@
-__version__ = "1.0.2"
+__version__ = "1.0.3"
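End-to-end, the Slack change means the webhook now receives plain Block Kit sections instead of one code-fenced blob: a header section, a section carrying the formatter's mrkdwn content, and, when available, a trailing section linking to the full scan. A rough sketch of what a posted payload could look like (all values are placeholders, not captured output):

```python
# Illustrative payload shape only; repository, title, content, and URL values
# are placeholders, and the username is an assumption rather than read from config.
import json

payload = {
    "username": "Socket Basics",
    "blocks": [
        {
            "type": "section",
            "text": {"type": "mrkdwn", "text": "šŸ” *Security Findings* - org/repo (main)"},
        },
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*Socket Secret Detection*\n\n*Summary*\nšŸ”“ Critical: 1 | 🟠 High: 0 | 🟔 Medium: 0 | ⚪ Low: 0",
            },
        },
        {
            "type": "section",
            "text": {"type": "mrkdwn", "text": "šŸ”— <https://socket.dev/...|View Full Socket Scan>"},
        },
    ],
}

print(json.dumps(payload, ensure_ascii=False, indent=2))
# Posting remains a plain webhook call, as in _send_slack_message:
# requests.post(webhook_url, json=payload, timeout=10)
```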