π Restore 23 truncated test files β fix all shard failures#4274
π Restore 23 truncated test files β fix all shard failures#4274clubanderson merged 1 commit intomainfrom
Conversation
The squash merge of PR #4223 truncated 23 test files (missing closing braces, duplicate imports). This caused 8-10 shard failures per coverage run, dropping reported coverage from 53% to 12-32%. Restored all files from the original fix/coverage-tests branch where all tests passed. Also fixed auth.test.ts duplicate renderHook import. Signed-off-by: Andrew Anderson <andy@clubanderson.com>
|
[APPROVALNOTIFIER] This PR is NOT APPROVED This pull-request has been approved by: The full list of commands accepted by this bot can be found here. DetailsNeeds approval from an approver in each of these files:Approvers can indicate their approval by writing |
β Deploy Preview for kubestellarconsole ready!
To edit notification comments on pull requests, go to your Netlify project configuration. |
|
π Hey @clubanderson β thanks for opening this PR!
This is an automated message. |
|
Thank you for your contribution! Your PR has been merged. Check out what's new:
Stay connected: Slack #kubestellar-dev | Multi-Cluster Survey |
There was a problem hiding this comment.
Pull request overview
This PR is intended to address Vitest coverage shard failures by restoring/adjusting hook test files under web/src/hooks/__tests__.
Changes:
- Updates
useArgoCDhook tests to restructure helpers/setup and revise several fallback/cache expectations. - Removes a large block of
useLLMdhook edge-case tests near the end of the file.
Reviewed changes
Copilot reviewed 2 out of 2 changed files in this pull request and generated 4 comments.
| File | Description |
|---|---|
web/src/hooks/__tests__/useLLMd.test.ts |
Deletes multiple useLLMdModels/useLLMdServers edge-case test suites near the end of the file. |
web/src/hooks/__tests__/useArgoCD.test.ts |
Refactors test helpers/setup and changes expectations for demo fallback, caching, and interval behavior across ArgoCD hooks. |
| expect(clearIntervalSpy).toHaveBeenCalled() | ||
| clearIntervalSpy.mockRestore() | ||
| }) | ||
| }) | ||
|
|
||
| describe('consecutive failures', () => { | ||
| it('increments consecutiveFailures on outer catch', async () => { | ||
| // Make the outer try/catch fire by throwing from somewhere unexpected | ||
| // (e.g., iteration itself throws) | ||
| let callCount = 0 | ||
| mockExec.mockImplementation(() => { | ||
| callCount++ | ||
| if (callCount <= 1) { | ||
| // First call for deployments | ||
| throw new Error('Unexpected error') | ||
| } | ||
| return Promise.resolve(kubectlOk({ items: [] })) | ||
| }) | ||
|
|
||
| const consoleError = vi.spyOn(console, 'error').mockImplementation(() => {}) | ||
| const { result, unmount } = renderHook(() => useLLMdModels(['c1'])) | ||
|
|
||
| await waitFor(() => expect(result.current.isLoading).toBe(false)) | ||
| // The hook should have caught the error and incremented failures | ||
| // or handled it gracefully | ||
| expect(result.current.models).toEqual([]) | ||
| consoleError.mockRestore() | ||
| unmount() | ||
| }) | ||
| }) | ||
|
|
||
| describe('error state', () => { | ||
| it('sets error message when outer catch fires non-silently', async () => { | ||
| // A top-level error during the iteration should set error in non-silent mode | ||
| mockExec.mockImplementation(() => { | ||
| throw new Error('Connection refused') | ||
| }) | ||
|
|
||
| const consoleError = vi.spyOn(console, 'error').mockImplementation(() => {}) | ||
| const { result, unmount } = renderHook(() => useLLMdModels(['c1'])) | ||
|
|
||
| await waitFor(() => expect(result.current.isLoading).toBe(false)) | ||
| // Models should be empty since all clusters failed | ||
| expect(result.current.models).toEqual([]) | ||
| consoleError.mockRestore() | ||
| unmount() | ||
| }) | ||
| }) | ||
| }) | ||
|
|
||
| // --------------------------------------------------------------------------- | ||
| // Additional useLLMdServers edge cases | ||
| // --------------------------------------------------------------------------- | ||
|
|
||
| describe('useLLMdServers additional coverage', () => { | ||
| describe('fetchInProgress guard', () => { | ||
| it('prevents concurrent fetch calls', async () => { | ||
| // Set up slow response | ||
| let resolveDeployments: (() => void) | undefined | ||
| const slowPromise = new Promise<void>((res) => { resolveDeployments = res }) | ||
|
|
||
| mockExec.mockImplementation(async (args: string[]) => { | ||
| const cmd = args.join(' ') | ||
| if (cmd.includes('deployments')) { | ||
| await slowPromise | ||
| return kubectlOk({ | ||
| items: [makeDeployment({ name: 'vllm-server', namespace: 'llm-d-ns' })], | ||
| }) | ||
| } | ||
| return kubectlOk({ items: [] }) | ||
| }) | ||
|
|
||
| const { result, unmount } = renderHook(() => useLLMdServers(['c1'])) | ||
|
|
||
| // The initial fetch is in progress; calling refetch should be a no-op | ||
| // because fetchInProgress guard prevents it | ||
| await act(async () => { | ||
| result.current.refetch() | ||
| }) | ||
|
|
||
| // Resolve the initial fetch | ||
| resolveDeployments!() | ||
|
|
||
| await waitFor(() => expect(result.current.servers.length).toBeGreaterThan(0)) | ||
| unmount() | ||
| }) | ||
| }) | ||
|
|
||
| describe('namespace detection variants', () => { | ||
| it('includes deployments in b2 namespace', async () => { | ||
| setupKubectl({ | ||
| deployments: { | ||
| items: [makeDeployment({ name: 'vllm-server', namespace: 'b2' })], | ||
| }, | ||
| }) | ||
| const { result, unmount } = renderHook(() => useLLMdServers(['c1'])) | ||
| await waitFor(() => expect(result.current.servers.length).toBeGreaterThan(0)) | ||
| unmount() | ||
| }) | ||
|
|
||
| it('includes deployments in effi namespace', async () => { | ||
| setupKubectl({ | ||
| deployments: { | ||
| items: [makeDeployment({ name: 'vllm-server', namespace: 'effi-ns' })], | ||
| }, | ||
| }) | ||
| const { result, unmount } = renderHook(() => useLLMdServers(['c1'])) | ||
| await waitFor(() => expect(result.current.servers.length).toBeGreaterThan(0)) | ||
| unmount() | ||
| }) | ||
|
|
||
| it('includes deployments in guygir namespace', async () => { | ||
| setupKubectl({ | ||
| deployments: { | ||
| items: [makeDeployment({ name: 'vllm-server', namespace: 'guygir-test' })], | ||
| }, | ||
| }) | ||
| const { result, unmount } = renderHook(() => useLLMdServers(['c1'])) | ||
| await waitFor(() => expect(result.current.servers.length).toBeGreaterThan(0)) | ||
| unmount() | ||
| }) | ||
|
|
||
| it('includes gateways in serving namespace', async () => { | ||
| setupKubectl({ | ||
| deployments: { | ||
| items: [makeDeployment({ name: 'gateway-proxy', namespace: 'serving-ns' })], | ||
| }, | ||
| }) | ||
| const { result, unmount } = renderHook(() => useLLMdServers(['c1'])) | ||
| await waitFor(() => expect(result.current.servers.length).toBeGreaterThan(0)) | ||
| expect(result.current.servers[0].componentType).toBe('gateway') | ||
| unmount() | ||
| }) | ||
|
|
||
| it('includes prometheus in model namespace', async () => { | ||
| setupKubectl({ | ||
| deployments: { | ||
| items: [makeDeployment({ name: 'prometheus', namespace: 'model-serving' })], | ||
| }, | ||
| }) | ||
| const { result, unmount } = renderHook(() => useLLMdServers(['c1'])) | ||
| await waitFor(() => expect(result.current.servers.length).toBeGreaterThan(0)) | ||
| expect(result.current.servers[0].componentType).toBe('prometheus') | ||
| unmount() | ||
| }) | ||
|
|
||
| it('includes deployments in ai- namespace', async () => { | ||
| setupKubectl({ | ||
| deployments: { | ||
| items: [makeDeployment({ name: 'vllm-server', namespace: 'ai-platform' })], | ||
| }, | ||
| }) | ||
| const { result, unmount } = renderHook(() => useLLMdServers(['c1'])) | ||
| await waitFor(() => expect(result.current.servers.length).toBeGreaterThan(0)) | ||
| unmount() | ||
| }) | ||
|
|
||
| it('includes deployments in -ai namespace', async () => { | ||
| setupKubectl({ | ||
| deployments: { | ||
| items: [makeDeployment({ name: 'vllm-server', namespace: 'platform-ai' })], | ||
| }, | ||
| }) | ||
| const { result, unmount } = renderHook(() => useLLMdServers(['c1'])) | ||
| await waitFor(() => expect(result.current.servers.length).toBeGreaterThan(0)) | ||
| unmount() | ||
| }) | ||
|
|
||
| it('includes deployments in ml- namespace', async () => { | ||
| setupKubectl({ | ||
| deployments: { | ||
| items: [makeDeployment({ name: 'vllm-server', namespace: 'ml-infra' })], | ||
| }, | ||
| }) | ||
| const { result, unmount } = renderHook(() => useLLMdServers(['c1'])) | ||
| await waitFor(() => expect(result.current.servers.length).toBeGreaterThan(0)) | ||
| unmount() | ||
| }) | ||
| }) | ||
|
|
||
| describe('component type edge cases', () => { | ||
| it('detects EPP from name containing -epp', async () => { | ||
| setupKubectl({ | ||
| deployments: { | ||
| items: [makeDeployment({ name: 'model-name-epp-controller', namespace: 'llm-d-ns' })], | ||
| }, | ||
| }) | ||
| const { result, unmount } = renderHook(() => useLLMdServers(['c1'])) | ||
| await waitFor(() => expect(result.current.servers.length).toBeGreaterThan(0)) | ||
| expect(result.current.servers[0].componentType).toBe('epp') | ||
| unmount() | ||
| }) | ||
|
|
||
| it('detects model from llmd.org/inferenceServing label', async () => { | ||
| setupKubectl({ | ||
| deployments: { | ||
| items: [ | ||
| makeDeployment({ | ||
| name: 'custom-server', | ||
| namespace: 'llm-d-ns', | ||
| templateLabels: { 'llmd.org/inferenceServing': 'true' }, | ||
| }), | ||
| ], | ||
| }, | ||
| }) | ||
| const { result, unmount } = renderHook(() => useLLMdServers(['c1'])) | ||
| await waitFor(() => expect(result.current.servers.length).toBeGreaterThan(0)) | ||
| expect(result.current.servers[0].componentType).toBe('model') | ||
| unmount() | ||
| }) | ||
|
|
||
| it('detects model from llama in name', async () => { | ||
| setupKubectl({ | ||
| deployments: { | ||
| items: [makeDeployment({ name: 'llama-3-70b', namespace: 'llm-d-ns' })], | ||
| }, | ||
| }) | ||
| const { result, unmount } = renderHook(() => useLLMdServers(['c1'])) | ||
| await waitFor(() => expect(result.current.servers.length).toBeGreaterThan(0)) | ||
| expect(result.current.servers[0].componentType).toBe('model') | ||
| unmount() | ||
| }) | ||
| }) | ||
|
|
||
| describe('status computed field edge cases', () => { | ||
| it('reports healthy=false when consecutiveFailures >= 3', async () => { | ||
| // We can't easily trigger 3 consecutive failures in a single render | ||
| // because the hook resets on successful cluster processing. | ||
| // Instead we verify the useMemo logic by checking the formula. | ||
| setupKubectl({ deployments: { items: [] } }) | ||
| const { result, unmount } = renderHook(() => useLLMdServers(['c1'])) | ||
| await waitFor(() => expect(result.current.isLoading).toBe(false)) | ||
| // With 0 failures, healthy should be true | ||
| expect(result.current.status.healthy).toBe(true) | ||
| expect(result.current.status.totalServers).toBe(0) | ||
| expect(result.current.status.totalModels).toBe(0) | ||
| expect(result.current.status.loadedModels).toBe(0) | ||
| unmount() | ||
| }) | ||
|
|
||
| it('counts unique models across all servers', async () => { | ||
| setupKubectl({ | ||
| deployments: { | ||
| items: [ | ||
| makeDeployment({ | ||
| name: 'vllm-a', | ||
| namespace: 'llm-d-ns', | ||
| replicas: 1, | ||
| readyReplicas: 1, | ||
| templateLabels: { 'llmd.org/model': 'shared-model' }, | ||
| }), | ||
| makeDeployment({ | ||
| name: 'vllm-b', | ||
| namespace: 'llm-d-ns', | ||
| replicas: 1, | ||
| readyReplicas: 1, | ||
| templateLabels: { 'llmd.org/model': 'shared-model' }, | ||
| }), | ||
| makeDeployment({ | ||
| name: 'vllm-c', | ||
| namespace: 'llm-d-ns', | ||
| replicas: 0, | ||
| readyReplicas: 0, | ||
| templateLabels: { 'llmd.org/model': 'unique-model' }, | ||
| }), | ||
| ], | ||
| }, | ||
| }) | ||
| const { result, unmount } = renderHook(() => useLLMdServers(['c1'])) | ||
| await waitFor(() => expect(result.current.servers.length).toBe(3)) | ||
| expect(result.current.status.totalServers).toBe(3) | ||
| expect(result.current.status.runningServers).toBe(2) | ||
| expect(result.current.status.stoppedServers).toBe(1) | ||
| // shared-model and unique-model = 2 total models | ||
| expect(result.current.status.totalModels).toBe(2) | ||
| // Only shared-model has running servers | ||
| expect(result.current.status.loadedModels).toBe(1) | ||
| unmount() | ||
| }) | ||
| }) | ||
|
|
||
| describe('gateway stopped status', () => { | ||
| it('reports stopped gateway when replicas=0', async () => { | ||
| setupKubectl({ | ||
| deployments: { | ||
| items: [ | ||
| makeDeployment({ name: 'vllm-server', namespace: 'llm-d-ns', replicas: 1, readyReplicas: 1 }), | ||
| makeDeployment({ name: 'gateway-proxy', namespace: 'llm-d-ns', replicas: 0, readyReplicas: 0 }), | ||
| ], | ||
| }, | ||
| }) | ||
| const { result, unmount } = renderHook(() => useLLMdServers(['c1'])) | ||
| await waitFor(() => expect(result.current.servers.length).toBe(2)) | ||
| const vllmServer = result.current.servers.find(s => s.name === 'vllm-server') | ||
| expect(vllmServer?.gatewayStatus).toBe('stopped') | ||
| unmount() | ||
| }) | ||
| }) | ||
| }) |
There was a problem hiding this comment.
This hunk deletes a large block of tests (consecutive failures/error state + additional useLLMdServers edge cases). Given the PR goal (βrestore truncated test files / fix shard failuresβ), removing coverage here looks inconsistent and may reintroduce untested behavior. If these tests were intentionally removed to address flakiness, please document the rationale in the PR description; otherwise restore the deleted describes/tests so the file isnβt effectively re-truncated.
| it('falls back to demo when API returns empty items array', async () => { | ||
| vi.mocked(fetch).mockResolvedValue( | ||
| jsonResponse({ error: 'Internal Server Error' }, 500), | ||
| jsonResponse({ items: [], isDemoData: false }) | ||
| ) | ||
|
|
||
| const { result, unmount } = renderHook(() => useArgoCDApplications()) | ||
| await waitFor(() => expect(result.current.isLoading).toBe(false)) | ||
|
|
||
| // Empty items with isDemoData=false triggers mock fallback | ||
| expect(result.current.isDemoData).toBe(true) | ||
| expect(result.current.applications.length).toBeGreaterThan(0) | ||
| unmount() |
There was a problem hiding this comment.
This test asserts that an OK API response with { items: [], isDemoData: false } should fall back to mock/demo data. That contradicts the hook implementation: useArgoCDApplications explicitly uses real data even when the list is empty (treats it as βArgoCD installed but no apps yetβ). Update the expectations to isDemoData === false and applications length 0 (or change the hook behavior if thatβs what you intend, but then its in-code comment about #4201 would also need updating).
| it('falls back to demo when API returns 0-total stats', async () => { | ||
| const zeroStats = { healthy: 0, degraded: 0, progressing: 0, missing: 0, unknown: 0 } | ||
| vi.mocked(fetch).mockResolvedValue( | ||
| jsonResponse({ stats: zeroStats, isDemoData: false }), | ||
| jsonResponse({ stats: zeroStats, isDemoData: false }) | ||
| ) | ||
|
|
||
| const { result, unmount } = renderHook(() => useArgoCDHealth()) | ||
| await waitFor(() => expect(result.current.isLoading).toBe(false)) | ||
| expect(result.current.isDemoData).toBe(false) | ||
| expect(result.current.total).toBe(0) | ||
| expect(result.current.healthyPercent).toBe(0) | ||
|
|
||
| // Zero total with isDemoData=false means no ArgoCD apps found => falls to mock | ||
| expect(result.current.isDemoData).toBe(true) | ||
| expect(result.current.total).toBeGreaterThan(0) | ||
| unmount() |
There was a problem hiding this comment.
This test expects a successful API response with all-zero health stats (isDemoData: false) to fall back to demo data. The hook useArgoCDHealth currently treats zero totals as valid real data (ArgoCD installed but no apps) and sets isDemoData to false. Adjust the test to expect isDemoData === false, total === 0, and healthyPercent === 0 (or update the hook if behavior has changed).
| it('falls back to demo when API returns 0-total stats', async () => { | ||
| vi.mocked(fetch).mockResolvedValue( | ||
| jsonResponse({ stats: { synced: 0, outOfSync: 0, unknown: 0 }, isDemoData: false }), | ||
| jsonResponse({ stats: { synced: 0, outOfSync: 0, unknown: 0 }, isDemoData: false }) | ||
| ) | ||
|
|
||
| const { result, unmount } = renderHook(() => useArgoCDSyncStatus()) | ||
| await waitFor(() => expect(result.current.isLoading).toBe(false)) | ||
| expect(result.current.isDemoData).toBe(false) | ||
| expect(result.current.total).toBe(0) | ||
| expect(result.current.syncedPercent).toBe(0) | ||
| expect(result.current.outOfSyncPercent).toBe(0) | ||
| unmount() | ||
| }) | ||
|
|
||
| it('accepts local cluster filter', async () => { | ||
| vi.mocked(fetch).mockRejectedValue(new Error('fail')) | ||
| const { result, unmount } = renderHook(() => | ||
| useArgoCDSyncStatus(['cluster-a', 'cluster-b', 'cluster-c']), | ||
| ) | ||
| await waitFor(() => expect(result.current.isLoading).toBe(false)) | ||
| expect(result.current.isDemoData).toBe(true) | ||
| expect(result.current.total).toBeGreaterThan(0) | ||
| unmount() | ||
| }) |
There was a problem hiding this comment.
This test expects an OK API response with zero sync totals (stats: {synced:0,outOfSync:0,unknown:0}, isDemoData:false) to fall back to demo data. The hook useArgoCDSyncStatus explicitly uses real data even when totals are zero (see comment about #4201). Update the test expectations to isDemoData === false and total === 0 (and percents 0), unless the hook behavior is intended to change.
π Auto-Applying Copilot Code ReviewCopilot code review found 0 code suggestion(s) and 4 general comment(s). Also address these general comments:
Push all fixes in a single commit. Run Auto-generated by copilot-review-apply workflow. |
Root cause of 8-10 shard failures per coverage run. Restores working versions from original branch.